Mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)
Preserve errors returned by diskInfo to detect disk errors (#9727)
This PR basically reverts #9720 and re-implements it differently
Commit b2db8123ec (parent b330c2c57e)
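At a glance, the change makes ObjectLayer.StorageInfo (and the helpers feeding it) return the per-disk errors alongside the usual statistics instead of discarding them inside diskInfo. The sketch below only illustrates the new calling pattern with made-up stand-in types (StorageInfo and ObjectLayer are heavily simplified, fakeLayer is invented for the example); it is not code from this commit.

package main

import (
	"context"
	"errors"
	"fmt"
)

// Simplified stand-ins for the real types; only the shape of the new
// signature matters here.
type StorageInfo struct{ OnlineDisks, OfflineDisks int }

type ObjectLayer interface {
	// After this change StorageInfo also returns one error slot per disk.
	StorageInfo(ctx context.Context, local bool) (StorageInfo, []error)
}

// report shows the two caller styles seen in the diff: ignore the errors
// when only aggregate numbers are needed, or inspect them to classify
// drive state instead of treating every failure as "offline".
func report(ctx context.Context, objAPI ObjectLayer) {
	info, errs := objAPI.StorageInfo(ctx, false)
	for i, err := range errs {
		if err != nil {
			fmt.Printf("disk %d reported: %v\n", i, err)
		}
	}
	fmt.Printf("online=%d offline=%d\n", info.OnlineDisks, info.OfflineDisks)
}

// fakeLayer is a throwaway implementation so the example runs on its own.
type fakeLayer struct{}

func (fakeLayer) StorageInfo(context.Context, bool) (StorageInfo, []error) {
	return StorageInfo{OnlineDisks: 3, OfflineDisks: 1},
		[]error{nil, nil, nil, errors.New("unformatted disk")}
}

func main() { report(context.Background(), fakeLayer{}) }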
@@ -284,7 +284,8 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
 return
 }

-storageInfo := objectAPI.StorageInfo(ctx, false)
+// ignores any errors here.
+storageInfo, _ := objectAPI.StorageInfo(ctx, false)

 // Marshal API response
 jsonBytes, err := json.Marshal(storageInfo)
@@ -707,8 +708,8 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
 }
 }

-// find number of disks in the setup
-info := objectAPI.StorageInfo(ctx, false)
+// find number of disks in the setup, ignore any errors here.
+info, _ := objectAPI.StorageInfo(ctx, false)
 numDisks := info.Backend.OfflineDisks.Sum() + info.Backend.OnlineDisks.Sum()

 healPath := pathJoin(hip.bucket, hip.objPrefix)
@@ -1354,8 +1355,8 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 // Get the notification target info
 notifyTarget := fetchLambdaInfo(cfg)

-// Fetching the Storage information
-storageInfo := objectAPI.StorageInfo(ctx, false)
+// Fetching the Storage information, ignore any errors.
+storageInfo, _ := objectAPI.StorageInfo(ctx, false)

 var OnDisks int
 var OffDisks int
@@ -114,8 +114,9 @@ func startBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
 go globalBackgroundHealRoutine.run(ctx, objAPI)

 // Launch the background healer sequence to track
-// background healing operations
-info := objAPI.StorageInfo(ctx, false)
+// background healing operations, ignore errors
+// errors are handled into offline disks already.
+info, _ := objAPI.StorageInfo(ctx, false)
 numDisks := info.Backend.OnlineDisks.Sum() + info.Backend.OfflineDisks.Sum()
 nh := newBgHealSequence(numDisks)
 globalBackgroundHealState.LaunchNewHealSequence(nh)
@@ -203,7 +203,7 @@ func (fs *FSObjects) Shutdown(ctx context.Context) error {
 }

 // StorageInfo - returns underlying storage statistics.
-func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) StorageInfo {
+func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) (StorageInfo, []error) {

 atomic.AddInt64(&fs.activeIOCount, 1)
 defer func() {
@@ -212,7 +212,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) StorageInfo {

 di, err := getDiskInfo(fs.fsPath)
 if err != nil {
-return StorageInfo{}
+return StorageInfo{}, []error{err}
 }
 used := di.Total - di.Free
 if !fs.diskMount {
@@ -226,7 +226,7 @@ func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) StorageInfo {
 MountPaths: []string{fs.fsPath},
 }
 storageInfo.Backend.Type = BackendFS
-return storageInfo
+return storageInfo, nil
 }

 func (fs *FSObjects) waitForLowActiveIO() {
@@ -542,10 +542,10 @@ func (a *azureObjects) Shutdown(ctx context.Context) error {
 }

 // StorageInfo - Not relevant to Azure backend.
-func (a *azureObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo) {
+func (a *azureObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
 si.Backend.Type = minio.BackendGateway
 si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, a.httpClient, a.endpoint)
-return si
+return si, nil
 }

 // MakeBucketWithLocation - Create a new container on azure backend.
@@ -414,10 +414,10 @@ func (l *gcsGateway) Shutdown(ctx context.Context) error {
 }

 // StorageInfo - Not relevant to GCS backend.
-func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo) {
+func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
 si.Backend.Type = minio.BackendGateway
 si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, l.httpClient, "https://storage.googleapis.com")
-return si
+return si, nil
 }

 // MakeBucketWithLocation - Create a new container on GCS backend.
@@ -205,16 +205,15 @@ func (n *hdfsObjects) Shutdown(ctx context.Context) error {
 return n.clnt.Close()
 }

-func (n *hdfsObjects) StorageInfo(ctx context.Context, _ bool) minio.StorageInfo {
+func (n *hdfsObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, errs []error) {
 fsInfo, err := n.clnt.StatFs()
 if err != nil {
-return minio.StorageInfo{}
+return minio.StorageInfo{}, []error{err}
 }
-sinfo := minio.StorageInfo{}
-sinfo.Used = []uint64{fsInfo.Used}
-sinfo.Backend.Type = minio.BackendGateway
-sinfo.Backend.GatewayOnline = true
-return sinfo
+si.Used = []uint64{fsInfo.Used}
+si.Backend.Type = minio.BackendGateway
+si.Backend.GatewayOnline = true
+return si, nil
 }

 // hdfsObjects implements gateway for Minio and S3 compatible object storage servers.
@@ -758,6 +757,7 @@ func (n *hdfsObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
 }

 // IsReady returns whether the layer is ready to take requests.
-func (n *hdfsObjects) IsReady(_ context.Context) bool {
-return true
+func (n *hdfsObjects) IsReady(ctx context.Context) bool {
+si, _ := n.StorageInfo(ctx, false)
+return si.Backend.GatewayOnline
 }
@@ -110,11 +110,11 @@ func (n *nasObjects) IsListenBucketSupported() bool {
 return false
 }

-func (n *nasObjects) StorageInfo(ctx context.Context, _ bool) minio.StorageInfo {
-sinfo := n.ObjectLayer.StorageInfo(ctx, false)
-sinfo.Backend.GatewayOnline = sinfo.Backend.Type == minio.BackendFS
-sinfo.Backend.Type = minio.BackendGateway
-return sinfo
+func (n *nasObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
+si, errs := n.ObjectLayer.StorageInfo(ctx, false)
+si.Backend.GatewayOnline = si.Backend.Type == minio.BackendFS
+si.Backend.Type = minio.BackendGateway
+return si, errs
 }

 // nasObjects implements gateway for MinIO and S3 compatible object storage servers.
@@ -134,8 +134,8 @@ func (n *nasObjects) SetBucketObjectLockConfig(ctx context.Context, bucket strin

 // IsReady returns whether the layer is ready to take requests.
 func (n *nasObjects) IsReady(ctx context.Context) bool {
-sinfo := n.ObjectLayer.StorageInfo(ctx, false)
-return sinfo.Backend.Type == minio.BackendFS
+si, _ := n.StorageInfo(ctx, false)
+return si.Backend.GatewayOnline
 }

 func (n *nasObjects) IsTaggingSupported() bool {
@@ -280,10 +280,10 @@ func (l *s3Objects) Shutdown(ctx context.Context) error {
 }

 // StorageInfo is not relevant to S3 backend.
-func (l *s3Objects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo) {
+func (l *s3Objects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
 si.Backend.Type = minio.BackendGateway
 si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, l.HTTPClient, l.Client.EndpointURL().String())
-return si
+return si, nil
 }

 // MakeBucket creates a new container on S3 backend.
@@ -416,8 +416,8 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 return
 }

-// Fetch disk space info
-storageInfo := objLayer.StorageInfo(GlobalContext, true)
+// Fetch disk space info, ignore errors
+storageInfo, _ := objLayer.StorageInfo(GlobalContext, true)

 offlineDisks := storageInfo.Backend.OfflineDisks
 onlineDisks := storageInfo.Backend.OnlineDisks
@@ -59,7 +59,7 @@ type ObjectLayer interface {
 // Storage operations.
 Shutdown(context.Context) error
 CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error
-StorageInfo(ctx context.Context, local bool) StorageInfo // local queries only local disks
+StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) // local queries only local disks

 // Bucket operations.
 MakeBucketWithLocation(ctx context.Context, bucket string, location string, lockEnabled bool) error
@@ -413,6 +413,7 @@ type DiskInfo struct {
 Used uint64
 RootDisk bool
 MountPath string
+Error string // reports any error returned by underlying disk
 }

 // DiskInfo provides current information about disk space usage,
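The new Error field gives DiskInfo a place to carry a disk's failure reason along with its usage numbers, rather than forcing callers to treat every failure as an offline disk. A purely hypothetical consumer is sketched below (only the Error field and its comment come from the diff; the helper and the trimmed struct are invented for the illustration):

package main

import "fmt"

// Trimmed-down DiskInfo; in the real struct this field sits next to Total,
// Free, Used, RootDisk and MountPath.
type DiskInfo struct {
	MountPath string
	Error     string // reports any error returned by underlying disk
}

// driveState is a hypothetical helper: with the error preserved, a caller can
// distinguish a faulty-but-reachable disk from one that is simply offline.
func driveState(info DiskInfo) string {
	if info.Error != "" {
		return "faulty (" + info.Error + ")"
	}
	return "ok"
}

func main() {
	fmt.Println(driveState(DiskInfo{MountPath: "/data1"}))
	fmt.Println(driveState(DiskInfo{MountPath: "/data2", Error: "drive not found"}))
}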
@@ -47,6 +47,11 @@ func getFormatStr(strLen int, padding int) string {
 return "%" + formatStr
 }

+func mustGetStorageInfo(objAPI ObjectLayer) StorageInfo {
+storageInfo, _ := objAPI.StorageInfo(GlobalContext, false)
+return storageInfo
+}
+
 func printStartupSafeModeMessage(apiEndpoints []string, err error) {
 logStartupMessage(color.RedBold("Server startup failed with '%v'", err))
 logStartupMessage(color.RedBold("Server switching to safe mode"))
@@ -55,7 +60,7 @@ func printStartupSafeModeMessage(apiEndpoints []string, err error) {
 // Object layer is initialized then print StorageInfo in safe mode.
 objAPI := newObjectLayerWithoutSafeModeFn()
 if objAPI != nil {
-if msg := getStorageInfoMsgSafeMode(objAPI.StorageInfo(GlobalContext, false)); msg != "" {
+if msg := getStorageInfoMsgSafeMode(mustGetStorageInfo(objAPI)); msg != "" {
 logStartupMessage(msg)
 }
 }
@@ -117,7 +122,7 @@ func printStartupMessage(apiEndpoints []string) {
 // Object layer is initialized then print StorageInfo.
 objAPI := newObjectLayerFn()
 if objAPI != nil {
-printStorageInfo(objAPI.StorageInfo(GlobalContext, false))
+printStorageInfo(mustGetStorageInfo(objAPI))
 }

 // Prints credential, region and browser access.
@@ -31,7 +31,7 @@ type StorageAPI interface {
 IsLocal() bool
 Hostname() string // Returns host name if remote host.
 Close() error
-GetDiskID() (string, error) // Could be expensive
+GetDiskID() (string, error)
 SetDiskID(id string)

 DiskInfo() (info DiskInfo, err error)
@@ -177,20 +177,12 @@ func (client *storageRESTClient) CrawlAndGetDataUsage(ctx context.Context, cache
 }

 func (client *storageRESTClient) GetDiskID() (string, error) {
-respBody, err := client.call(storageRESTMethodGetDiskID, nil, nil, -1)
-if err != nil {
-// Ignore when other nodes does not support GetDiskID call, this check
-// can be removed when the storage API version is bumped.
-if strings.Contains(err.Error(), "404 page not found") {
+// This call should never be over the network, this is always
+// a cached value - caller should make sure to use this
+// function on a fresh disk or make sure to look at the error
+// from a different networked call to validate the GetDiskID()
 return client.diskID, nil
-}
-return "", err
-}
-defer http.DrainBody(respBody)
-var s string
-err = gob.NewDecoder(respBody).Decode(&s)
-return s, err
 }

 func (client *storageRESTClient) SetDiskID(id string) {
 client.diskID = id
@@ -24,7 +24,6 @@ const (

 const (
 storageRESTMethodDiskInfo = "/diskinfo"
-storageRESTMethodGetDiskID = "/getdiskid"
 storageRESTMethodCrawlAndGetDataUsage = "/crawlandgetdatausage"
 storageRESTMethodMakeVol = "/makevol"
 storageRESTMethodMakeVolBulk = "/makevolbulk"
@@ -132,22 +132,6 @@ func (s *storageRESTServer) DiskInfoHandler(w http.ResponseWriter, r *http.Reque
 gob.NewEncoder(w).Encode(info)
 }

-// GetDiskIDHandler - returns disk id.
-func (s *storageRESTServer) GetDiskIDHandler(w http.ResponseWriter, r *http.Request) {
-if err := storageServerRequestValidate(r); err != nil {
-s.writeErrorResponse(w, err)
-return
-}
-
-info, err := s.storage.GetDiskID()
-if err != nil {
-s.writeErrorResponse(w, err)
-return
-}
-defer w.(http.Flusher).Flush()
-gob.NewEncoder(w).Encode(info)
-}
-
 func (s *storageRESTServer) CrawlAndGetDataUsageHandler(w http.ResponseWriter, r *http.Request) {
 if !s.IsValid(w, r) {
 return
@@ -800,7 +784,6 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones
 subrouter := router.PathPrefix(path.Join(storageRESTPrefix, endpoint.Path)).Subrouter()

 subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDiskInfo).HandlerFunc(httpTraceHdrs(server.DiskInfoHandler))
-subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodGetDiskID).HandlerFunc(httpTraceHdrs(server.GetDiskIDHandler))
 subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCrawlAndGetDataUsage).HandlerFunc(httpTraceHdrs(server.CrawlAndGetDataUsageHandler))
 subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodMakeVol).HandlerFunc(httpTraceHdrs(server.MakeVolHandler)).Queries(restQueries(storageRESTVolume)...)
 subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodMakeVolBulk).HandlerFunc(httpTraceHdrs(server.MakeVolBulkHandler)).Queries(restQueries(storageRESTVolumes)...)
@@ -129,7 +129,7 @@ func (web *webAPIHandlers) StorageInfo(r *http.Request, args *WebGenericArgs, re
 if authErr != nil {
 return toJSONError(ctx, authErr)
 }
-reply.StorageInfo = objectAPI.StorageInfo(ctx, false)
+reply.StorageInfo, _ = objectAPI.StorageInfo(ctx, false)
 reply.UIVersion = browser.UIVersion
 return nil
 }
cmd/xl-sets.go (133 changed lines)
@@ -37,20 +37,9 @@ import (
 "github.com/minio/minio/pkg/sync/errgroup"
 )

-// setsStorageAPI is encapsulated type for Close()
-type setsStorageAPI [][]StorageAPI
-
 // setsDsyncLockers is encapsulated type for Close()
 type setsDsyncLockers [][]dsync.NetLocker

-func (s setsStorageAPI) Copy() [][]StorageAPI {
-copyS := make(setsStorageAPI, len(s))
-for i, disks := range s {
-copyS[i] = append(copyS[i], disks...)
-}
-return copyS
-}
-
 // Information of a new disk connection
 type diskConnectInfo struct {
 setIndex int
@@ -71,7 +60,7 @@ type xlSets struct {
 xlDisksMu sync.RWMutex

 // Re-ordered list of disks per set.
-xlDisks setsStorageAPI
+xlDisks [][]StorageAPI

 // Distributed locker clients.
 xlLockers setsDsyncLockers
@@ -369,24 +358,13 @@ func (s *xlSets) NewNSLock(ctx context.Context, bucket string, objects ...string
 return s.getHashedSet("").NewNSLock(ctx, bucket, objects...)
 }

-// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
-// Caches values for 1 second.
-func (s *xlSets) StorageInfo(ctx context.Context, local bool) StorageInfo {
-s.disksStorageInfoCache.Once.Do(func() {
-s.disksStorageInfoCache.TTL = time.Second
-s.disksStorageInfoCache.Update = func() (interface{}, error) {
-return s.storageInfo(ctx, local), nil
-}
-})
-v, _ := s.disksStorageInfoCache.Get()
-return v.(StorageInfo)
-}
-
-// storageInfo - combines output of StorageInfo across all erasure coded object sets.
-// Use StorageInfo for a cached version.
-func (s *xlSets) storageInfo(ctx context.Context, local bool) StorageInfo {
+// StorageUsageInfo - combines output of StorageInfo across all erasure coded object sets.
+// This only returns disk usage info for Zones to perform placement decision, this call
+// is not implemented in Object interface and is not meant to be used by other object
+// layer implementations.
+func (s *xlSets) StorageUsageInfo(ctx context.Context) StorageInfo {
+storageUsageInfo := func() StorageInfo {
 var storageInfo StorageInfo

 storageInfos := make([]StorageInfo, len(s.sets))
 storageInfo.Backend.Type = BackendErasure
@@ -394,7 +372,51 @@ func (s *xlSets) storageInfo(ctx context.Context, local bool) StorageInfo {
 for index := range s.sets {
 index := index
 g.Go(func() error {
-storageInfos[index] = s.sets[index].StorageInfo(ctx, local)
+// ignoring errors on purpose
+storageInfos[index], _ = s.sets[index].StorageInfo(ctx, false)
 return nil
 }, index)
 }
+
+// Wait for the go routines.
+g.Wait()
+
+for _, lstorageInfo := range storageInfos {
+storageInfo.Used = append(storageInfo.Used, lstorageInfo.Used...)
+storageInfo.Total = append(storageInfo.Total, lstorageInfo.Total...)
+storageInfo.Available = append(storageInfo.Available, lstorageInfo.Available...)
+storageInfo.MountPaths = append(storageInfo.MountPaths, lstorageInfo.MountPaths...)
+storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
+storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
+}
+
+return storageInfo
+}
+
+s.disksStorageInfoCache.Once.Do(func() {
+s.disksStorageInfoCache.TTL = time.Second
+s.disksStorageInfoCache.Update = func() (interface{}, error) {
+return storageUsageInfo(), nil
+}
+})
+
+v, _ := s.disksStorageInfoCache.Get()
+return v.(StorageInfo)
+}
+
+// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
+func (s *xlSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {
+var storageInfo StorageInfo
+
+storageInfos := make([]StorageInfo, len(s.sets))
+storageInfoErrs := make([][]error, len(s.sets))
+storageInfo.Backend.Type = BackendErasure
+
+g := errgroup.WithNErrs(len(s.sets))
+for index := range s.sets {
+index := index
+g.Go(func() error {
+storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx, local)
+return nil
+}, index)
+}
@@ -428,49 +450,56 @@ func (s *xlSets) storageInfo(ctx context.Context, local bool) StorageInfo {
 }

 if local {
-// if local is true, we don't need to read format.json
-return storageInfo
+// if local is true, we are not interested in the drive UUID info.
+// this is called primarily by prometheus
+return storageInfo, nil
 }

-s.xlDisksMu.RLock()
-storageDisks := s.xlDisks.Copy()
-s.xlDisksMu.RUnlock()
-
-for i := 0; i < s.setCount; i++ {
-for j := 0; j < s.drivesPerSet; j++ {
-if storageDisks[i][j] == nil {
+for i, set := range s.sets {
+storageDisks := set.getDisks()
+for j, storageErr := range storageInfoErrs[i] {
+if storageDisks[j] == OfflineDisk {
 storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
 State: madmin.DriveStateOffline,
 Endpoint: s.endpointStrings[i*s.drivesPerSet+j],
 }
 continue
 }
-diskID, err := storageDisks[i][j].GetDiskID()
-if err != nil {
-if err == errUnformattedDisk {
+var diskID string
+if storageErr == nil {
+// No errors returned by storage, look for its DiskID()
+diskID, storageErr = storageDisks[j].GetDiskID()
+}
+if storageErr == nil {
+storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
+State: madmin.DriveStateOk,
+Endpoint: storageDisks[j].String(),
+UUID: diskID,
+}
+continue
+}
+if storageErr == errUnformattedDisk {
 storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
 State: madmin.DriveStateUnformatted,
-Endpoint: storageDisks[i][j].String(),
+Endpoint: storageDisks[j].String(),
 UUID: "",
 }
 } else {
 storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
 State: madmin.DriveStateCorrupt,
-Endpoint: storageDisks[i][j].String(),
+Endpoint: storageDisks[j].String(),
 UUID: "",
 }
 }
-continue
-}
-storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
-State: madmin.DriveStateOk,
-Endpoint: storageDisks[i][j].String(),
-UUID: diskID,
-}
 }
 }

-return storageInfo
+var errs []error
+for i := range s.sets {
+errs = append(errs, storageInfoErrs[i]...)
+}
+
+return storageInfo, errs
 }

 func (s *xlSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
cmd/xl-v1.go (22 changed lines)
@@ -93,8 +93,9 @@ func (d byDiskTotal) Less(i, j int) bool {
 }

 // getDisksInfo - fetch disks info across all other storage API.
-func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, onlineDisks, offlineDisks madmin.BackendDisks) {
+func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) {
 disksInfo = make([]DiskInfo, len(disks))
+errs = make([]error, len(disks))

 g := errgroup.WithNErrs(len(disks))
 for index := range disks {
@@ -106,13 +107,13 @@ func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, onlineDisks, offlin
 }
 info, err := disks[index].DiskInfo()
 if err != nil {
-if IsErr(err, baseErrs...) {
-return err
-}
+if !IsErr(err, baseErrs...) {
 reqInfo := (&logger.ReqInfo{}).AppendTags("disk", disks[index].String())
 ctx := logger.SetReqInfo(GlobalContext, reqInfo)
 logger.LogIf(ctx, err)
 }
+return err
+}
 disksInfo[index] = info
 return nil
 }, index)
@@ -121,8 +122,9 @@ func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, onlineDisks, offlin
 onlineDisks = make(madmin.BackendDisks)
 offlineDisks = make(madmin.BackendDisks)

+errs = g.Wait()
 // Wait for the routines.
-for i, diskInfoErr := range g.Wait() {
+for i, diskInfoErr := range errs {
 if disks[i] == nil {
 continue
 }
@@ -157,12 +159,12 @@ func getDisksInfo(disks []StorageAPI) (disksInfo []DiskInfo, onlineDisks, offlin
 }

 // Success.
-return disksInfo, onlineDisks, offlineDisks
+return disksInfo, errs, onlineDisks, offlineDisks
 }

 // Get an aggregated storage info across all disks.
-func getStorageInfo(disks []StorageAPI) StorageInfo {
-disksInfo, onlineDisks, offlineDisks := getDisksInfo(disks)
+func getStorageInfo(disks []StorageAPI) (StorageInfo, []error) {
+disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks)

 // Sort so that the first element is the smallest.
 sort.Sort(byDiskTotal(disksInfo))
@@ -191,11 +193,11 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
 storageInfo.Backend.OnlineDisks = onlineDisks
 storageInfo.Backend.OfflineDisks = offlineDisks

-return storageInfo
+return storageInfo, errs
 }

 // StorageInfo - returns underlying storage statistics.
-func (xl xlObjects) StorageInfo(ctx context.Context, local bool) StorageInfo {
+func (xl xlObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {

 disks := xl.getDisks()
 if local {
@@ -130,7 +130,7 @@ func (z *xlZones) getZonesAvailableSpace(ctx context.Context) zonesAvailableSpac
 for index := range z.zones {
 index := index
 g.Go(func() error {
-storageInfos[index] = z.zones[index].StorageInfo(ctx, false)
+storageInfos[index] = z.zones[index].StorageUsageInfo(ctx)
 return nil
 }, index)
 }
@@ -175,7 +175,7 @@ func (z *xlZones) Shutdown(ctx context.Context) error {
 return nil
 }

-func (z *xlZones) StorageInfo(ctx context.Context, local bool) StorageInfo {
+func (z *xlZones) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {
 if z.SingleZone() {
 return z.zones[0].StorageInfo(ctx, local)
 }
@@ -183,11 +183,12 @@ func (z *xlZones) StorageInfo(ctx context.Context, local bool) StorageInfo {
 var storageInfo StorageInfo

 storageInfos := make([]StorageInfo, len(z.zones))
+storageInfosErrs := make([][]error, len(z.zones))
 g := errgroup.WithNErrs(len(z.zones))
 for index := range z.zones {
 index := index
 g.Go(func() error {
-storageInfos[index] = z.zones[index].StorageInfo(ctx, local)
+storageInfos[index], storageInfosErrs[index] = z.zones[index].StorageInfo(ctx, local)
 return nil
 }, index)
 }
@@ -211,7 +212,11 @@ func (z *xlZones) StorageInfo(ctx context.Context, local bool) StorageInfo {
 storageInfo.Backend.RRSCData = storageInfos[0].Backend.RRSCData
 storageInfo.Backend.RRSCParity = storageInfos[0].Backend.RRSCParity

-return storageInfo
+var errs []error
+for i := range z.zones {
+errs = append(errs, storageInfosErrs[i]...)
+}
+return storageInfo, errs
 }

 func (z *xlZones) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {