fix: calculate prometheus disks_offline/disks_total correctly (#11215)

fixes #11196
This commit is contained in:
Harshavardhana 2021-01-04 09:42:09 -08:00 committed by GitHub
parent 153d4be032
commit e7ae49f9c9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 21 additions and 42 deletions

View File

@ -292,7 +292,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
} }
// ignores any errors here. // ignores any errors here.
storageInfo, _ := objectAPI.StorageInfo(ctx, false) storageInfo, _ := objectAPI.StorageInfo(ctx)
// Collect any disk healing. // Collect any disk healing.
healing, _ := getAggregatedBackgroundHealState(ctx) healing, _ := getAggregatedBackgroundHealState(ctx)

View File

@ -289,7 +289,7 @@ func (z *erasureServerPools) BackendInfo() (b BackendInfo) {
return return
} }
func (z *erasureServerPools) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { func (z *erasureServerPools) StorageInfo(ctx context.Context) (StorageInfo, []error) {
var storageInfo StorageInfo var storageInfo StorageInfo
storageInfos := make([]StorageInfo, len(z.serverPools)) storageInfos := make([]StorageInfo, len(z.serverPools))
@ -298,7 +298,7 @@ func (z *erasureServerPools) StorageInfo(ctx context.Context, local bool) (Stora
for index := range z.serverPools { for index := range z.serverPools {
index := index index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index], storageInfosErrs[index] = z.serverPools[index].StorageInfo(ctx, local) storageInfos[index], storageInfosErrs[index] = z.serverPools[index].StorageInfo(ctx)
return nil return nil
}, index) }, index)
} }

View File

@ -481,7 +481,7 @@ func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo {
index := index index := index
g.Go(func() error { g.Go(func() error {
// ignoring errors on purpose // ignoring errors on purpose
storageInfos[index], _ = s.sets[index].StorageInfo(ctx, false) storageInfos[index], _ = s.sets[index].StorageInfo(ctx)
return nil return nil
}, index) }, index)
} }
@ -508,7 +508,7 @@ func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo {
} }
// StorageInfo - combines output of StorageInfo across all erasure coded object sets. // StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
var storageInfo StorageInfo var storageInfo StorageInfo
storageInfos := make([]StorageInfo, len(s.sets)) storageInfos := make([]StorageInfo, len(s.sets))
@ -518,7 +518,7 @@ func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo,
for index := range s.sets { for index := range s.sets {
index := index index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx, local) storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx)
return nil return nil
}, index) }, index)
} }
@ -530,12 +530,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context, local bool) (StorageInfo,
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...) storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
} }
if local {
// if local is true, we are not interested in the drive UUID info.
// this is called primarily by prometheus
return storageInfo, nil
}
var errs []error var errs []error
for i := range s.sets { for i := range s.sets {
errs = append(errs, storageInfoErrs[i]...) errs = append(errs, storageInfoErrs[i]...)

View File

@ -216,24 +216,9 @@ func getStorageInfo(disks []StorageAPI, endpoints []string) (StorageInfo, []erro
} }
// StorageInfo - returns underlying storage statistics. // StorageInfo - returns underlying storage statistics.
func (er erasureObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) { func (er erasureObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
disks := er.getDisks() disks := er.getDisks()
endpoints := er.getEndpoints() endpoints := er.getEndpoints()
if local {
var localDisks []StorageAPI
var localEndpoints []string
for i, disk := range disks {
if disk != nil {
if disk.IsLocal() {
// Append this local disk since local flag is true
localDisks = append(localDisks, disk)
localEndpoints = append(localEndpoints, endpoints[i])
}
}
}
disks = localDisks
endpoints = localEndpoints
}
return getStorageInfo(disks, endpoints) return getStorageInfo(disks, endpoints)
} }

View File

@ -206,8 +206,7 @@ func (fs *FSObjects) BackendInfo() BackendInfo {
} }
// StorageInfo - returns underlying storage statistics. // StorageInfo - returns underlying storage statistics.
func (fs *FSObjects) StorageInfo(ctx context.Context, _ bool) (StorageInfo, []error) { func (fs *FSObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
atomic.AddInt64(&fs.activeIOCount, 1) atomic.AddInt64(&fs.activeIOCount, 1)
defer func() { defer func() {
atomic.AddInt64(&fs.activeIOCount, -1) atomic.AddInt64(&fs.activeIOCount, -1)

View File

@ -561,7 +561,7 @@ func (a *azureObjects) Shutdown(ctx context.Context) error {
} }
// StorageInfo - Not relevant to Azure backend. // StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) { func (a *azureObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si.Backend.Type = minio.BackendGateway si.Backend.Type = minio.BackendGateway
host := a.endpoint.Host host := a.endpoint.Host
if a.endpoint.Port() == "" { if a.endpoint.Port() == "" {

View File

@ -412,7 +412,7 @@ func (l *gcsGateway) Shutdown(ctx context.Context) error {
} }
// StorageInfo - Not relevant to GCS backend. // StorageInfo - Not relevant to GCS backend.
func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) { func (l *gcsGateway) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si.Backend.Type = minio.BackendGateway si.Backend.Type = minio.BackendGateway
si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, "storage.googleapis.com:443") si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, "storage.googleapis.com:443")
return si, nil return si, nil

View File

@ -214,7 +214,7 @@ func (n *hdfsObjects) Shutdown(ctx context.Context) error {
return n.clnt.Close() return n.clnt.Close()
} }
func (n *hdfsObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, errs []error) { func (n *hdfsObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, errs []error) {
fsInfo, err := n.clnt.StatFs() fsInfo, err := n.clnt.StatFs()
if err != nil { if err != nil {
return minio.StorageInfo{}, []error{err} return minio.StorageInfo{}, []error{err}

View File

@ -104,8 +104,8 @@ func (n *nasObjects) IsListenSupported() bool {
return false return false
} }
func (n *nasObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) { func (n *nasObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si, errs := n.ObjectLayer.StorageInfo(ctx, false) si, errs := n.ObjectLayer.StorageInfo(ctx)
si.Backend.GatewayOnline = si.Backend.Type == minio.BackendFS si.Backend.GatewayOnline = si.Backend.Type == minio.BackendFS
si.Backend.Type = minio.BackendGateway si.Backend.Type = minio.BackendGateway
return si, errs return si, errs

View File

@ -274,7 +274,7 @@ func (l *s3Objects) Shutdown(ctx context.Context) error {
} }
// StorageInfo is not relevant to S3 backend. // StorageInfo is not relevant to S3 backend.
func (l *s3Objects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) { func (l *s3Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
si.Backend.Type = minio.BackendGateway si.Backend.Type = minio.BackendGateway
host := l.Client.EndpointURL().Host host := l.Client.EndpointURL().Host
if l.Client.EndpointURL().Port() == "" { if l.Client.EndpointURL().Port() == "" {

View File

@ -497,10 +497,11 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
return return
} }
// Fetch disk space info, ignore errors server := getLocalServerProperty(globalEndpoints, &http.Request{
storageInfo, _ := objLayer.StorageInfo(GlobalContext, true) Host: GetLocalPeer(globalEndpoints),
})
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks) onlineDisks, offlineDisks := getOnlineOfflineDisksStats(server.Disks)
totalDisks := offlineDisks.Merge(onlineDisks) totalDisks := offlineDisks.Merge(onlineDisks)
// MinIO Offline Disks per node // MinIO Offline Disks per node
@ -523,7 +524,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
float64(totalDisks.Sum()), float64(totalDisks.Sum()),
) )
for _, disk := range storageInfo.Disks { for _, disk := range server.Disks {
// Total disk usage by the disk // Total disk usage by the disk
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(

View File

@ -82,7 +82,7 @@ type ObjectLayer interface {
CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error
BackendInfo() BackendInfo BackendInfo() BackendInfo
StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) // local queries only local disks StorageInfo(ctx context.Context) (StorageInfo, []error) // queries storage across all disks
// Bucket operations. // Bucket operations.
MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error

View File

@ -47,7 +47,7 @@ func getFormatStr(strLen int, padding int) string {
} }
func mustGetStorageInfo(objAPI ObjectLayer) StorageInfo { func mustGetStorageInfo(objAPI ObjectLayer) StorageInfo {
storageInfo, _ := objAPI.StorageInfo(GlobalContext, false) storageInfo, _ := objAPI.StorageInfo(GlobalContext)
return storageInfo return storageInfo
} }