re-implement StorageInfo to be a peer call (#16155)

This commit is contained in:
Harshavardhana 2022-12-01 14:31:35 -08:00 committed by GitHub
parent c84e2939e4
commit 5a8df7efb3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
20 changed files with 191 additions and 128 deletions

View File

@ -344,8 +344,7 @@ func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Requ
return return
} }
// ignores any errors here. storageInfo := objectAPI.StorageInfo(ctx)
storageInfo, _ := objectAPI.StorageInfo(ctx)
// Collect any disk healing. // Collect any disk healing.
healing, _ := getAggregatedBackgroundHealState(ctx, nil) healing, _ := getAggregatedBackgroundHealState(ctx, nil)
@ -1234,7 +1233,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
duration = time.Second * 10 duration = time.Second * 10
} }
storageInfo, _ := objectAPI.StorageInfo(ctx) storageInfo := objectAPI.StorageInfo(ctx)
sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune) sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune)
if !sufficientCapacity { if !sufficientCapacity {
@ -2581,7 +2580,7 @@ func getClusterMetaInfo(ctx context.Context) []byte {
ci.Info.NoOfServers = len(globalEndpoints.Hostnames()) ci.Info.NoOfServers = len(globalEndpoints.Hostnames())
ci.Info.MinioVersion = Version ci.Info.MinioVersion = Version
si, _ := objectAPI.StorageInfo(ctx) si := objectAPI.StorageInfo(ctx)
ci.Info.NoOfDrives = len(si.Disks) ci.Info.NoOfDrives = len(si.Disks)
for _, disk := range si.Disks { for _, disk := range si.Disks {

View File

@ -143,10 +143,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
objLayer := newObjectLayerFn() objLayer := newObjectLayerFn()
if objLayer != nil { if objLayer != nil {
// only need Disks information in server mode. storageInfo := objLayer.LocalStorageInfo(GlobalContext)
storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
props.State = string(madmin.ItemOnline) props.State = string(madmin.ItemOnline)
props.Disks = storageInfo.Disks props.Disks = storageInfo.Disks
} else {
props.State = string(madmin.ItemOffline)
} }
return props return props

View File

@ -58,9 +58,9 @@ const (
scannerMetricLastRealtime scannerMetricLastRealtime
// Trace only metrics: // Trace only metrics:
scannerMetricScanFolder // Scan a folder on disk, recursively. scannerMetricScanFolder // Scan a folder on disk, recursively.
scannerMetricScanCycle // Full cycle, cluster global scannerMetricScanCycle // Full cycle, cluster global
scannerMetricScanBucketDisk // Single bucket on one disk scannerMetricScanBucketDrive // Single bucket on one drive
// Must be last: // Must be last:
scannerMetricLast scannerMetricLast
@ -181,9 +181,9 @@ func (p *scannerMetrics) getCurrentPaths() []string {
return res return res
} }
// activeDisks returns the number of currently active disks. // activeDrives returns the number of currently active disks.
// (since this is concurrent it may not be 100% reliable) // (since this is concurrent it may not be 100% reliable)
func (p *scannerMetrics) activeDisks() int { func (p *scannerMetrics) activeDrives() int {
var i int var i int
p.currentPaths.Range(func(k, v interface{}) bool { p.currentPaths.Range(func(k, v interface{}) bool {
i++ i++

View File

@ -1069,7 +1069,7 @@ func (z *erasureServerPools) getDecommissionPoolSpaceInfo(idx int) (pi poolSpace
return pi, errInvalidArgument return pi, errInvalidArgument
} }
info, _ := z.serverPools[idx].StorageInfo(context.Background()) info := z.serverPools[idx].StorageInfo(context.Background())
info.Backend = z.BackendInfo() info.Backend = z.BackendInfo()
usableTotal := int64(GetTotalUsableCapacity(info.Disks, info)) usableTotal := int64(GetTotalUsableCapacity(info.Disks, info))

View File

@ -125,7 +125,7 @@ func (z *erasureServerPools) initRebalanceMeta(ctx context.Context, buckets []st
} }
// Fetch disk capacity and available space. // Fetch disk capacity and available space.
si, _ := z.StorageInfo(ctx) si := z.StorageInfo(ctx)
diskStats := make([]struct { diskStats := make([]struct {
AvailableSpace uint64 AvailableSpace uint64
TotalSpace uint64 TotalSpace uint64

View File

@ -545,16 +545,15 @@ func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
return return
} }
func (z *erasureServerPools) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) { func (z *erasureServerPools) LocalStorageInfo(ctx context.Context) StorageInfo {
var storageInfo StorageInfo var storageInfo StorageInfo
storageInfos := make([]StorageInfo, len(z.serverPools)) storageInfos := make([]StorageInfo, len(z.serverPools))
storageInfosErrs := make([][]error, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools { for index := range z.serverPools {
index := index index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index], storageInfosErrs[index] = z.serverPools[index].LocalStorageInfo(ctx) storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx)
return nil return nil
}, index) }, index)
} }
@ -567,40 +566,11 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context) (StorageInfo,
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...) storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
} }
var errs []error return storageInfo
for i := range z.serverPools {
errs = append(errs, storageInfosErrs[i]...)
}
return storageInfo, errs
} }
func (z *erasureServerPools) StorageInfo(ctx context.Context) (StorageInfo, []error) { func (z *erasureServerPools) StorageInfo(ctx context.Context) StorageInfo {
var storageInfo StorageInfo return globalNotificationSys.StorageInfo(z)
storageInfos := make([]StorageInfo, len(z.serverPools))
storageInfosErrs := make([][]error, len(z.serverPools))
g := errgroup.WithNErrs(len(z.serverPools))
for index := range z.serverPools {
index := index
g.Go(func() error {
storageInfos[index], storageInfosErrs[index] = z.serverPools[index].StorageInfo(ctx)
return nil
}, index)
}
// Wait for the go routines.
g.Wait()
storageInfo.Backend = z.BackendInfo()
for _, lstorageInfo := range storageInfos {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
}
var errs []error
for i := range z.serverPools {
errs = append(errs, storageInfosErrs[i]...)
}
return storageInfo, errs
} }
func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, healScanMode madmin.HealScanMode) error { func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, healScanMode madmin.HealScanMode) error {

View File

@ -600,17 +600,16 @@ func (s *erasureSets) ParityCount() int {
} }
// StorageInfo - combines output of StorageInfo across all erasure coded object sets. // StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) { func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo {
var storageInfo madmin.StorageInfo var storageInfo madmin.StorageInfo
storageInfos := make([]madmin.StorageInfo, len(s.sets)) storageInfos := make([]madmin.StorageInfo, len(s.sets))
storageInfoErrs := make([][]error, len(s.sets))
g := errgroup.WithNErrs(len(s.sets)) g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets { for index := range s.sets {
index := index index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx) storageInfos[index] = s.sets[index].StorageInfo(ctx)
return nil return nil
}, index) }, index)
} }
@ -622,26 +621,20 @@ func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...) storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
} }
errs := make([]error, 0, len(s.sets)*s.setDriveCount) return storageInfo
for i := range s.sets {
errs = append(errs, storageInfoErrs[i]...)
}
return storageInfo, errs
} }
// StorageInfo - combines output of StorageInfo across all erasure coded object sets. // StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *erasureSets) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) { func (s *erasureSets) LocalStorageInfo(ctx context.Context) StorageInfo {
var storageInfo StorageInfo var storageInfo StorageInfo
storageInfos := make([]StorageInfo, len(s.sets)) storageInfos := make([]StorageInfo, len(s.sets))
storageInfoErrs := make([][]error, len(s.sets))
g := errgroup.WithNErrs(len(s.sets)) g := errgroup.WithNErrs(len(s.sets))
for index := range s.sets { for index := range s.sets {
index := index index := index
g.Go(func() error { g.Go(func() error {
storageInfos[index], storageInfoErrs[index] = s.sets[index].LocalStorageInfo(ctx) storageInfos[index] = s.sets[index].LocalStorageInfo(ctx)
return nil return nil
}, index) }, index)
} }
@ -653,12 +646,7 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context) (StorageInfo, []erro
storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...) storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
} }
var errs []error return storageInfo
for i := range s.sets {
errs = append(errs, storageInfoErrs[i]...)
}
return storageInfo, errs
} }
// Shutdown shutsdown all erasure coded sets in parallel // Shutdown shutsdown all erasure coded sets in parallel

View File

@ -175,7 +175,7 @@ func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDi
} }
// getDisksInfo - fetch disks info across all other storage API. // getDisksInfo - fetch disks info across all other storage API.
func getDisksInfo(disks []StorageAPI, endpoints []Endpoint) (disksInfo []madmin.Disk, errs []error) { func getDisksInfo(disks []StorageAPI, endpoints []Endpoint) (disksInfo []madmin.Disk) {
disksInfo = make([]madmin.Disk, len(disks)) disksInfo = make([]madmin.Disk, len(disks))
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
@ -189,8 +189,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint) (disksInfo []madmin.
State: diskErrToDriveState(errDiskNotFound), State: diskErrToDriveState(errDiskNotFound),
Endpoint: diskEndpoint, Endpoint: diskEndpoint,
} }
// Storage disk is empty, perhaps ignored disk or not available. return nil
return errDiskNotFound
} }
info, err := disks[index].DiskInfo(context.TODO()) info, err := disks[index].DiskInfo(context.TODO())
di := madmin.Disk{ di := madmin.Disk{
@ -231,16 +230,17 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint) (disksInfo []madmin.
di.Utilization = float64(info.Used / info.Total * 100) di.Utilization = float64(info.Used / info.Total * 100)
} }
disksInfo[index] = di disksInfo[index] = di
return err return nil
}, index) }, index)
} }
return disksInfo, g.Wait() g.Wait()
return disksInfo
} }
// Get an aggregated storage info across all disks. // Get an aggregated storage info across all disks.
func getStorageInfo(disks []StorageAPI, endpoints []Endpoint) (StorageInfo, []error) { func getStorageInfo(disks []StorageAPI, endpoints []Endpoint) StorageInfo {
disksInfo, errs := getDisksInfo(disks, endpoints) disksInfo := getDisksInfo(disks, endpoints)
// Sort so that the first element is the smallest. // Sort so that the first element is the smallest.
sort.Sort(byDiskTotal(disksInfo)) sort.Sort(byDiskTotal(disksInfo))
@ -250,18 +250,18 @@ func getStorageInfo(disks []StorageAPI, endpoints []Endpoint) (StorageInfo, []er
} }
storageInfo.Backend.Type = madmin.Erasure storageInfo.Backend.Type = madmin.Erasure
return storageInfo, errs return storageInfo
} }
// StorageInfo - returns underlying storage statistics. // StorageInfo - returns underlying storage statistics.
func (er erasureObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) { func (er erasureObjects) StorageInfo(ctx context.Context) StorageInfo {
disks := er.getDisks() disks := er.getDisks()
endpoints := er.getEndpoints() endpoints := er.getEndpoints()
return getStorageInfo(disks, endpoints) return getStorageInfo(disks, endpoints)
} }
// LocalStorageInfo - returns underlying local storage statistics. // LocalStorageInfo - returns underlying local storage statistics.
func (er erasureObjects) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) { func (er erasureObjects) LocalStorageInfo(ctx context.Context) StorageInfo {
disks := er.getDisks() disks := er.getDisks()
endpoints := er.getEndpoints() endpoints := er.getEndpoints()

View File

@ -105,8 +105,7 @@ func getBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealS
return status, true return status, true
} }
// ignores any errors here. si := o.StorageInfo(ctx)
si, _ := o.StorageInfo(ctx)
indexed := make(map[string][]madmin.Disk) indexed := make(map[string][]madmin.Disk)
for _, disk := range si.Disks { for _, disk := range si.Disks {

View File

@ -91,10 +91,8 @@ func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskM
return metrics return metrics
} }
// only need Disks information in server mode. storageInfo := objLayer.LocalStorageInfo(GlobalContext)
storageInfo, errs := objLayer.LocalStorageInfo(GlobalContext) for _, d := range storageInfo.Disks {
for i, d := range storageInfo.Disks {
if len(disks) != 0 { if len(disks) != 0 {
_, ok := disks[d.Endpoint] _, ok := disks[d.Endpoint]
if !ok { if !ok {
@ -102,7 +100,7 @@ func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskM
} }
} }
if errs[i] != nil { if d.State != madmin.DriveStateOk && d.State != madmin.DriveStateUnformatted {
metrics[d.Endpoint] = madmin.DiskMetric{NDisks: 1, Offline: 1} metrics[d.Endpoint] = madmin.DiskMetric{NDisks: 1, Offline: 1}
continue continue
} }

View File

@ -78,7 +78,7 @@ func init() {
nodeCollector = newMinioCollectorNode([]*MetricsGroup{ nodeCollector = newMinioCollectorNode([]*MetricsGroup{
getNodeHealthMetrics(), getNodeHealthMetrics(),
getLocalDiskStorageMetrics(), getLocalDriveStorageMetrics(),
getCacheMetrics(), getCacheMetrics(),
getHTTPMetrics(), getHTTPMetrics(),
getNetworkMetrics(), getNetworkMetrics(),
@ -333,7 +333,7 @@ func getClusterCapacityUsageFreeBytesMD() MetricDescription {
} }
} }
func getNodeDiskAPILatencyMD() MetricDescription { func getNodeDriveAPILatencyMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: nodeMetricNamespace, Namespace: nodeMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
@ -343,7 +343,7 @@ func getNodeDiskAPILatencyMD() MetricDescription {
} }
} }
func getNodeDiskUsedBytesMD() MetricDescription { func getNodeDriveUsedBytesMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: nodeMetricNamespace, Namespace: nodeMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
@ -353,7 +353,7 @@ func getNodeDiskUsedBytesMD() MetricDescription {
} }
} }
func getNodeDiskFreeBytesMD() MetricDescription { func getNodeDriveFreeBytesMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: nodeMetricNamespace, Namespace: nodeMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
@ -363,7 +363,7 @@ func getNodeDiskFreeBytesMD() MetricDescription {
} }
} }
func getClusterDisksOfflineTotalMD() MetricDescription { func getClusterDrivesOfflineTotalMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: clusterMetricNamespace, Namespace: clusterMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
@ -373,7 +373,7 @@ func getClusterDisksOfflineTotalMD() MetricDescription {
} }
} }
func getClusterDisksOnlineTotalMD() MetricDescription { func getClusterDrivesOnlineTotalMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: clusterMetricNamespace, Namespace: clusterMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
@ -383,7 +383,7 @@ func getClusterDisksOnlineTotalMD() MetricDescription {
} }
} }
func getClusterDisksTotalMD() MetricDescription { func getClusterDrivesTotalMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: clusterMetricNamespace, Namespace: clusterMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
@ -393,9 +393,39 @@ func getClusterDisksTotalMD() MetricDescription {
} }
} }
func getClusterDisksFreeInodes() MetricDescription { func getNodeDrivesOfflineTotalMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: clusterMetricNamespace, Namespace: nodeMetricNamespace,
Subsystem: diskSubsystem,
Name: offlineTotal,
Help: "Total drives offline",
Type: gaugeMetric,
}
}
// getNodeDrivesOnlineTotalMD describes the per-node gauge counting drives
// currently online.
func getNodeDrivesOnlineTotalMD() MetricDescription {
	md := MetricDescription{
		Namespace: nodeMetricNamespace,
		Subsystem: diskSubsystem,
		Name:      onlineTotal,
		Help:      "Total drives online",
		Type:      gaugeMetric,
	}
	return md
}
// getNodeDrivesTotalMD describes the per-node gauge counting all drives,
// both online and offline.
func getNodeDrivesTotalMD() MetricDescription {
	md := MetricDescription{
		Namespace: nodeMetricNamespace,
		Subsystem: diskSubsystem,
		Name:      total,
		Help:      "Total drives",
		Type:      gaugeMetric,
	}
	return md
}
func getNodeDrivesFreeInodes() MetricDescription {
return MetricDescription{
Namespace: nodeMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
Name: freeInodes, Name: freeInodes,
Help: "Total free inodes", Help: "Total free inodes",
@ -403,7 +433,7 @@ func getClusterDisksFreeInodes() MetricDescription {
} }
} }
func getNodeDiskTotalBytesMD() MetricDescription { func getNodeDriveTotalBytesMD() MetricDescription {
return MetricDescription{ return MetricDescription{
Namespace: nodeMetricNamespace, Namespace: nodeMetricNamespace,
Subsystem: diskSubsystem, Subsystem: diskSubsystem,
@ -1288,7 +1318,7 @@ func getScannerNodeMetrics() *MetricsGroup {
Help: "Total number of bucket scans started since server start", Help: "Total number of bucket scans started since server start",
Type: counterMetric, Type: counterMetric,
}, },
Value: float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDisk) + uint64(globalScannerMetrics.activeDisks())), Value: float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDrive) + uint64(globalScannerMetrics.activeDrives())),
}, },
{ {
Description: MetricDescription{ Description: MetricDescription{
@ -1298,7 +1328,7 @@ func getScannerNodeMetrics() *MetricsGroup {
Help: "Total number of bucket scans finished since server start", Help: "Total number of bucket scans finished since server start",
Type: counterMetric, Type: counterMetric,
}, },
Value: float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDisk)), Value: float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDrive)),
}, },
{ {
Description: MetricDescription{ Description: MetricDescription{
@ -1918,39 +1948,57 @@ func getLocalStorageMetrics() *MetricsGroup {
} }
metrics = make([]Metric, 0, 50) metrics = make([]Metric, 0, 50)
storageInfo, _ := objLayer.LocalStorageInfo(ctx) storageInfo := objLayer.LocalStorageInfo(ctx)
onlineDrives, offlineDrives := getOnlineOfflineDisksStats(storageInfo.Disks)
totalDrives := onlineDrives.Merge(offlineDrives)
for _, disk := range storageInfo.Disks { for _, disk := range storageInfo.Disks {
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getNodeDiskUsedBytesMD(), Description: getNodeDriveUsedBytesMD(),
Value: float64(disk.UsedSpace), Value: float64(disk.UsedSpace),
VariableLabels: map[string]string{"disk": disk.DrivePath}, VariableLabels: map[string]string{"disk": disk.DrivePath},
}) })
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getNodeDiskFreeBytesMD(), Description: getNodeDriveFreeBytesMD(),
Value: float64(disk.AvailableSpace), Value: float64(disk.AvailableSpace),
VariableLabels: map[string]string{"disk": disk.DrivePath}, VariableLabels: map[string]string{"disk": disk.DrivePath},
}) })
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getNodeDiskTotalBytesMD(), Description: getNodeDriveTotalBytesMD(),
Value: float64(disk.TotalSpace), Value: float64(disk.TotalSpace),
VariableLabels: map[string]string{"disk": disk.DrivePath}, VariableLabels: map[string]string{"disk": disk.DrivePath},
}) })
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getClusterDisksFreeInodes(), Description: getNodeDrivesFreeInodes(),
Value: float64(disk.FreeInodes), Value: float64(disk.FreeInodes),
VariableLabels: map[string]string{"disk": disk.DrivePath}, VariableLabels: map[string]string{"disk": disk.DrivePath},
}) })
metrics = append(metrics, Metric{
Description: getNodeDrivesOfflineTotalMD(),
Value: float64(offlineDrives.Sum()),
})
metrics = append(metrics, Metric{
Description: getNodeDrivesOnlineTotalMD(),
Value: float64(onlineDrives.Sum()),
})
metrics = append(metrics, Metric{
Description: getNodeDrivesTotalMD(),
Value: float64(totalDrives.Sum()),
})
} }
return return
}) })
return mg return mg
} }
func getLocalDiskStorageMetrics() *MetricsGroup { func getLocalDriveStorageMetrics() *MetricsGroup {
mg := &MetricsGroup{ mg := &MetricsGroup{
cacheInterval: 3 * time.Second, cacheInterval: 3 * time.Second,
} }
@ -1961,7 +2009,7 @@ func getLocalDiskStorageMetrics() *MetricsGroup {
return return
} }
storageInfo, _ := objLayer.LocalStorageInfo(ctx) storageInfo := objLayer.LocalStorageInfo(ctx)
if storageInfo.Backend.Type == madmin.FS { if storageInfo.Backend.Type == madmin.FS {
return return
} }
@ -1972,7 +2020,7 @@ func getLocalDiskStorageMetrics() *MetricsGroup {
} }
for apiName, latency := range disk.Metrics.LastMinute { for apiName, latency := range disk.Metrics.LastMinute {
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getNodeDiskAPILatencyMD(), Description: getNodeDriveAPILatencyMD(),
Value: float64(latency.Avg().Microseconds()), Value: float64(latency.Avg().Microseconds()),
VariableLabels: map[string]string{"disk": disk.DrivePath, "api": "storage." + apiName}, VariableLabels: map[string]string{"disk": disk.DrivePath, "api": "storage." + apiName},
}) })
@ -1996,9 +2044,9 @@ func getClusterStorageMetrics() *MetricsGroup {
// Fetch disk space info, ignore errors // Fetch disk space info, ignore errors
metrics = make([]Metric, 0, 10) metrics = make([]Metric, 0, 10)
storageInfo, _ := objLayer.StorageInfo(ctx) storageInfo := objLayer.StorageInfo(ctx)
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks) onlineDrives, offlineDrives := getOnlineOfflineDisksStats(storageInfo.Disks)
totalDisks := onlineDisks.Merge(offlineDisks) totalDrives := onlineDrives.Merge(offlineDrives)
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getClusterCapacityTotalBytesMD(), Description: getClusterCapacityTotalBytesMD(),
@ -2021,18 +2069,18 @@ func getClusterStorageMetrics() *MetricsGroup {
}) })
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getClusterDisksOfflineTotalMD(), Description: getClusterDrivesOfflineTotalMD(),
Value: float64(offlineDisks.Sum()), Value: float64(offlineDrives.Sum()),
}) })
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getClusterDisksOnlineTotalMD(), Description: getClusterDrivesOnlineTotalMD(),
Value: float64(onlineDisks.Sum()), Value: float64(onlineDrives.Sum()),
}) })
metrics = append(metrics, Metric{ metrics = append(metrics, Metric{
Description: getClusterDisksTotalMD(), Description: getClusterDrivesTotalMD(),
Value: float64(totalDisks.Sum()), Value: float64(totalDrives.Sum()),
}) })
return return
}) })

View File

@ -478,7 +478,8 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
float64(GetTotalCapacityFree(server.Disks)), float64(GetTotalCapacityFree(server.Disks)),
) )
s, _ := objLayer.StorageInfo(GlobalContext) sinfo := objLayer.StorageInfo(GlobalContext)
// Report total usable capacity // Report total usable capacity
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
@ -486,8 +487,9 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total usable capacity online in the cluster", "Total usable capacity online in the cluster",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.GaugeValue,
float64(GetTotalUsableCapacity(server.Disks, s)), float64(GetTotalUsableCapacity(server.Disks, sinfo)),
) )
// Report total usable capacity free // Report total usable capacity free
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
@ -495,7 +497,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
"Total free usable capacity online in the cluster", "Total free usable capacity online in the cluster",
nil, nil), nil, nil),
prometheus.GaugeValue, prometheus.GaugeValue,
float64(GetTotalUsableCapacityFree(server.Disks, s)), float64(GetTotalUsableCapacityFree(server.Disks, sinfo)),
) )
// MinIO Offline Disks per node // MinIO Offline Disks per node

View File

@ -952,6 +952,39 @@ func getOfflineDisks(offlineHost string, endpoints EndpointServerPools) []madmin
return offlineDisks return offlineDisks
} }
// StorageInfo returns disk information across all peers.
//
// Each remote peer is queried concurrently through its LocalStorageInfo
// peer RPC; the local server's own storage info is appended after all
// remote replies arrive. The result carries objLayer's backend info and
// the concatenated disk lists of every node.
func (sys *NotificationSys) StorageInfo(objLayer ObjectLayer) StorageInfo {
	var storageInfo StorageInfo
	// One reply slot per peer client, written by index from the goroutines
	// below. Nil clients keep the zero value (empty Disks), which is
	// harmless during aggregation.
	replies := make([]StorageInfo, len(sys.peerClients))
	var wg sync.WaitGroup
	for i, client := range sys.peerClients {
		if client == nil {
			continue
		}
		wg.Add(1)
		// client and i are passed as arguments so each goroutine captures
		// its own copy of the loop variables.
		go func(client *peerRESTClient, idx int) {
			defer wg.Done()
			info, err := client.LocalStorageInfo()
			if err != nil {
				// Peer unreachable: report its endpoints as offline disks
				// instead of dropping them from the aggregate view.
				info.Disks = getOfflineDisks(client.host.String(), globalEndpoints)
			}
			replies[idx] = info
		}(client, i)
	}
	wg.Wait()

	// Add local to this server.
	replies = append(replies, objLayer.LocalStorageInfo(GlobalContext))

	storageInfo.Backend = objLayer.BackendInfo()
	for _, sinfo := range replies {
		storageInfo.Disks = append(storageInfo.Disks, sinfo.Disks...)
	}

	return storageInfo
}
// ServerInfo - calls ServerInfo RPC call on all peers. // ServerInfo - calls ServerInfo RPC call on all peers.
func (sys *NotificationSys) ServerInfo() []madmin.ServerProperties { func (sys *NotificationSys) ServerInfo() []madmin.ServerProperties {
reply := make([]madmin.ServerProperties, len(sys.peerClients)) reply := make([]madmin.ServerProperties, len(sys.peerClients))

View File

@ -194,8 +194,8 @@ type ObjectLayer interface {
Shutdown(context.Context) error Shutdown(context.Context) error
NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, scanMode madmin.HealScanMode) error NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, scanMode madmin.HealScanMode) error
BackendInfo() madmin.BackendInfo BackendInfo() madmin.BackendInfo
StorageInfo(ctx context.Context) (StorageInfo, []error) StorageInfo(ctx context.Context) StorageInfo
LocalStorageInfo(ctx context.Context) (StorageInfo, []error) LocalStorageInfo(ctx context.Context) StorageInfo
// Bucket operations. // Bucket operations.
MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error

View File

@ -97,6 +97,17 @@ func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo
return lockMap, err return lockMap, err
} }
// LocalStorageInfo - fetch the local storage information from a remote node.
func (client *peerRESTClient) LocalStorageInfo() (info StorageInfo, err error) {
	respBody, err := client.call(peerRESTMethodLocalStorageInfo, nil, nil, -1)
	if err != nil {
		return info, err
	}
	defer http.DrainBody(respBody)
	// Decode the gob-encoded StorageInfo reply from the peer.
	dec := gob.NewDecoder(respBody)
	err = dec.Decode(&info)
	return info, err
}
// ServerInfo - fetch server information for a remote node. // ServerInfo - fetch server information for a remote node.
func (client *peerRESTClient) ServerInfo() (info madmin.ServerProperties, err error) { func (client *peerRESTClient) ServerInfo() (info madmin.ServerProperties, err error) {
respBody, err := client.call(peerRESTMethodServerInfo, nil, nil, -1) respBody, err := client.call(peerRESTMethodServerInfo, nil, nil, -1)

View File

@ -18,7 +18,7 @@
package cmd package cmd
const ( const (
peerRESTVersion = "v28" // Added Rebalance peer APIs peerRESTVersion = "v29" // Added LocalStorageInfo peer API
peerRESTVersionPrefix = SlashSeparator + peerRESTVersion peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
peerRESTPrefix = minioReservedBucketPath + "/peer" peerRESTPrefix = minioReservedBucketPath + "/peer"
@ -28,6 +28,7 @@ const (
const ( const (
peerRESTMethodHealth = "/health" peerRESTMethodHealth = "/health"
peerRESTMethodServerInfo = "/serverinfo" peerRESTMethodServerInfo = "/serverinfo"
peerRESTMethodLocalStorageInfo = "/localstorageinfo"
peerRESTMethodCPUInfo = "/cpuinfo" peerRESTMethodCPUInfo = "/cpuinfo"
peerRESTMethodDiskHwInfo = "/diskhwinfo" peerRESTMethodDiskHwInfo = "/diskhwinfo"
peerRESTMethodOsInfo = "/osinfo" peerRESTMethodOsInfo = "/osinfo"

View File

@ -330,6 +330,23 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r *
logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData)) logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData))
} }
// LocalStorageInfoHandler - returns the local storage information of this
// peer, gob-encoded onto the response writer.
func (s *peerRESTServer) LocalStorageInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	ctx := newContext(r, w, "LocalStorageInfo")

	objLayer := newObjectLayerFn()
	if objLayer == nil {
		s.writeErrorResponse(w, errServerNotInitialized)
		return
	}

	// Encode the local storage info; any encode failure is logged only,
	// since headers may already be written.
	info := objLayer.LocalStorageInfo(r.Context())
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}
// ServerInfoHandler - returns Server Info // ServerInfoHandler - returns Server Info
func (s *peerRESTServer) ServerInfoHandler(w http.ResponseWriter, r *http.Request) { func (s *peerRESTServer) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) { if !s.IsValid(w, r) {
@ -1363,6 +1380,7 @@ func registerPeerRESTHandlers(router *mux.Router) {
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHealth).HandlerFunc(httpTraceHdrs(server.HealthHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHealth).HandlerFunc(httpTraceHdrs(server.HealthHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLocalStorageInfo).HandlerFunc(httpTraceHdrs(server.LocalStorageInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(httpTraceHdrs(server.GetProcInfoHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(httpTraceHdrs(server.GetProcInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(httpTraceHdrs(server.GetMemInfoHandler)) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(httpTraceHdrs(server.GetMemInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(httpTraceHdrs(server.GetMetricsHandler)).Queries(restQueries(peerRESTMetricsTypes)...) subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(httpTraceHdrs(server.GetMetricsHandler)).Queries(restQueries(peerRESTMetricsTypes)...)

View File

@ -55,7 +55,7 @@ func rebalanceStatus(ctx context.Context, z *erasureServerPools) (r rebalanceAdm
} }
// Compute disk usage percentage // Compute disk usage percentage
si, _ := z.StorageInfo(ctx) si := z.StorageInfo(ctx)
diskStats := make([]struct { diskStats := make([]struct {
AvailableSpace uint64 AvailableSpace uint64
TotalSpace uint64 TotalSpace uint64

View File

@ -25,13 +25,13 @@ func _() {
_ = x[scannerMetricLastRealtime-14] _ = x[scannerMetricLastRealtime-14]
_ = x[scannerMetricScanFolder-15] _ = x[scannerMetricScanFolder-15]
_ = x[scannerMetricScanCycle-16] _ = x[scannerMetricScanCycle-16]
_ = x[scannerMetricScanBucketDisk-17] _ = x[scannerMetricScanBucketDrive-17]
_ = x[scannerMetricLast-18] _ = x[scannerMetricLast-18]
} }
const _scannerMetric_name = "ReadMetadataCheckMissingSaveUsageApplyAllApplyVersionTierObjSweepHealCheckILMCheckReplicationYieldCleanAbandonedApplyNonCurrentStartTraceScanObjectLastRealtimeScanFolderScanCycleScanBucketDiskLast" const _scannerMetric_name = "ReadMetadataCheckMissingSaveUsageApplyAllApplyVersionTierObjSweepHealCheckILMCheckReplicationYieldCleanAbandonedApplyNonCurrentStartTraceScanObjectLastRealtimeScanFolderScanCycleScanBucketDriveLast"
var _scannerMetric_index = [...]uint8{0, 12, 24, 33, 41, 53, 65, 74, 77, 93, 98, 112, 127, 137, 147, 159, 169, 178, 192, 196} var _scannerMetric_index = [...]uint8{0, 12, 24, 33, 41, 53, 65, 74, 77, 93, 98, 112, 127, 137, 147, 159, 169, 178, 193, 197}
func (i scannerMetric) String() string { func (i scannerMetric) String() string {
if i >= scannerMetric(len(_scannerMetric_index)-1) { if i >= scannerMetric(len(_scannerMetric_index)-1) {

View File

@ -37,11 +37,6 @@ func getFormatStr(strLen int, padding int) string {
return "%" + formatStr return "%" + formatStr
} }
func mustGetStorageInfo(objAPI ObjectLayer) StorageInfo {
storageInfo, _ := objAPI.StorageInfo(GlobalContext)
return storageInfo
}
// Prints the formatted startup message. // Prints the formatted startup message.
func printStartupMessage(apiEndpoints []string, err error) { func printStartupMessage(apiEndpoints []string, err error) {
logger.Info(color.Bold("MinIO Object Storage Server")) logger.Info(color.Bold("MinIO Object Storage Server"))
@ -67,7 +62,7 @@ func printStartupMessage(apiEndpoints []string, err error) {
// Object layer is initialized then print StorageInfo. // Object layer is initialized then print StorageInfo.
objAPI := newObjectLayerFn() objAPI := newObjectLayerFn()
if objAPI != nil { if objAPI != nil {
printStorageInfo(mustGetStorageInfo(objAPI)) printStorageInfo(objAPI.StorageInfo(GlobalContext))
} }
// Prints credential, region and browser access. // Prints credential, region and browser access.