Introduce metrics caching for performant metrics (#11831)

Ritesh H Shukla 2021-03-19 00:04:29 -07:00 committed by GitHub
parent 0843280dc3
commit b5dcaaccb4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@@ -116,8 +116,8 @@ const (
     serverName = "server"
 )
 
-// GaugeMetricType for the types of metrics supported
-type GaugeMetricType string
+// MetricType for the types of metrics supported
+type MetricType string
 
 const (
     gaugeMetric = "gaugeMetric"
@@ -131,7 +131,7 @@ type MetricDescription struct {
     Subsystem MetricSubsystem `json:"Subsystem"`
     Name      MetricName      `json:"MetricName"`
     Help      string          `json:"Help"`
-    Type      GaugeMetricType `json:"Type"`
+    Type      MetricType      `json:"Type"`
 }
 
 // Metric captures the details for a metric
@@ -144,10 +144,66 @@ type Metric struct {
     Histogram map[string]uint64 `json:"Histogram"`
 }
 
+func (m *Metric) copyMetric() Metric {
+    metric := Metric{
+        Description:          m.Description,
+        Value:                m.Value,
+        HistogramBucketLabel: m.HistogramBucketLabel,
+        StaticLabels:         make(map[string]string),
+        VariableLabels:       make(map[string]string),
+        Histogram:            make(map[string]uint64),
+    }
+    for k, v := range m.StaticLabels {
+        metric.StaticLabels[k] = v
+    }
+    for k, v := range m.VariableLabels {
+        metric.VariableLabels[k] = v
+    }
+    for k, v := range m.Histogram {
+        metric.Histogram[k] = v
+    }
+    return metric
+}
+
 // MetricsGroup are a group of metrics that are initialized together.
 type MetricsGroup struct {
-    Metrics    []Metric
-    initialize func(ctx context.Context, m *MetricsGroup)
+    id            string
+    cacheInterval time.Duration
+    cachedRead    func(ctx context.Context, mg *MetricsGroup) []Metric
+    read          func(ctx context.Context) []Metric
+}
+
+var metricsGroupCache = make(map[string]*timedValue)
+var cacheLock sync.Mutex
+
+func cachedRead(ctx context.Context, mg *MetricsGroup) (metrics []Metric) {
+    cacheLock.Lock()
+    defer cacheLock.Unlock()
+    v, ok := metricsGroupCache[mg.id]
+    if !ok {
+        interval := mg.cacheInterval
+        if interval == 0 {
+            interval = 30 * time.Second
+        }
+        v = &timedValue{}
+        v.Once.Do(func() {
+            v.Update = func() (interface{}, error) {
+                c := mg.read(ctx)
+                return c, nil
+            }
+            v.TTL = interval
+        })
+        metricsGroupCache[mg.id] = v
+    }
+    c, err := v.Get()
+    if err != nil {
+        return []Metric{}
+    }
+    m := c.([]Metric)
+    for i := range m {
+        metrics = append(metrics, m[i].copyMetric())
+    }
+    return metrics
+}
+
 // MetricsGenerator are functions that generate metric groups.
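Note: cachedRead keys a per-group timedValue cache on MetricsGroup.id and hands every caller deep copies of the cached metrics (copyMetric duplicates the label and histogram maps), so concurrent scrapes never mutate a shared cache entry. The timedValue type itself is defined elsewhere in the codebase and is not part of this diff; the following is only a minimal sketch of a TTL-guarded lazy value with the shape used above (Once, Update, TTL, Get) — an assumption for illustration, not MinIO's actual implementation.

package cmd // package placement and refresh logic assumed for this sketch only

import (
    "sync"
    "time"
)

// timedValue (sketch): caches the result of Update and serves it until TTL elapses.
type timedValue struct {
    Once   sync.Once
    Update func() (interface{}, error)
    TTL    time.Duration

    mu         sync.Mutex
    value      interface{}
    lastUpdate time.Time
}

// Get returns the cached value, calling Update again only after TTL has elapsed.
func (t *timedValue) Get() (interface{}, error) {
    t.mu.Lock()
    defer t.mu.Unlock()
    if t.value != nil && time.Since(t.lastUpdate) < t.TTL {
        return t.value, nil
    }
    v, err := t.Update()
    if err != nil {
        return nil, err
    }
    t.value, t.lastUpdate = v, time.Now()
    return v, nil
}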
@@ -674,8 +730,10 @@ func getMinIOProcessStartTimeMD() MetricDescription {
 }
 func getMinioProcMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics:    []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
+        id:         "MinioProcMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
+            metrics = make([]Metric, 0)
             p, err := procfs.Self()
             if err != nil {
                 logger.LogOnceIf(ctx, err, nodeMetricNamespace)
@@ -708,70 +766,74 @@ func getMinioProcMetrics() MetricsGroup {
                 return
             }
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinioFDOpenMD(),
                     Value:       float64(openFDs),
                 },
             )
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinioFDLimitMD(),
                     Value:       float64(l.OpenFiles),
                 })
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinIOProcessSysCallRMD(),
                     Value:       float64(io.SyscR),
                 })
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinIOProcessSysCallWMD(),
                     Value:       float64(io.SyscW),
                 })
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinioProcessIOReadBytesMD(),
                     Value:       float64(io.ReadBytes),
                 })
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinioProcessIOWriteBytesMD(),
                     Value:       float64(io.WriteBytes),
                 })
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinioProcessIOReadCachedBytesMD(),
                     Value:       float64(io.RChar),
                 })
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinioProcessIOWriteCachedBytesMD(),
                     Value:       float64(io.WChar),
                 })
-            metrics.Metrics = append(metrics.Metrics,
+            metrics = append(metrics,
                 Metric{
                     Description: getMinIOProcessStartTimeMD(),
                     Value:       startTime,
                 })
+            return
         },
     }
 }
 
 func getGoMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics:    []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
-            metrics.Metrics = append(metrics.Metrics, Metric{
+        id:         "GoMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
+            metrics = append(metrics, Metric{
                 Description: getMinIOGORoutineCountMD(),
                 Value:       float64(runtime.NumGoroutine()),
             })
+            return
         },
     }
 }
 
 func getS3TTFBMetric() MetricsGroup {
     return MetricsGroup{
-        Metrics:    []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
+        id:         "s3TTFBMetric",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
             // Read prometheus metric on this channel
             ch := make(chan prometheus.Metric)
@@ -800,7 +862,7 @@ func getS3TTFBMetric() MetricsGroup {
                         VariableLabels: labels,
                         Value:          float64(b.GetCumulativeCount()),
                     }
-                    metrics.Metrics = append(metrics.Metrics, metric)
+                    metrics = append(metrics, metric)
                 }
             }
@@ -809,53 +871,54 @@ func getS3TTFBMetric() MetricsGroup {
             httpRequestsDuration.Collect(ch)
             close(ch)
             wg.Wait()
+            return
         },
     }
 }
 
 func getMinioVersionMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(_ context.Context, m *MetricsGroup) {
-            m.Metrics = append(m.Metrics, Metric{
+        id:         "MinioVersionMetrics",
+        cachedRead: cachedRead,
+        read: func(_ context.Context) (metrics []Metric) {
+            metrics = append(metrics, Metric{
                 Description:    getMinIOCommitMD(),
                 VariableLabels: map[string]string{"commit": CommitID},
             })
-            m.Metrics = append(m.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description:    getMinIOVersionMD(),
                 VariableLabels: map[string]string{"version": Version},
             })
+            return
         },
     }
 }
 
 func getNodeHealthMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{
-            {
-                Description: getNodeOnlineTotalMD(),
-            }, {
-                Description: getNodeOfflineTotalMD(),
-            },
-        },
-        initialize: func(_ context.Context, m *MetricsGroup) {
+        id:         "NodeHealthMetrics",
+        cachedRead: cachedRead,
+        read: func(_ context.Context) (metrics []Metric) {
             nodesUp, nodesDown := GetPeerOnlineCount()
-            for i := range m.Metrics {
-                switch {
-                case m.Metrics[i].Description.Name == onlineTotal:
-                    m.Metrics[i].Value = float64(nodesUp)
-                case m.Metrics[i].Description.Name == offlineTotal:
-                    m.Metrics[i].Value = float64(nodesDown)
-                }
-            }
+            metrics = append(metrics, Metric{
+                Description: getNodeOnlineTotalMD(),
+                Value:       float64(nodesUp),
+            })
+            metrics = append(metrics, Metric{
+                Description: getNodeOfflineTotalMD(),
+                Value:       float64(nodesDown),
+            })
+            return
         },
     }
 }
 
 func getMinioHealingMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(_ context.Context, m *MetricsGroup) {
+        id:         "minioHealingMetrics",
+        cachedRead: cachedRead,
+        read: func(_ context.Context) (metrics []Metric) {
+            metrics = make([]Metric, 0)
             if !globalIsErasure {
                 return
             }
@@ -867,13 +930,14 @@ func getMinioHealingMetrics() MetricsGroup {
             if !bgSeq.lastHealActivity.IsZero() {
                 dur = time.Since(bgSeq.lastHealActivity)
             }
-            m.Metrics = append(m.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getHealLastActivityTimeMD(),
                 Value:       float64(dur),
             })
-            m.Metrics = append(m.Metrics, getObjectsScanned(bgSeq)...)
-            m.Metrics = append(m.Metrics, getScannedItems(bgSeq)...)
-            m.Metrics = append(m.Metrics, getFailedItems(bgSeq)...)
+            metrics = append(metrics, getObjectsScanned(bgSeq)...)
+            metrics = append(metrics, getScannedItems(bgSeq)...)
+            metrics = append(metrics, getFailedItems(bgSeq)...)
+            return
         },
     }
 }
@@ -919,118 +983,127 @@ func getObjectsScanned(seq *healSequence) (m []Metric) {
 }
 
 func getCacheMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(ctx context.Context, m *MetricsGroup) {
+        id:         "CacheMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
+            metrics = make([]Metric, 0)
             cacheObjLayer := newCachedObjectLayerFn()
             // Service not initialized yet
             if cacheObjLayer == nil {
                 return
             }
-            m.Metrics = append(m.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getCacheHitsTotalMD(),
                 Value:       float64(cacheObjLayer.CacheStats().getHits()),
             })
-            m.Metrics = append(m.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getCacheHitsMissedTotalMD(),
                 Value:       float64(cacheObjLayer.CacheStats().getMisses()),
             })
-            m.Metrics = append(m.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getCacheSentBytesMD(),
                 Value:       float64(cacheObjLayer.CacheStats().getBytesServed()),
             })
             for _, cdStats := range cacheObjLayer.CacheStats().GetDiskStats() {
-                m.Metrics = append(m.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getCacheUsagePercentMD(),
                     Value:          float64(cdStats.UsagePercent),
                     VariableLabels: map[string]string{"disk": cdStats.Dir},
                 })
-                m.Metrics = append(m.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getCacheUsageInfoMD(),
                     Value:          float64(cdStats.UsageState),
                     VariableLabels: map[string]string{"disk": cdStats.Dir, "level": cdStats.GetUsageLevelString()},
                 })
-                m.Metrics = append(m.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getCacheUsedBytesMD(),
                     Value:          float64(cdStats.UsageSize),
                     VariableLabels: map[string]string{"disk": cdStats.Dir},
                 })
-                m.Metrics = append(m.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getCacheTotalBytesMD(),
                     Value:          float64(cdStats.TotalCapacity),
                     VariableLabels: map[string]string{"disk": cdStats.Dir},
                 })
             }
+            return
         },
     }
 }
 
 func getHTTPMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
+        id:         "httpMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
             httpStats := globalHTTPStats.toServerHTTPStats()
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getS3RequestsInQueueMD(),
                 Value:       float64(httpStats.S3RequestsInQueue),
             })
             for api, value := range httpStats.CurrentS3Requests.APIStats {
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getS3RequestsInFlightMD(),
                     Value:          float64(value),
                     VariableLabels: map[string]string{"api": api},
                 })
             }
             for api, value := range httpStats.TotalS3Requests.APIStats {
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getS3RequestsTotalMD(),
                     Value:          float64(value),
                     VariableLabels: map[string]string{"api": api},
                 })
             }
             for api, value := range httpStats.TotalS3Errors.APIStats {
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getS3RequestsErrorsMD(),
                     Value:          float64(value),
                     VariableLabels: map[string]string{"api": api},
                 })
             }
+            return
         },
     }
 }
 
 func getNetworkMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
-            metrics.Metrics = append(metrics.Metrics, Metric{
+        id:         "networkMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
+            metrics = append(metrics, Metric{
                 Description: getInternodeFailedRequests(),
                 Value:       float64(loadAndResetRPCNetworkErrsCounter()),
             })
             connStats := globalConnStats.toServerConnStats()
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getInterNodeSentBytesMD(),
                 Value:       float64(connStats.TotalOutputBytes),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getInterNodeReceivedBytesMD(),
                 Value:       float64(connStats.TotalInputBytes),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getS3SentBytesMD(),
                 Value:       float64(connStats.S3OutputBytes),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getS3ReceivedBytesMD(),
                 Value:       float64(connStats.S3InputBytes),
             })
+            return
         },
     }
 }
 
 func getBucketUsageMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
+        id:         "BucketUsageMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
+            metrics = make([]Metric, 0)
             objLayer := newObjectLayerFn()
             // Service not initialized yet
             if objLayer == nil {
@@ -1052,42 +1125,42 @@ func getBucketUsageMetrics() MetricsGroup {
             }
             for bucket, usage := range dataUsageInfo.BucketsUsage {
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getBucketUsageTotalBytesMD(),
                     Value:          float64(usage.Size),
                     VariableLabels: map[string]string{"bucket": bucket},
                 })
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getBucketUsageObjectsTotalMD(),
                     Value:          float64(usage.ObjectsCount),
                     VariableLabels: map[string]string{"bucket": bucket},
                 })
                 if usage.hasReplicationUsage() {
-                    metrics.Metrics = append(metrics.Metrics, Metric{
+                    metrics = append(metrics, Metric{
                         Description:    getBucketRepPendingBytesMD(),
                         Value:          float64(usage.ReplicationPendingSize),
                         VariableLabels: map[string]string{"bucket": bucket},
                     })
-                    metrics.Metrics = append(metrics.Metrics, Metric{
+                    metrics = append(metrics, Metric{
                         Description:    getBucketRepFailedBytesMD(),
                         Value:          float64(usage.ReplicationFailedSize),
                         VariableLabels: map[string]string{"bucket": bucket},
                     })
-                    metrics.Metrics = append(metrics.Metrics, Metric{
+                    metrics = append(metrics, Metric{
                         Description:    getBucketRepSentBytesMD(),
                         Value:          float64(usage.ReplicatedSize),
                         VariableLabels: map[string]string{"bucket": bucket},
                     })
-                    metrics.Metrics = append(metrics.Metrics, Metric{
+                    metrics = append(metrics, Metric{
                         Description:    getBucketRepReceivedBytesMD(),
                         Value:          float64(usage.ReplicaSize),
                         VariableLabels: map[string]string{"bucket": bucket},
                     })
                 }
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:          getBucketObjectDistributionMD(),
                     Histogram:            usage.ObjectSizesHistogram,
                     HistogramBucketLabel: "range",
@@ -1095,13 +1168,16 @@ func getBucketUsageMetrics() MetricsGroup {
                 })
             }
+            return
         },
     }
 }
 
 func getLocalStorageMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
+        id:         "localStorageMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
+            metrics = make([]Metric, 0)
             objLayer := newObjectLayerFn()
             // Service not initialized yet
             if objLayer == nil {
@@ -1114,31 +1190,34 @@ func getLocalStorageMetrics() MetricsGroup {
             storageInfo, _ := objLayer.LocalStorageInfo(ctx)
             for _, disk := range storageInfo.Disks {
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getNodeDiskUsedBytesMD(),
                     Value:          float64(disk.UsedSpace),
                     VariableLabels: map[string]string{"disk": disk.DrivePath},
                 })
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getNodeDiskFreeBytesMD(),
                     Value:          float64(disk.AvailableSpace),
                     VariableLabels: map[string]string{"disk": disk.DrivePath},
                 })
-                metrics.Metrics = append(metrics.Metrics, Metric{
+                metrics = append(metrics, Metric{
                     Description:    getNodeDiskTotalBytesMD(),
                     Value:          float64(disk.TotalSpace),
                     VariableLabels: map[string]string{"disk": disk.DrivePath},
                 })
             }
+            return
         },
     }
 }
 
 func getClusterStorageMetrics() MetricsGroup {
     return MetricsGroup{
-        Metrics: []Metric{},
-        initialize: func(ctx context.Context, metrics *MetricsGroup) {
+        id:         "ClusterStorageMetrics",
+        cachedRead: cachedRead,
+        read: func(ctx context.Context) (metrics []Metric) {
+            metrics = make([]Metric, 0)
             objLayer := newObjectLayerFn()
             // Service not initialized yet
             if objLayer == nil {
@@ -1154,40 +1233,41 @@ func getClusterStorageMetrics() MetricsGroup {
             onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks)
             totalDisks := onlineDisks.Merge(offlineDisks)
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getClusterCapacityTotalBytesMD(),
                 Value:       float64(GetTotalCapacity(storageInfo.Disks)),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getClusterCapacityFreeBytesMD(),
                 Value:       float64(GetTotalCapacityFree(storageInfo.Disks)),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getClusterCapacityUsageBytesMD(),
                 Value:       GetTotalUsableCapacity(storageInfo.Disks, storageInfo),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getClusterCapacityUsageFreeBytesMD(),
                 Value:       GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getClusterDisksOfflineTotalMD(),
                 Value:       float64(offlineDisks.Sum()),
             })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getClusterDisksOnlineTotalMD(),
                 Value:       float64(onlineDisks.Sum()),
            })
-            metrics.Metrics = append(metrics.Metrics, Metric{
+            metrics = append(metrics, Metric{
                 Description: getClusterDisksTotalMD(),
                 Value:       float64(totalDisks.Sum()),
             })
+            return
         },
     }
 }
@@ -1312,9 +1392,9 @@ func (c *minioCollectorV2) Describe(ch chan<- *prometheus.Desc) {
 func populateAndPublish(generatorFn func() []MetricsGenerator, publish func(m Metric) bool) {
     generators := generatorFn()
     for _, g := range generators {
-        metrics := g()
-        metrics.initialize(GlobalContext, &metrics)
-        for _, metric := range metrics.Metrics {
+        metricsGroup := g()
+        metrics := metricsGroup.cachedRead(GlobalContext, &metricsGroup)
+        for _, metric := range metrics {
             if !publish(metric) {
                 return
             }
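
Note: with this change the collector no longer mutates a shared Metrics slice; each scrape calls the group's cachedRead, which serves the cached result (default TTL 30s when cacheInterval is zero) and re-runs the group's read function only after the interval elapses. No caller in this diff sets cacheInterval explicitly, so the example below is hypothetical — it only reuses the struct fields and helpers introduced above (and assumes the surrounding file's context/time imports); "expensiveMetrics" and getExpensiveMetrics do not exist in this commit.

// Hypothetical example: a metrics group whose read function is costly,
// cached for five minutes instead of the 30s default applied by cachedRead.
func getExpensiveMetrics() MetricsGroup {
    return MetricsGroup{
        id:            "expensiveMetrics",
        cacheInterval: 5 * time.Minute, // overrides the default TTL
        cachedRead:    cachedRead,
        read: func(ctx context.Context) (metrics []Metric) {
            // gather expensive values here; cachedRead reuses the result
            // (as deep copies) until cacheInterval elapses.
            return
        },
    }
}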