2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2021-01-18 23:35:38 -05:00
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"net/http"
|
2021-01-22 21:30:16 -05:00
|
|
|
"runtime"
|
2021-01-18 23:35:38 -05:00
|
|
|
"strings"
|
|
|
|
"sync"
|
2021-10-02 12:31:05 -04:00
|
|
|
"sync/atomic"
|
2021-01-18 23:35:38 -05:00
|
|
|
"time"
|
|
|
|
|
2022-01-31 03:44:26 -05:00
|
|
|
"github.com/minio/madmin-go"
|
2021-10-02 12:31:05 -04:00
|
|
|
"github.com/minio/minio/internal/bucket/lifecycle"
|
2021-06-01 17:59:40 -04:00
|
|
|
"github.com/minio/minio/internal/logger"
|
2021-01-18 23:35:38 -05:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
|
|
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
|
|
dto "github.com/prometheus/client_model/go"
|
2021-01-22 21:30:16 -05:00
|
|
|
"github.com/prometheus/procfs"
|
2021-01-18 23:35:38 -05:00
|
|
|
)
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// Package-level Prometheus wiring; populated once in init below.
var (
	// nodeCollector serves node-local metrics for this server.
	nodeCollector *minioNodeCollector
	// clusterCollector serves cluster-wide metrics (cluster + peer groups).
	clusterCollector *minioClusterCollector
	// peerMetricsGroups are the metric groups gathered from every peer node.
	peerMetricsGroups []*MetricsGroup
)
|
|
|
|
|
|
|
|
func init() {
|
|
|
|
clusterMetricsGroups := []*MetricsGroup{
|
|
|
|
getBucketUsageMetrics(),
|
|
|
|
getMinioHealingMetrics(),
|
|
|
|
getNodeHealthMetrics(),
|
|
|
|
getClusterStorageMetrics(),
|
2022-02-08 15:45:28 -05:00
|
|
|
getClusterTierMetrics(),
|
2021-12-17 13:11:04 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
peerMetricsGroups = []*MetricsGroup{
|
|
|
|
getCacheMetrics(),
|
|
|
|
getGoMetrics(),
|
|
|
|
getHTTPMetrics(),
|
|
|
|
getLocalStorageMetrics(),
|
|
|
|
getMinioProcMetrics(),
|
|
|
|
getMinioVersionMetrics(),
|
|
|
|
getNetworkMetrics(),
|
|
|
|
getS3TTFBMetric(),
|
|
|
|
getILMNodeMetrics(),
|
|
|
|
getScannerNodeMetrics(),
|
|
|
|
}
|
|
|
|
|
|
|
|
allMetricsGroups := func() (allMetrics []*MetricsGroup) {
|
|
|
|
allMetrics = append(allMetrics, clusterMetricsGroups...)
|
|
|
|
allMetrics = append(allMetrics, peerMetricsGroups...)
|
|
|
|
return allMetrics
|
|
|
|
}()
|
|
|
|
|
|
|
|
nodeCollector = newMinioCollectorNode([]*MetricsGroup{
|
|
|
|
getNodeHealthMetrics(),
|
2022-01-25 19:31:44 -05:00
|
|
|
getLocalDiskStorageMetrics(),
|
2021-12-17 13:11:04 -05:00
|
|
|
getCacheMetrics(),
|
|
|
|
getHTTPMetrics(),
|
|
|
|
getNetworkMetrics(),
|
|
|
|
getMinioVersionMetrics(),
|
|
|
|
getS3TTFBMetric(),
|
|
|
|
})
|
|
|
|
|
|
|
|
clusterCollector = newMinioClusterCollector(allMetricsGroups)
|
|
|
|
}
|
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
// MetricNamespace is top level grouping of metrics to create the metric name.
type MetricNamespace string

// MetricSubsystem is the sub grouping for metrics within a namespace.
type MetricSubsystem string
|
|
|
|
|
|
|
|
// Metric namespaces: the first component of every emitted Prometheus
// metric name (e.g. "minio_bucket_...").
const (
	bucketMetricNamespace    MetricNamespace = "minio_bucket"
	clusterMetricNamespace   MetricNamespace = "minio_cluster"
	healMetricNamespace      MetricNamespace = "minio_heal"
	interNodeMetricNamespace MetricNamespace = "minio_inter_node"
	nodeMetricNamespace      MetricNamespace = "minio_node"
	minioMetricNamespace     MetricNamespace = "minio"
	s3MetricNamespace        MetricNamespace = "minio_s3"
)
|
|
|
|
|
|
|
|
// Metric subsystems: the second component of the emitted metric name,
// grouping metrics within a namespace (e.g. "minio_node_disk_...").
const (
	cacheSubsystem            MetricSubsystem = "cache"
	capacityRawSubsystem      MetricSubsystem = "capacity_raw"
	capacityUsableSubsystem   MetricSubsystem = "capacity_usable"
	diskSubsystem             MetricSubsystem = "disk"
	fileDescriptorSubsystem   MetricSubsystem = "file_descriptor"
	goRoutines                MetricSubsystem = "go_routine"
	ioSubsystem               MetricSubsystem = "io"
	nodesSubsystem            MetricSubsystem = "nodes"
	objectsSubsystem          MetricSubsystem = "objects"
	processSubsystem          MetricSubsystem = "process"
	replicationSubsystem      MetricSubsystem = "replication"
	requestsSubsystem         MetricSubsystem = "requests"
	requestsRejectedSubsystem MetricSubsystem = "requests_rejected"
	timeSubsystem             MetricSubsystem = "time"
	trafficSubsystem          MetricSubsystem = "traffic"
	softwareSubsystem         MetricSubsystem = "software"
	sysCallSubsystem          MetricSubsystem = "syscall"
	usageSubsystem            MetricSubsystem = "usage"
	quotaSubsystem            MetricSubsystem = "quota"
	ilmSubsystem              MetricSubsystem = "ilm"
	scannerSubsystem          MetricSubsystem = "scanner"
)
|
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
// MetricName are the individual names for the metric.
type MetricName string
|
2021-01-18 23:35:38 -05:00
|
|
|
|
|
|
|
// Metric names: the final component of the emitted metric name. Changing
// any of these strings changes the exported Prometheus metric name.
const (
	// Counters and totals.
	authTotal      MetricName = "auth_total"
	canceledTotal  MetricName = "canceled_total"
	errorsTotal    MetricName = "errors_total"
	headerTotal    MetricName = "header_total"
	healTotal      MetricName = "heal_total"
	hitsTotal      MetricName = "hits_total"
	inflightTotal  MetricName = "inflight_total"
	invalidTotal   MetricName = "invalid_total"
	limitTotal     MetricName = "limit_total"
	missedTotal    MetricName = "missed_total"
	waitingTotal   MetricName = "waiting_total"
	incomingTotal  MetricName = "incoming_total"
	objectTotal    MetricName = "object_total"
	offlineTotal   MetricName = "offline_total"
	onlineTotal    MetricName = "online_total"
	openTotal      MetricName = "open_total"
	readTotal      MetricName = "read_total"
	timestampTotal MetricName = "timestamp_total"
	writeTotal     MetricName = "write_total"
	total          MetricName = "total"
	freeInodes     MetricName = "free_inodes"

	// Byte counts and latencies.
	failedCount     MetricName = "failed_count"
	failedBytes     MetricName = "failed_bytes"
	freeBytes       MetricName = "free_bytes"
	readBytes       MetricName = "read_bytes"
	rcharBytes      MetricName = "rchar_bytes"
	receivedBytes   MetricName = "received_bytes"
	latencyMilliSec MetricName = "latency_ms"
	sentBytes       MetricName = "sent_bytes"
	totalBytes      MetricName = "total_bytes"
	usedBytes       MetricName = "used_bytes"
	writeBytes      MetricName = "write_bytes"
	wcharBytes      MetricName = "wchar_bytes"

	apiLatencyMicroSec MetricName = "latency_us"

	// NOTE(review): identifier says "usage" but the metric name is
	// "update_percent" — looks like a typo, yet renaming the string would
	// change the exported metric name; confirm before fixing.
	usagePercent MetricName = "update_percent"

	// Info-style gauges.
	commitInfo  MetricName = "commit_info"
	usageInfo   MetricName = "usage_info"
	versionInfo MetricName = "version_info"

	// Untyped string constants; used where a MetricName is expected.
	sizeDistribution = "size_distribution"
	ttfbDistribution = "ttfb_seconds_distribution"

	lastActivityTime = "last_activity_nano_seconds"
	startTime        = "starttime_seconds"
	upTime           = "uptime_seconds"
	memory           = "resident_memory_bytes"
	cpu              = "cpu_total_seconds"

	// ILM task gauges.
	expiryPendingTasks     MetricName = "expiry_pending_tasks"
	transitionPendingTasks MetricName = "transition_pending_tasks"
	transitionActiveTasks  MetricName = "transition_active_tasks"

	// Tiering counters.
	transitionedBytes    MetricName = "transitioned_bytes"
	transitionedObjects  MetricName = "transitioned_objects"
	transitionedVersions MetricName = "transitioned_versions"
)
|
|
|
|
|
|
|
|
const (
	// serverName is the variable label key identifying the server a
	// metric was collected from.
	serverName = "server"
)
|
|
|
|
|
2021-03-19 03:04:29 -04:00
|
|
|
// MetricType for the types of metrics supported
type MetricType string

// Supported metric types, mapped onto the Prometheus value types when the
// descriptions are converted to prometheus.Metric values.
const (
	gaugeMetric     = "gaugeMetric"
	counterMetric   = "counterMetric"
	histogramMetric = "histogramMetric"
)
|
|
|
|
|
|
|
|
// MetricDescription describes the metric
type MetricDescription struct {
	// Namespace is the top-level metric name component.
	Namespace MetricNamespace `json:"MetricNamespace"`
	// Subsystem groups related metrics within the namespace.
	Subsystem MetricSubsystem `json:"Subsystem"`
	// Name is the final metric name component.
	Name MetricName `json:"MetricName"`
	// Help is the human-readable description exported with the metric.
	Help string `json:"Help"`
	// Type is one of gaugeMetric, counterMetric or histogramMetric.
	Type MetricType `json:"Type"`
}
|
|
|
|
|
|
|
|
// Metric captures the details for a metric
type Metric struct {
	// Description identifies the metric and its type.
	Description MetricDescription `json:"Description"`
	// StaticLabels are fixed labels attached to every sample.
	StaticLabels map[string]string `json:"StaticLabels"`
	// Value is the sample value for gauge/counter metrics.
	Value float64 `json:"Value"`
	// VariableLabels are per-sample labels (e.g. bucket, server).
	VariableLabels map[string]string `json:"VariableLabels"`
	// HistogramBucketLabel is the label key for histogram bucket bounds.
	HistogramBucketLabel string `json:"HistogramBucketLabel"`
	// Histogram maps bucket-bound label values to cumulative counts.
	Histogram map[string]uint64 `json:"Histogram"`
}
|
|
|
|
|
|
|
|
// MetricsGroup are a group of metrics that are initialized together.
type MetricsGroup struct {
	// metricsCache holds the last values returned by the registered read
	// function; refreshed once its TTL expires.
	metricsCache timedValue
	// cacheInterval is the TTL applied to metricsCache; a non-positive
	// value bypasses caching entirely (see Get).
	cacheInterval time.Duration
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// RegisterRead register the metrics populator function to be used
// to populate new values upon cache invalidation.
func (g *MetricsGroup) RegisterRead(read func(ctx context.Context) []Metric) {
	// One-time wiring: the TTL and update callback are installed only on
	// the first call; later calls are no-ops thanks to Once.
	g.metricsCache.Once.Do(func() {
		g.metricsCache.TTL = g.cacheInterval
		// Update is invoked when the cached value goes stale; it re-runs
		// the read function under the global context. The error is always
		// nil here — read itself reports nothing.
		g.metricsCache.Update = func() (interface{}, error) {
			return read(GlobalContext), nil
		}
	})
}
|
|
|
|
|
2022-01-14 19:48:19 -05:00
|
|
|
func (m *Metric) copyMetric() Metric {
|
|
|
|
metric := Metric{
|
|
|
|
Description: m.Description,
|
|
|
|
Value: m.Value,
|
|
|
|
HistogramBucketLabel: m.HistogramBucketLabel,
|
|
|
|
StaticLabels: make(map[string]string),
|
|
|
|
VariableLabels: make(map[string]string),
|
|
|
|
Histogram: make(map[string]uint64),
|
|
|
|
}
|
|
|
|
for k, v := range m.StaticLabels {
|
|
|
|
metric.StaticLabels[k] = v
|
|
|
|
}
|
|
|
|
for k, v := range m.VariableLabels {
|
|
|
|
metric.VariableLabels[k] = v
|
|
|
|
}
|
|
|
|
for k, v := range m.Histogram {
|
|
|
|
metric.Histogram[k] = v
|
|
|
|
}
|
|
|
|
return metric
|
|
|
|
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// Get - returns cached value always upton the configured TTL,
|
|
|
|
// once the TTL expires "read()" registered function is called
|
|
|
|
// to return the new values and updated.
|
2022-01-14 19:48:19 -05:00
|
|
|
func (g *MetricsGroup) Get() (metrics []Metric) {
|
2021-12-17 14:40:09 -05:00
|
|
|
var c interface{}
|
|
|
|
var err error
|
|
|
|
if g.cacheInterval <= 0 {
|
|
|
|
c, err = g.metricsCache.Update()
|
|
|
|
} else {
|
|
|
|
c, err = g.metricsCache.Get()
|
|
|
|
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
if err != nil {
|
|
|
|
return []Metric{}
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
m, ok := c.([]Metric)
|
|
|
|
if !ok {
|
|
|
|
return []Metric{}
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
|
2022-01-14 19:48:19 -05:00
|
|
|
metrics = make([]Metric, 0, len(m))
|
|
|
|
for i := range m {
|
|
|
|
metrics = append(metrics, m[i].copyMetric())
|
|
|
|
}
|
|
|
|
return metrics
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
func getClusterCapacityTotalBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: capacityRawSubsystem,
|
|
|
|
Name: totalBytes,
|
|
|
|
Help: "Total capacity online in the cluster.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getClusterCapacityFreeBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: capacityRawSubsystem,
|
|
|
|
Name: freeBytes,
|
|
|
|
Help: "Total free capacity online in the cluster.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getClusterCapacityUsageBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: capacityUsableSubsystem,
|
|
|
|
Name: totalBytes,
|
|
|
|
Help: "Total usable capacity online in the cluster.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getClusterCapacityUsageFreeBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: capacityUsableSubsystem,
|
|
|
|
Name: freeBytes,
|
|
|
|
Help: "Total free usable capacity online in the cluster.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-25 19:31:44 -05:00
|
|
|
func getNodeDiskAPILatencyMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: apiLatencyMicroSec,
|
|
|
|
Help: "Average last minute latency in µs for disk API storage operations.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getNodeDiskUsedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: usedBytes,
|
|
|
|
Help: "Total storage used on a disk.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getNodeDiskFreeBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: freeBytes,
|
|
|
|
Help: "Total storage available on a disk.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-03 14:18:41 -05:00
|
|
|
func getClusterDisksOfflineTotalMD() MetricDescription {
|
2021-01-18 23:35:38 -05:00
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: offlineTotal,
|
|
|
|
Help: "Total disks offline.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-03 14:18:41 -05:00
|
|
|
func getClusterDisksOnlineTotalMD() MetricDescription {
|
2021-01-18 23:35:38 -05:00
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: onlineTotal,
|
|
|
|
Help: "Total disks online.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-03 14:18:41 -05:00
|
|
|
func getClusterDisksTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: total,
|
|
|
|
Help: "Total disks.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-06 02:03:40 -04:00
|
|
|
func getClusterDisksFreeInodes() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: freeInodes,
|
|
|
|
Help: "Total free inodes.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getNodeDiskTotalBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: diskSubsystem,
|
|
|
|
Name: totalBytes,
|
|
|
|
Help: "Total storage on a disk.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-04-03 12:03:42 -04:00
|
|
|
func getUsageLastScanActivityMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioMetricNamespace,
|
|
|
|
Subsystem: usageSubsystem,
|
|
|
|
Name: lastActivityTime,
|
|
|
|
Help: "Time elapsed (in nano seconds) since last scan activity. This is set to 0 until first scan cycle",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-31 20:27:43 -05:00
|
|
|
func getBucketUsageQuotaTotalBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: quotaSubsystem,
|
|
|
|
Name: totalBytes,
|
|
|
|
Help: "Total bucket quota size in bytes",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getBucketUsageTotalBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: usageSubsystem,
|
|
|
|
Name: totalBytes,
|
|
|
|
Help: "Total bucket size in bytes",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2022-01-02 12:15:06 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getBucketUsageObjectsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: usageSubsystem,
|
|
|
|
Name: objectTotal,
|
|
|
|
Help: "Total number of objects",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-05-31 23:26:52 -04:00
|
|
|
|
2021-11-17 15:10:57 -05:00
|
|
|
func getBucketRepLatencyMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: replicationSubsystem,
|
|
|
|
Name: latencyMilliSec,
|
|
|
|
Help: "Replication latency in milliseconds.",
|
|
|
|
Type: histogramMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getBucketRepFailedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: replicationSubsystem,
|
|
|
|
Name: failedBytes,
|
|
|
|
Help: "Total number of bytes failed at least once to replicate.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getBucketRepSentBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: replicationSubsystem,
|
|
|
|
Name: sentBytes,
|
|
|
|
Help: "Total number of bytes replicated to the target bucket.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getBucketRepReceivedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: replicationSubsystem,
|
|
|
|
Name: receivedBytes,
|
|
|
|
Help: "Total number of bytes replicated to this bucket from another source bucket.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-05-31 23:26:52 -04:00
|
|
|
|
2021-04-03 12:03:42 -04:00
|
|
|
func getBucketRepFailedOperationsMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: replicationSubsystem,
|
|
|
|
Name: failedCount,
|
|
|
|
Help: "Total number of objects which failed replication",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getBucketObjectDistributionMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: bucketMetricNamespace,
|
|
|
|
Subsystem: objectsSubsystem,
|
|
|
|
Name: sizeDistribution,
|
|
|
|
Help: "Distribution of object sizes in the bucket, includes label for the bucket name.",
|
|
|
|
Type: histogramMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-01 15:31:33 -05:00
|
|
|
func getInternodeFailedRequests() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: interNodeMetricNamespace,
|
|
|
|
Subsystem: trafficSubsystem,
|
|
|
|
Name: errorsTotal,
|
|
|
|
Help: "Total number of failed internode calls.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getInterNodeSentBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: interNodeMetricNamespace,
|
|
|
|
Subsystem: trafficSubsystem,
|
|
|
|
Name: sentBytes,
|
|
|
|
Help: "Total number of bytes sent to the other peer nodes.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getInterNodeReceivedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: interNodeMetricNamespace,
|
|
|
|
Subsystem: trafficSubsystem,
|
|
|
|
Name: receivedBytes,
|
|
|
|
Help: "Total number of bytes received from other peer nodes.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getS3SentBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: trafficSubsystem,
|
|
|
|
Name: sentBytes,
|
|
|
|
Help: "Total number of s3 bytes sent",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getS3ReceivedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: trafficSubsystem,
|
|
|
|
Name: receivedBytes,
|
|
|
|
Help: "Total number of s3 bytes received.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getS3RequestsInFlightMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsSubsystem,
|
|
|
|
Name: inflightTotal,
|
2021-02-20 03:21:55 -05:00
|
|
|
Help: "Total number of S3 requests currently in flight",
|
2021-02-04 15:26:58 -05:00
|
|
|
Type: gaugeMetric,
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-02-20 03:21:55 -05:00
|
|
|
func getS3RequestsInQueueMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsSubsystem,
|
|
|
|
Name: waitingTotal,
|
|
|
|
Help: "Number of S3 requests in the waiting queue",
|
2021-03-24 12:06:37 -04:00
|
|
|
Type: gaugeMetric,
|
2021-02-20 03:21:55 -05:00
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2022-02-07 19:30:14 -05:00
|
|
|
func getIncomingS3RequestsMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsSubsystem,
|
|
|
|
Name: incomingTotal,
|
|
|
|
Help: "Volatile number of total incoming S3 requests",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getS3RequestsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsSubsystem,
|
|
|
|
Name: total,
|
|
|
|
Help: "Total number S3 requests",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getS3RequestsErrorsMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsSubsystem,
|
|
|
|
Name: errorsTotal,
|
|
|
|
Help: "Total number S3 requests with errors",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-24 13:25:27 -04:00
|
|
|
func getS3RequestsCanceledMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsSubsystem,
|
|
|
|
Name: canceledTotal,
|
|
|
|
Help: "Total number S3 requests that were canceled from the client while processing",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-31 02:19:36 -04:00
|
|
|
func getS3RejectedAuthRequestsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsRejectedSubsystem,
|
|
|
|
Name: authTotal,
|
|
|
|
Help: "Total number S3 requests rejected for auth failure.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-31 02:19:36 -04:00
|
|
|
func getS3RejectedHeaderRequestsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsRejectedSubsystem,
|
|
|
|
Name: headerTotal,
|
|
|
|
Help: "Total number S3 requests rejected for invalid header.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-31 02:19:36 -04:00
|
|
|
func getS3RejectedTimestampRequestsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsRejectedSubsystem,
|
|
|
|
Name: timestampTotal,
|
|
|
|
Help: "Total number S3 requests rejected for invalid timestamp.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-31 02:19:36 -04:00
|
|
|
func getS3RejectedInvalidRequestsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: requestsRejectedSubsystem,
|
|
|
|
Name: invalidTotal,
|
|
|
|
Help: "Total number S3 invalid requests.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getCacheHitsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
Subsystem: cacheSubsystem,
|
|
|
|
Name: hitsTotal,
|
|
|
|
Help: "Total number of disk cache hits",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getCacheHitsMissedTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
Subsystem: cacheSubsystem,
|
|
|
|
Name: missedTotal,
|
|
|
|
Help: "Total number of disk cache misses",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getCacheUsagePercentMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
Subsystem: minioNamespace,
|
|
|
|
Name: usagePercent,
|
|
|
|
Help: "Total percentage cache usage",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getCacheUsageInfoMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
Subsystem: cacheSubsystem,
|
|
|
|
Name: usageInfo,
|
|
|
|
Help: "Total percentage cache usage, value of 1 indicates high and 0 low, label level is set as well",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getCacheUsedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
Subsystem: cacheSubsystem,
|
|
|
|
Name: usedBytes,
|
|
|
|
Help: "Current cache usage in bytes",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getCacheTotalBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
Subsystem: cacheSubsystem,
|
|
|
|
Name: totalBytes,
|
|
|
|
Help: "Total size of cache disk in bytes",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getCacheSentBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
Subsystem: cacheSubsystem,
|
|
|
|
Name: sentBytes,
|
|
|
|
Help: "Total number of bytes served from cache",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getHealObjectsTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: healMetricNamespace,
|
|
|
|
Subsystem: objectsSubsystem,
|
|
|
|
Name: total,
|
|
|
|
Help: "Objects scanned in current self healing run",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getHealObjectsHealTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: healMetricNamespace,
|
|
|
|
Subsystem: objectsSubsystem,
|
|
|
|
Name: healTotal,
|
|
|
|
Help: "Objects healed in current self healing run",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-03-24 13:25:27 -04:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getHealObjectsFailTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: healMetricNamespace,
|
|
|
|
Subsystem: objectsSubsystem,
|
|
|
|
Name: errorsTotal,
|
|
|
|
Help: "Objects for which healing failed in current self healing run",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getHealLastActivityTimeMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: healMetricNamespace,
|
|
|
|
Subsystem: timeSubsystem,
|
|
|
|
Name: lastActivityTime,
|
|
|
|
Help: "Time elapsed (in nano seconds) since last self healing activity. This is set to -1 until initial self heal activity",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getNodeOnlineTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: nodesSubsystem,
|
|
|
|
Name: onlineTotal,
|
|
|
|
Help: "Total number of MinIO nodes online.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getNodeOfflineTotalMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: nodesSubsystem,
|
|
|
|
Name: offlineTotal,
|
|
|
|
Help: "Total number of MinIO nodes offline.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getMinIOVersionMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
2021-04-03 12:03:42 -04:00
|
|
|
Namespace: minioMetricNamespace,
|
2021-01-18 23:35:38 -05:00
|
|
|
Subsystem: softwareSubsystem,
|
|
|
|
Name: versionInfo,
|
|
|
|
Help: "MinIO Release tag for the server",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getMinIOCommitMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
2021-04-03 12:03:42 -04:00
|
|
|
Namespace: minioMetricNamespace,
|
2021-01-18 23:35:38 -05:00
|
|
|
Subsystem: softwareSubsystem,
|
|
|
|
Name: commitInfo,
|
|
|
|
Help: "Git commit hash for the MinIO release.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
func getS3TTFBDistributionMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: s3MetricNamespace,
|
|
|
|
Subsystem: timeSubsystem,
|
|
|
|
Name: ttfbDistribution,
|
|
|
|
Help: "Distribution of the time to first byte across API calls.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinioFDOpenMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: fileDescriptorSubsystem,
|
|
|
|
Name: openTotal,
|
|
|
|
Help: "Total number of open file descriptors by the MinIO Server process.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinioFDLimitMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: fileDescriptorSubsystem,
|
|
|
|
Name: limitTotal,
|
|
|
|
Help: "Limit on total number of open file descriptors for the MinIO Server process.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinioProcessIOWriteBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: ioSubsystem,
|
|
|
|
Name: writeBytes,
|
|
|
|
Help: "Total bytes written by the process to the underlying storage system, /proc/[pid]/io write_bytes",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinioProcessIOReadBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: ioSubsystem,
|
|
|
|
Name: readBytes,
|
|
|
|
Help: "Total bytes read by the process from the underlying storage system, /proc/[pid]/io read_bytes",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinioProcessIOWriteCachedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: ioSubsystem,
|
|
|
|
Name: wcharBytes,
|
|
|
|
Help: "Total bytes written by the process to the underlying storage system including page cache, /proc/[pid]/io wchar",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinioProcessIOReadCachedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: ioSubsystem,
|
|
|
|
Name: rcharBytes,
|
|
|
|
Help: "Total bytes read by the process from the underlying storage system including cache, /proc/[pid]/io rchar",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinIOProcessSysCallRMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: sysCallSubsystem,
|
|
|
|
Name: readTotal,
|
|
|
|
Help: "Total read SysCalls to the kernel. /proc/[pid]/io syscr",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinIOProcessSysCallWMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: sysCallSubsystem,
|
|
|
|
Name: writeTotal,
|
|
|
|
Help: "Total write SysCalls to the kernel. /proc/[pid]/io syscw",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-22 21:30:16 -05:00
|
|
|
func getMinIOGORoutineCountMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: goRoutines,
|
|
|
|
Name: total,
|
|
|
|
Help: "Total number of go routines running.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-02-02 02:02:18 -05:00
|
|
|
func getMinIOProcessStartTimeMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: processSubsystem,
|
|
|
|
Name: startTime,
|
2021-03-21 00:23:27 -04:00
|
|
|
Help: "Start time for MinIO process per node, time in seconds since Unix epoc.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-03-21 00:23:27 -04:00
|
|
|
func getMinIOProcessUptimeMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: processSubsystem,
|
|
|
|
Name: upTime,
|
|
|
|
Help: "Uptime for MinIO process per node in seconds.",
|
2021-02-02 02:02:18 -05:00
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-05-04 13:17:10 -04:00
|
|
|
func getMinIOProcessResidentMemory() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: processSubsystem,
|
|
|
|
Name: memory,
|
|
|
|
Help: "Resident memory size in bytes.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-05-04 13:17:10 -04:00
|
|
|
func getMinIOProcessCPUTime() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: processSubsystem,
|
|
|
|
Name: cpu,
|
|
|
|
Help: "Total user and system CPU time spent in seconds.",
|
|
|
|
Type: counterMetric,
|
|
|
|
}
|
|
|
|
}
|
2021-01-18 23:35:38 -05:00
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// getMinioProcMetrics returns a MetricsGroup reporting process-level
// statistics (open FDs, FD limit, I/O byte/syscall counters, start time,
// uptime, resident memory and CPU time) read from procfs.
//
// All values are best-effort: procfs is unavailable on Windows, and each
// individual read error is tolerated — a failed read leaves its value at
// zero and the corresponding metric is simply not emitted (every append is
// guarded by a > 0 / non-zero check).
func getMinioProcMetrics() *MetricsGroup {
	mg := &MetricsGroup{}
	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
		// procfs does not exist on Windows; emit nothing there.
		if runtime.GOOS == "windows" {
			return nil
		}
		metrics = make([]Metric, 0, 20)
		p, err := procfs.Self()
		if err != nil {
			// Log once per namespace to avoid flooding on every scrape.
			logger.LogOnceIf(ctx, err, nodeMetricNamespace)
			return
		}

		// Errors below are deliberately ignored; zero values are filtered
		// out by the guards before each append.
		openFDs, _ := p.FileDescriptorsLen()
		l, _ := p.Limits()
		io, _ := p.IO()
		stat, _ := p.Stat()
		startTime, _ := stat.StartTime()

		if openFDs > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinioFDOpenMD(),
					Value:       float64(openFDs),
				},
			)
		}

		if l.OpenFiles > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinioFDLimitMD(),
					Value:       float64(l.OpenFiles),
				})
		}

		if io.SyscR > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinIOProcessSysCallRMD(),
					Value:       float64(io.SyscR),
				})
		}

		if io.SyscW > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinIOProcessSysCallWMD(),
					Value:       float64(io.SyscW),
				})
		}

		if io.ReadBytes > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinioProcessIOReadBytesMD(),
					Value:       float64(io.ReadBytes),
				})
		}

		if io.WriteBytes > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinioProcessIOWriteBytesMD(),
					Value:       float64(io.WriteBytes),
				})
		}

		if io.RChar > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinioProcessIOReadCachedBytesMD(),
					Value:       float64(io.RChar),
				})
		}

		if io.WChar > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinioProcessIOWriteCachedBytesMD(),
					Value:       float64(io.WChar),
				})
		}

		if startTime > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinIOProcessStartTimeMD(),
					Value:       startTime,
				})
		}

		// Uptime is derived from the server's recorded boot time rather
		// than procfs, so it is only valid once globalBootTime is set.
		if !globalBootTime.IsZero() {
			metrics = append(metrics,
				Metric{
					Description: getMinIOProcessUptimeMD(),
					Value:       time.Since(globalBootTime).Seconds(),
				})
		}

		if stat.ResidentMemory() > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinIOProcessResidentMemory(),
					Value:       float64(stat.ResidentMemory()),
				})
		}

		if stat.CPUTime() > 0 {
			metrics = append(metrics,
				Metric{
					Description: getMinIOProcessCPUTime(),
					Value:       stat.CPUTime(),
				})
		}
		return
	})
	return mg
}
|
|
|
|
|
|
|
|
func getGoMetrics() *MetricsGroup {
|
|
|
|
mg := &MetricsGroup{}
|
|
|
|
mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getMinIOGORoutineCountMD(),
|
|
|
|
Value: float64(runtime.NumGoroutine()),
|
|
|
|
})
|
|
|
|
return
|
|
|
|
})
|
|
|
|
return mg
|
|
|
|
}
|
|
|
|
|
|
|
|
func getS3TTFBMetric() *MetricsGroup {
|
|
|
|
mg := &MetricsGroup{}
|
|
|
|
mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
|
|
|
|
// Read prometheus metric on this channel
|
|
|
|
ch := make(chan prometheus.Metric)
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
wg.Add(1)
|
|
|
|
|
|
|
|
// Read prometheus histogram data and convert it to internal metric data
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
|
|
|
for promMetric := range ch {
|
|
|
|
dtoMetric := &dto.Metric{}
|
|
|
|
err := promMetric.Write(dtoMetric)
|
|
|
|
if err != nil {
|
|
|
|
logger.LogIf(GlobalContext, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
h := dtoMetric.GetHistogram()
|
|
|
|
for _, b := range h.Bucket {
|
|
|
|
labels := make(map[string]string)
|
|
|
|
for _, lp := range dtoMetric.GetLabel() {
|
|
|
|
labels[*lp.Name] = *lp.Value
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
labels["le"] = fmt.Sprintf("%.3f", *b.UpperBound)
|
|
|
|
metric := Metric{
|
|
|
|
Description: getS3TTFBDistributionMD(),
|
|
|
|
VariableLabels: labels,
|
|
|
|
Value: float64(b.GetCumulativeCount()),
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
metrics = append(metrics, metric)
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
}
|
|
|
|
}()
|
2021-01-18 23:35:38 -05:00
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
httpRequestsDuration.Collect(ch)
|
|
|
|
close(ch)
|
|
|
|
wg.Wait()
|
|
|
|
return
|
|
|
|
})
|
|
|
|
return mg
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
|
2021-08-17 13:21:19 -04:00
|
|
|
func getTransitionPendingTasksMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: ilmSubsystem,
|
|
|
|
Name: transitionPendingTasks,
|
|
|
|
Help: "Number of pending ILM transition tasks in the queue.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func getTransitionActiveTasksMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: ilmSubsystem,
|
|
|
|
Name: transitionActiveTasks,
|
|
|
|
Help: "Number of active ILM transition tasks.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func getExpiryPendingTasksMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: nodeMetricNamespace,
|
|
|
|
Subsystem: ilmSubsystem,
|
|
|
|
Name: expiryPendingTasks,
|
|
|
|
Help: "Number of pending ILM expiry tasks in the queue.",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
func getILMNodeMetrics() *MetricsGroup {
|
|
|
|
mg := &MetricsGroup{}
|
|
|
|
mg.RegisterRead(func(_ context.Context) []Metric {
|
|
|
|
expPendingTasks := Metric{
|
|
|
|
Description: getExpiryPendingTasksMD(),
|
|
|
|
}
|
|
|
|
trPendingTasks := Metric{
|
|
|
|
Description: getTransitionPendingTasksMD(),
|
|
|
|
}
|
|
|
|
trActiveTasks := Metric{
|
|
|
|
Description: getTransitionActiveTasksMD(),
|
|
|
|
}
|
|
|
|
if globalExpiryState != nil {
|
|
|
|
expPendingTasks.Value = float64(globalExpiryState.PendingTasks())
|
|
|
|
}
|
|
|
|
if globalTransitionState != nil {
|
|
|
|
trPendingTasks.Value = float64(globalTransitionState.PendingTasks())
|
|
|
|
trActiveTasks.Value = float64(globalTransitionState.ActiveTasks())
|
|
|
|
}
|
|
|
|
return []Metric{
|
|
|
|
expPendingTasks,
|
|
|
|
trPendingTasks,
|
|
|
|
trActiveTasks,
|
|
|
|
}
|
|
|
|
})
|
|
|
|
return mg
|
|
|
|
}
|
|
|
|
|
|
|
|
// getScannerNodeMetrics returns a MetricsGroup exposing this node's data
// scanner counters (objects/versions/directories scanned, bucket scans
// started/finished, ILM version checks) plus one counter per non-zero
// lifecycle action outcome. All values are read atomically from
// globalScannerStats.
func getScannerNodeMetrics() *MetricsGroup {
	mg := &MetricsGroup{}
	mg.RegisterRead(func(_ context.Context) []Metric {
		metrics := []Metric{
			{
				Description: MetricDescription{
					Namespace: nodeMetricNamespace,
					Subsystem: scannerSubsystem,
					Name:      "objects_scanned",
					Help:      "Total number of unique objects scanned since server start.",
					Type:      counterMetric,
				},
				Value: float64(atomic.LoadUint64(&globalScannerStats.accTotalObjects)),
			},
			{
				Description: MetricDescription{
					Namespace: nodeMetricNamespace,
					Subsystem: scannerSubsystem,
					Name:      "versions_scanned",
					Help:      "Total number of object versions scanned since server start.",
					Type:      counterMetric,
				},
				Value: float64(atomic.LoadUint64(&globalScannerStats.accTotalVersions)),
			},
			{
				Description: MetricDescription{
					Namespace: nodeMetricNamespace,
					Subsystem: scannerSubsystem,
					Name:      "directories_scanned",
					Help:      "Total number of directories scanned since server start.",
					Type:      counterMetric,
				},
				Value: float64(atomic.LoadUint64(&globalScannerStats.accFolders)),
			},
			{
				Description: MetricDescription{
					Namespace: nodeMetricNamespace,
					Subsystem: scannerSubsystem,
					Name:      "bucket_scans_started",
					Help:      "Total number of bucket scans started since server start",
					Type:      counterMetric,
				},
				Value: float64(atomic.LoadUint64(&globalScannerStats.bucketsStarted)),
			},
			{
				Description: MetricDescription{
					Namespace: nodeMetricNamespace,
					Subsystem: scannerSubsystem,
					Name:      "bucket_scans_finished",
					Help:      "Total number of bucket scans finished since server start",
					Type:      counterMetric,
				},
				Value: float64(atomic.LoadUint64(&globalScannerStats.bucketsFinished)),
			},
			{
				// NOTE: same name as the scanner "versions_scanned" above
				// but under the ilm subsystem — counts ILM rule checks.
				Description: MetricDescription{
					Namespace: nodeMetricNamespace,
					Subsystem: ilmSubsystem,
					Name:      "versions_scanned",
					Help:      "Total number of object versions checked for ilm actions since server start",
					Type:      counterMetric,
				},
				Value: float64(atomic.LoadUint64(&globalScannerStats.ilmChecks)),
			},
		}
		// One counter per lifecycle action with a non-zero outcome count;
		// zero-valued actions are omitted to keep scrape output compact.
		for i := range globalScannerStats.actions {
			action := lifecycle.Action(i)
			v := atomic.LoadUint64(&globalScannerStats.actions[action])
			if v == 0 {
				continue
			}
			metrics = append(metrics, Metric{
				Description: MetricDescription{
					Namespace: nodeMetricNamespace,
					Subsystem: ilmSubsystem,
					Name:      MetricName("action_count_" + toSnake(action.String())),
					Help:      "Total action outcome of lifecycle checks since server start",
					Type:      counterMetric,
				},
				Value: float64(v),
			})
		}
		return metrics
	})
	return mg
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
func getMinioVersionMetrics() *MetricsGroup {
|
|
|
|
mg := &MetricsGroup{}
|
|
|
|
mg.RegisterRead(func(_ context.Context) (metrics []Metric) {
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getMinIOCommitMD(),
|
|
|
|
VariableLabels: map[string]string{"commit": CommitID},
|
|
|
|
})
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getMinIOVersionMD(),
|
|
|
|
VariableLabels: map[string]string{"version": Version},
|
|
|
|
})
|
|
|
|
return
|
|
|
|
})
|
|
|
|
return mg
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
func getNodeHealthMetrics() *MetricsGroup {
|
|
|
|
mg := &MetricsGroup{}
|
|
|
|
mg.RegisterRead(func(_ context.Context) (metrics []Metric) {
|
|
|
|
if globalIsGateway {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
metrics = make([]Metric, 0, 16)
|
|
|
|
nodesUp, nodesDown := GetPeerOnlineCount()
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getNodeOnlineTotalMD(),
|
|
|
|
Value: float64(nodesUp),
|
|
|
|
})
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getNodeOfflineTotalMD(),
|
|
|
|
Value: float64(nodesDown),
|
|
|
|
})
|
|
|
|
return
|
|
|
|
})
|
|
|
|
return mg
|
|
|
|
}
|
2021-04-03 12:03:42 -04:00
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// getMinioHealingMetrics returns a MetricsGroup describing the background
// heal sequence: time since its last activity plus per-type counts of
// scanned, healed and failed items. It reports nothing outside erasure
// mode, when no background heal sequence exists, or before the sequence
// has recorded any activity.
func getMinioHealingMetrics() *MetricsGroup {
	mg := &MetricsGroup{}
	mg.RegisterRead(func(_ context.Context) (metrics []Metric) {
		metrics = make([]Metric, 0, 5)
		// Healing only applies to erasure-coded deployments.
		if !globalIsErasure {
			return
		}
		bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
		if !exists {
			return
		}

		// No activity recorded yet — nothing meaningful to report.
		if bgSeq.lastHealActivity.IsZero() {
			return
		}

		// Value is a time.Duration (nanoseconds) since last activity.
		metrics = append(metrics, Metric{
			Description: getHealLastActivityTimeMD(),
			Value:       float64(time.Since(bgSeq.lastHealActivity)),
		})
		metrics = append(metrics, getObjectsScanned(bgSeq)...)
		metrics = append(metrics, getHealedItems(bgSeq)...)
		metrics = append(metrics, getFailedItems(bgSeq)...)
		return
	})
	return mg
}
|
|
|
|
|
|
|
|
func getFailedItems(seq *healSequence) (m []Metric) {
|
2021-03-25 15:58:43 -04:00
|
|
|
m = make([]Metric, 0, 1)
|
2021-01-18 23:35:38 -05:00
|
|
|
for k, v := range seq.gethealFailedItemsMap() {
|
|
|
|
s := strings.Split(k, ",")
|
|
|
|
m = append(m, Metric{
|
|
|
|
Description: getHealObjectsFailTotalMD(),
|
|
|
|
VariableLabels: map[string]string{
|
|
|
|
"mount_path": s[0],
|
|
|
|
"volume_status": s[1],
|
|
|
|
},
|
|
|
|
Value: float64(v),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2021-06-17 23:27:04 -04:00
|
|
|
func getHealedItems(seq *healSequence) (m []Metric) {
|
2021-03-25 15:58:43 -04:00
|
|
|
items := seq.getHealedItemsMap()
|
|
|
|
m = make([]Metric, 0, len(items))
|
|
|
|
for k, v := range items {
|
2021-01-18 23:35:38 -05:00
|
|
|
m = append(m, Metric{
|
|
|
|
Description: getHealObjectsHealTotalMD(),
|
|
|
|
VariableLabels: map[string]string{"type": string(k)},
|
|
|
|
Value: float64(v),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func getObjectsScanned(seq *healSequence) (m []Metric) {
|
2021-06-17 23:27:04 -04:00
|
|
|
items := seq.getScannedItemsMap()
|
2021-03-25 15:58:43 -04:00
|
|
|
m = make([]Metric, 0, len(items))
|
2021-06-17 23:27:04 -04:00
|
|
|
for k, v := range items {
|
2021-01-18 23:35:38 -05:00
|
|
|
m = append(m, Metric{
|
|
|
|
Description: getHealObjectsTotalMD(),
|
|
|
|
VariableLabels: map[string]string{"type": string(k)},
|
|
|
|
Value: float64(v),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2021-06-17 23:27:04 -04:00
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// getCacheMetrics returns a MetricsGroup for the disk-cache layer: global
// hit/miss/bytes-served counters plus per-cache-disk usage gauges.
// Results are cached for 10s between scrapes. Reports nothing until the
// cache object layer is initialized.
func getCacheMetrics() *MetricsGroup {
	mg := &MetricsGroup{
		cacheInterval: 10 * time.Second,
	}
	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
		metrics = make([]Metric, 0, 20)
		cacheObjLayer := newCachedObjectLayerFn()
		// Service not initialized yet
		if cacheObjLayer == nil {
			return
		}
		metrics = append(metrics, Metric{
			Description: getCacheHitsTotalMD(),
			Value:       float64(cacheObjLayer.CacheStats().getHits()),
		})
		metrics = append(metrics, Metric{
			Description: getCacheHitsMissedTotalMD(),
			Value:       float64(cacheObjLayer.CacheStats().getMisses()),
		})
		metrics = append(metrics, Metric{
			Description: getCacheSentBytesMD(),
			Value:       float64(cacheObjLayer.CacheStats().getBytesServed()),
		})
		// Per-disk gauges, labeled by the cache directory path.
		for _, cdStats := range cacheObjLayer.CacheStats().GetDiskStats() {
			metrics = append(metrics, Metric{
				Description:    getCacheUsagePercentMD(),
				Value:          float64(cdStats.UsagePercent),
				VariableLabels: map[string]string{"disk": cdStats.Dir},
			})
			metrics = append(metrics, Metric{
				Description:    getCacheUsageInfoMD(),
				Value:          float64(cdStats.UsageState),
				VariableLabels: map[string]string{"disk": cdStats.Dir, "level": cdStats.GetUsageLevelString()},
			})
			metrics = append(metrics, Metric{
				Description:    getCacheUsedBytesMD(),
				Value:          float64(cdStats.UsageSize),
				VariableLabels: map[string]string{"disk": cdStats.Dir},
			})
			metrics = append(metrics, Metric{
				Description:    getCacheTotalBytesMD(),
				Value:          float64(cdStats.TotalCapacity),
				VariableLabels: map[string]string{"disk": cdStats.Dir},
			})
		}
		return
	})
	return mg
}
|
|
|
|
|
|
|
|
// getHTTPMetrics returns a MetricsGroup built from the global HTTP stats:
// rejected-request counters (auth/timestamp/header/invalid), queue depth,
// incoming request count, and per-API counts of in-flight, total, errored
// and canceled S3 requests.
func getHTTPMetrics() *MetricsGroup {
	mg := &MetricsGroup{}
	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
		// Snapshot of all HTTP counters at scrape time.
		httpStats := globalHTTPStats.toServerHTTPStats()
		metrics = make([]Metric, 0, 3+
			len(httpStats.CurrentS3Requests.APIStats)+
			len(httpStats.TotalS3Requests.APIStats)+
			len(httpStats.TotalS3Errors.APIStats))
		metrics = append(metrics, Metric{
			Description: getS3RejectedAuthRequestsTotalMD(),
			Value:       float64(httpStats.TotalS3RejectedAuth),
		})
		metrics = append(metrics, Metric{
			Description: getS3RejectedTimestampRequestsTotalMD(),
			Value:       float64(httpStats.TotalS3RejectedTime),
		})
		metrics = append(metrics, Metric{
			Description: getS3RejectedHeaderRequestsTotalMD(),
			Value:       float64(httpStats.TotalS3RejectedHeader),
		})
		metrics = append(metrics, Metric{
			Description: getS3RejectedInvalidRequestsTotalMD(),
			Value:       float64(httpStats.TotalS3RejectedInvalid),
		})
		metrics = append(metrics, Metric{
			Description: getS3RequestsInQueueMD(),
			Value:       float64(httpStats.S3RequestsInQueue),
		})
		metrics = append(metrics, Metric{
			Description: getIncomingS3RequestsMD(),
			Value:       float64(httpStats.S3RequestsIncoming),
		})

		// Per-API breakdowns, labeled by API name.
		for api, value := range httpStats.CurrentS3Requests.APIStats {
			metrics = append(metrics, Metric{
				Description:    getS3RequestsInFlightMD(),
				Value:          float64(value),
				VariableLabels: map[string]string{"api": api},
			})
		}
		for api, value := range httpStats.TotalS3Requests.APIStats {
			metrics = append(metrics, Metric{
				Description:    getS3RequestsTotalMD(),
				Value:          float64(value),
				VariableLabels: map[string]string{"api": api},
			})
		}
		for api, value := range httpStats.TotalS3Errors.APIStats {
			metrics = append(metrics, Metric{
				Description:    getS3RequestsErrorsMD(),
				Value:          float64(value),
				VariableLabels: map[string]string{"api": api},
			})
		}
		for api, value := range httpStats.TotalS3Canceled.APIStats {
			metrics = append(metrics, Metric{
				Description:    getS3RequestsCanceledMD(),
				Value:          float64(value),
				VariableLabels: map[string]string{"api": api},
			})
		}
		return
	})
	return mg
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
func getNetworkMetrics() *MetricsGroup {
|
|
|
|
mg := &MetricsGroup{}
|
|
|
|
mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
|
|
|
|
metrics = make([]Metric, 0, 10)
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getInternodeFailedRequests(),
|
|
|
|
Value: float64(loadAndResetRPCNetworkErrsCounter()),
|
|
|
|
})
|
|
|
|
connStats := globalConnStats.toServerConnStats()
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getInterNodeSentBytesMD(),
|
|
|
|
Value: float64(connStats.TotalOutputBytes),
|
|
|
|
})
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getInterNodeReceivedBytesMD(),
|
|
|
|
Value: float64(connStats.TotalInputBytes),
|
|
|
|
})
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getS3SentBytesMD(),
|
|
|
|
Value: float64(connStats.S3OutputBytes),
|
|
|
|
})
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getS3ReceivedBytesMD(),
|
|
|
|
Value: float64(connStats.S3InputBytes),
|
|
|
|
})
|
|
|
|
return
|
|
|
|
})
|
|
|
|
return mg
|
|
|
|
}
|
|
|
|
|
|
|
|
// getBucketUsageMetrics returns a MetricsGroup with per-bucket usage and
// replication metrics derived from the scanner's persisted data-usage
// snapshot: total bytes/objects, quota (when configured), per-target
// replication byte/failure/latency stats, and the object size histogram.
// Results are cached for 10s between scrapes. Reports nothing before the
// object layer is ready, in gateway mode, or before the first scan.
func getBucketUsageMetrics() *MetricsGroup {
	mg := &MetricsGroup{
		cacheInterval: 10 * time.Second,
	}
	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
		objLayer := newObjectLayerFn()
		// Service not initialized yet
		if objLayer == nil || globalIsGateway {
			return
		}

		metrics = make([]Metric, 0, 50)
		dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer)
		if err != nil {
			// Best-effort: no usage snapshot available this scrape.
			return
		}

		// data usage has not captured any data yet.
		if dataUsageInfo.LastUpdate.IsZero() {
			return
		}

		// Value is a time.Duration (nanoseconds) since the last scan.
		metrics = append(metrics, Metric{
			Description: getUsageLastScanActivityMD(),
			Value:       float64(time.Since(dataUsageInfo.LastUpdate)),
		})

		for bucket, usage := range dataUsageInfo.BucketsUsage {
			stats := getLatestReplicationStats(bucket, usage)

			// Quota lookup is best-effort; nil quota means none set.
			quota, _ := globalBucketQuotaSys.Get(ctx, bucket)

			metrics = append(metrics, Metric{
				Description:    getBucketUsageTotalBytesMD(),
				Value:          float64(usage.Size),
				VariableLabels: map[string]string{"bucket": bucket},
			})

			metrics = append(metrics, Metric{
				Description:    getBucketUsageObjectsTotalMD(),
				Value:          float64(usage.ObjectsCount),
				VariableLabels: map[string]string{"bucket": bucket},
			})

			metrics = append(metrics, Metric{
				Description:    getBucketRepReceivedBytesMD(),
				Value:          float64(stats.ReplicaSize),
				VariableLabels: map[string]string{"bucket": bucket},
			})

			if quota != nil && quota.Quota > 0 {
				metrics = append(metrics, Metric{
					Description:    getBucketUsageQuotaTotalBytesMD(),
					Value:          float64(quota.Quota),
					VariableLabels: map[string]string{"bucket": bucket},
				})
			}

			// Per-replication-target stats, labeled by target ARN.
			if stats.hasReplicationUsage() {
				for arn, stat := range stats.Stats {
					metrics = append(metrics, Metric{
						Description:    getBucketRepFailedBytesMD(),
						Value:          float64(stat.FailedSize),
						VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn},
					})
					metrics = append(metrics, Metric{
						Description:    getBucketRepSentBytesMD(),
						Value:          float64(stat.ReplicatedSize),
						VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn},
					})
					metrics = append(metrics, Metric{
						Description:    getBucketRepFailedOperationsMD(),
						Value:          float64(stat.FailedCount),
						VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn},
					})
					metrics = append(metrics, Metric{
						Description:          getBucketRepLatencyMD(),
						HistogramBucketLabel: "range",
						Histogram:            stat.Latency.getUploadLatency(),
						VariableLabels:       map[string]string{"bucket": bucket, "operation": "upload", "targetArn": arn},
					})

				}
			}

			metrics = append(metrics, Metric{
				Description:          getBucketObjectDistributionMD(),
				Histogram:            usage.ObjectSizesHistogram,
				HistogramBucketLabel: "range",
				VariableLabels:       map[string]string{"bucket": bucket},
			})
		}
		return
	})
	return mg
}
|
2021-03-02 20:28:04 -05:00
|
|
|
|
2022-02-08 15:45:28 -05:00
|
|
|
func getClusterTransitionedBytesMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: ilmSubsystem,
|
|
|
|
Name: transitionedBytes,
|
|
|
|
Help: "Total bytes transitioned to a tier",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func getClusterTransitionedObjectsMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: ilmSubsystem,
|
|
|
|
Name: transitionedObjects,
|
|
|
|
Help: "Total number of objects transitioned to a tier",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func getClusterTransitionedVersionsMD() MetricDescription {
|
|
|
|
return MetricDescription{
|
|
|
|
Namespace: clusterMetricNamespace,
|
|
|
|
Subsystem: ilmSubsystem,
|
|
|
|
Name: transitionedVersions,
|
|
|
|
Help: "Total number of versions transitioned to a tier",
|
|
|
|
Type: gaugeMetric,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// getClusterTierMetrics returns a MetricsGroup with per-tier ILM
// transition totals (bytes, objects, versions) from the persisted
// data-usage snapshot. Results are cached for 10s between scrapes.
// Reports nothing when no tiers are configured, before the object layer
// is ready, in gateway mode, or before the first scan completes.
func getClusterTierMetrics() *MetricsGroup {
	mg := &MetricsGroup{
		cacheInterval: 10 * time.Second,
	}
	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
		if globalTierConfigMgr.Empty() {
			return
		}
		objLayer := newObjectLayerFn()
		if objLayer == nil || globalIsGateway {
			return
		}

		dui, err := loadDataUsageFromBackend(GlobalContext, objLayer)
		if err != nil {
			// Best-effort: no usage snapshot available this scrape.
			return
		}
		// data usage has not captured any data yet.
		if dui.LastUpdate.IsZero() {
			return
		}

		// e.g minio_cluster_ilm_transitioned_bytes{tier="S3TIER-1"}=136314880
		// minio_cluster_ilm_transitioned_objects{tier="S3TIER-1"}=1
		// minio_cluster_ilm_transitioned_versions{tier="S3TIER-1"}=3
		for tier, st := range dui.TierStats.Tiers {
			metrics = append(metrics, Metric{
				Description:    getClusterTransitionedBytesMD(),
				Value:          float64(st.TotalSize),
				VariableLabels: map[string]string{"tier": tier},
			})
			metrics = append(metrics, Metric{
				Description:    getClusterTransitionedObjectsMD(),
				Value:          float64(st.NumObjects),
				VariableLabels: map[string]string{"tier": tier},
			})
			metrics = append(metrics, Metric{
				Description:    getClusterTransitionedVersionsMD(),
				Value:          float64(st.NumVersions),
				VariableLabels: map[string]string{"tier": tier},
			})
		}

		return metrics
	})
	return mg
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// getLocalStorageMetrics returns a MetricsGroup exposing per-drive space
// accounting for this node (used/free/total bytes and free inodes), each
// labeled with the drive path. Reads are cached for 10s.
func getLocalStorageMetrics() *MetricsGroup {
	mg := &MetricsGroup{
		cacheInterval: 10 * time.Second,
	}
	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
		objLayer := newObjectLayerFn()
		// Service not initialized yet
		if objLayer == nil || globalIsGateway {
			return
		}

		// Four metrics per drive; pre-size generously to avoid growth.
		metrics = make([]Metric, 0, 50)
		// Errors are deliberately ignored; a partial/zero StorageInfo
		// yields an empty Disks slice and no metrics this scrape.
		storageInfo, _ := objLayer.LocalStorageInfo(ctx)
		for _, disk := range storageInfo.Disks {
			metrics = append(metrics, Metric{
				Description:    getNodeDiskUsedBytesMD(),
				Value:          float64(disk.UsedSpace),
				VariableLabels: map[string]string{"disk": disk.DrivePath},
			})

			metrics = append(metrics, Metric{
				Description:    getNodeDiskFreeBytesMD(),
				Value:          float64(disk.AvailableSpace),
				VariableLabels: map[string]string{"disk": disk.DrivePath},
			})

			metrics = append(metrics, Metric{
				Description:    getNodeDiskTotalBytesMD(),
				Value:          float64(disk.TotalSpace),
				VariableLabels: map[string]string{"disk": disk.DrivePath},
			})

			// NOTE(review): this node-level drive metric uses a
			// cluster-namespaced description (getClusterDisksFreeInodes)
			// unlike its getNodeDisk*MD siblings above — confirm whether
			// the metric is intentionally cluster-scoped.
			metrics = append(metrics, Metric{
				Description:    getClusterDisksFreeInodes(),
				Value:          float64(disk.FreeInodes),
				VariableLabels: map[string]string{"disk": disk.DrivePath},
			})

		}
		return
	})
	return mg
}
|
|
|
|
|
|
|
|
func getLocalDiskStorageMetrics() *MetricsGroup {
|
|
|
|
mg := &MetricsGroup{
|
|
|
|
cacheInterval: 3 * time.Second,
|
|
|
|
}
|
|
|
|
mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
|
|
|
|
objLayer := newObjectLayerFn()
|
|
|
|
// Service not initialized yet
|
|
|
|
if objLayer == nil || globalIsGateway {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
storageInfo, _ := objLayer.LocalStorageInfo(ctx)
|
2022-01-31 03:44:26 -05:00
|
|
|
if storageInfo.Backend.Type == madmin.FS {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
metrics = make([]Metric, 0, 50)
|
2022-01-25 19:31:44 -05:00
|
|
|
for _, disk := range storageInfo.Disks {
|
2022-01-31 03:44:26 -05:00
|
|
|
if disk.Metrics == nil {
|
|
|
|
continue
|
|
|
|
}
|
2022-01-25 19:31:44 -05:00
|
|
|
for apiName, latency := range disk.Metrics.APILatencies {
|
|
|
|
val := latency.(uint64)
|
|
|
|
metrics = append(metrics, Metric{
|
|
|
|
Description: getNodeDiskAPILatencyMD(),
|
|
|
|
Value: float64(val / 1000),
|
|
|
|
VariableLabels: map[string]string{"disk": disk.DrivePath, "api": "storage." + apiName},
|
|
|
|
})
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
}
|
|
|
|
return
|
|
|
|
})
|
|
|
|
return mg
|
|
|
|
}
|
2021-01-18 23:35:38 -05:00
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// getClusterStorageMetrics returns a MetricsGroup exposing cluster-wide
// capacity and drive-count metrics: raw total/free bytes, usable
// total/free bytes, and online/offline/total disk counts. Only emitted
// in erasure-coded deployments. Reads are cached for 10s.
func getClusterStorageMetrics() *MetricsGroup {
	mg := &MetricsGroup{
		cacheInterval: 10 * time.Second,
	}
	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
		objLayer := newObjectLayerFn()
		// Service not initialized yet
		if objLayer == nil || !globalIsErasure {
			return
		}

		// Fetch disk space info, ignore errors
		metrics = make([]Metric, 0, 10)
		storageInfo, _ := objLayer.StorageInfo(ctx)
		onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks)
		totalDisks := onlineDisks.Merge(offlineDisks)

		metrics = append(metrics, Metric{
			Description: getClusterCapacityTotalBytesMD(),
			Value:       float64(GetTotalCapacity(storageInfo.Disks)),
		})

		metrics = append(metrics, Metric{
			Description: getClusterCapacityFreeBytesMD(),
			Value:       float64(GetTotalCapacityFree(storageInfo.Disks)),
		})

		// Usable capacity accounts for erasure-coding parity overhead.
		metrics = append(metrics, Metric{
			Description: getClusterCapacityUsageBytesMD(),
			Value:       GetTotalUsableCapacity(storageInfo.Disks, storageInfo),
		})

		metrics = append(metrics, Metric{
			Description: getClusterCapacityUsageFreeBytesMD(),
			Value:       GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo),
		})

		metrics = append(metrics, Metric{
			Description: getClusterDisksOfflineTotalMD(),
			Value:       float64(offlineDisks.Sum()),
		})

		metrics = append(metrics, Metric{
			Description: getClusterDisksOnlineTotalMD(),
			Value:       float64(onlineDisks.Sum()),
		})

		metrics = append(metrics, Metric{
			Description: getClusterDisksTotalMD(),
			Value:       float64(totalDisks.Sum()),
		})
		return
	})
	return mg
}
|
|
|
|
|
|
|
|
// minioClusterCollector is a prometheus.Collector that reports
// cluster-level metrics: it merges metrics fetched from peer nodes with
// metrics generated locally from metricsGroups (see Collect).
type minioClusterCollector struct {
	metricsGroups []*MetricsGroup // generators for locally-produced metrics
	desc          *prometheus.Desc // placeholder descriptor sent by Describe
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
func newMinioClusterCollector(metricsGroups []*MetricsGroup) *minioClusterCollector {
|
2021-01-18 23:35:38 -05:00
|
|
|
return &minioClusterCollector{
|
2021-12-17 13:11:04 -05:00
|
|
|
metricsGroups: metricsGroups,
|
|
|
|
desc: prometheus.NewDesc("minio_stats", "Statistics exposed by MinIO server per cluster", nil, nil),
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Describe sends the super-set of all possible descriptors of metrics
// to the channel, satisfying prometheus.Collector. Only a single
// placeholder descriptor is sent; concrete descriptors are built
// dynamically in Collect.
func (c *minioClusterCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}
|
|
|
|
|
|
|
|
// Collect is called by the Prometheus registry when collecting metrics.
// It concurrently drains two sources — metrics pulled from peer nodes and
// metrics generated locally — converting each Metric into a Prometheus
// const metric on the out channel.
func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) {
	var wg sync.WaitGroup
	// publish converts every Metric received on in into a Prometheus
	// metric; descriptors are created per metric since label sets vary.
	publish := func(in <-chan Metric) {
		defer wg.Done()
		for metric := range in {
			labels, values := getOrderedLabelValueArrays(metric.VariableLabels)
			if metric.Description.Type == histogramMetric {
				if metric.Histogram == nil {
					continue
				}
				// Emit one gauge per histogram bucket, adding the bucket
				// label/value without mutating labels/values.
				for k, v := range metric.Histogram {
					out <- prometheus.MustNewConstMetric(
						prometheus.NewDesc(
							prometheus.BuildFQName(string(metric.Description.Namespace),
								string(metric.Description.Subsystem),
								string(metric.Description.Name)),
							metric.Description.Help,
							append(labels, metric.HistogramBucketLabel),
							metric.StaticLabels,
						),
						prometheus.GaugeValue,
						float64(v),
						append(values, k)...)
				}
				continue
			}
			// Scalar metric: everything but counters is exported as a gauge.
			metricType := prometheus.GaugeValue
			switch metric.Description.Type {
			case counterMetric:
				metricType = prometheus.CounterValue
			}
			toPost := prometheus.MustNewConstMetric(
				prometheus.NewDesc(
					prometheus.BuildFQName(string(metric.Description.Namespace),
						string(metric.Description.Subsystem),
						string(metric.Description.Name)),
					metric.Description.Help,
					labels,
					metric.StaticLabels,
				),
				metricType,
				metric.Value,
				values...)
			out <- toPost
		}
	}

	// Call peer api to fetch metrics
	peerCh := globalNotificationSys.GetClusterMetrics(GlobalContext)
	selfCh := ReportMetrics(GlobalContext, c.metricsGroups)
	wg.Add(2)
	go publish(peerCh)
	go publish(selfCh)
	wg.Wait()
}
|
|
|
|
|
|
|
|
// ReportMetrics reports serialized metrics to the channel passed for the metrics generated.
|
2021-12-17 13:11:04 -05:00
|
|
|
func ReportMetrics(ctx context.Context, metricsGroups []*MetricsGroup) <-chan Metric {
|
2021-01-18 23:35:38 -05:00
|
|
|
ch := make(chan Metric)
|
|
|
|
go func() {
|
|
|
|
defer close(ch)
|
2021-12-17 13:11:04 -05:00
|
|
|
populateAndPublish(metricsGroups, func(m Metric) bool {
|
2021-01-18 23:35:38 -05:00
|
|
|
if m.VariableLabels == nil {
|
|
|
|
m.VariableLabels = make(map[string]string)
|
|
|
|
}
|
2021-03-26 14:37:58 -04:00
|
|
|
m.VariableLabels[serverName] = globalLocalNodeName
|
2021-01-18 23:35:38 -05:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case ch <- m:
|
|
|
|
return true
|
|
|
|
case <-ctx.Done():
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}()
|
|
|
|
return ch
|
|
|
|
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// minioNodeCollector is the Custom Collector
//
// It implements prometheus.Collector for metrics scoped to this node,
// generated from the configured metricsGroups (see Collect).
type minioNodeCollector struct {
	metricsGroups []*MetricsGroup // generators for node-local metrics
	desc          *prometheus.Desc // placeholder descriptor sent by Describe
}
|
|
|
|
|
|
|
|
// Describe sends the super-set of all possible descriptors of metrics
// to the channel, satisfying prometheus.Collector. Only a single
// placeholder descriptor is sent; concrete descriptors are built
// dynamically in Collect.
func (c *minioNodeCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}
|
|
|
|
|
|
|
|
// populateAndPublish populates and then publishes the metrics generated by the generator function.
|
2021-12-17 13:11:04 -05:00
|
|
|
func populateAndPublish(metricsGroups []*MetricsGroup, publish func(m Metric) bool) {
|
|
|
|
for _, mg := range metricsGroups {
|
|
|
|
if mg == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, metric := range mg.Get() {
|
2021-01-18 23:35:38 -05:00
|
|
|
if !publish(metric) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Collect is called by the Prometheus registry when collecting metrics.
|
2021-12-17 13:11:04 -05:00
|
|
|
func (c *minioNodeCollector) Collect(ch chan<- prometheus.Metric) {
|
2021-01-18 23:35:38 -05:00
|
|
|
// Expose MinIO's version information
|
|
|
|
minioVersionInfo.WithLabelValues(Version, CommitID).Set(1.0)
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
populateAndPublish(c.metricsGroups, func(metric Metric) bool {
|
2021-01-18 23:35:38 -05:00
|
|
|
labels, values := getOrderedLabelValueArrays(metric.VariableLabels)
|
2021-03-26 14:37:58 -04:00
|
|
|
values = append(values, globalLocalNodeName)
|
2021-01-18 23:35:38 -05:00
|
|
|
labels = append(labels, serverName)
|
|
|
|
|
|
|
|
if metric.Description.Type == histogramMetric {
|
|
|
|
if metric.Histogram == nil {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
for k, v := range metric.Histogram {
|
|
|
|
labels = append(labels, metric.HistogramBucketLabel)
|
|
|
|
values = append(values, k)
|
|
|
|
ch <- prometheus.MustNewConstMetric(
|
|
|
|
prometheus.NewDesc(
|
|
|
|
prometheus.BuildFQName(string(metric.Description.Namespace),
|
|
|
|
string(metric.Description.Subsystem),
|
|
|
|
string(metric.Description.Name)),
|
|
|
|
metric.Description.Help,
|
|
|
|
labels,
|
|
|
|
metric.StaticLabels,
|
|
|
|
),
|
|
|
|
prometheus.GaugeValue,
|
|
|
|
float64(v),
|
|
|
|
values...)
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
metricType := prometheus.GaugeValue
|
|
|
|
switch metric.Description.Type {
|
|
|
|
case counterMetric:
|
|
|
|
metricType = prometheus.CounterValue
|
|
|
|
}
|
|
|
|
ch <- prometheus.MustNewConstMetric(
|
|
|
|
prometheus.NewDesc(
|
|
|
|
prometheus.BuildFQName(string(metric.Description.Namespace),
|
|
|
|
string(metric.Description.Subsystem),
|
|
|
|
string(metric.Description.Name)),
|
|
|
|
metric.Description.Help,
|
|
|
|
labels,
|
|
|
|
metric.StaticLabels,
|
|
|
|
),
|
|
|
|
metricType,
|
|
|
|
metric.Value,
|
|
|
|
values...)
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// getOrderedLabelValueArrays flattens a label->value map into two parallel
// slices such that values[i] corresponds to labels[i]. Both slices are
// always non-nil, even for an empty or nil input map. Iteration order is
// whatever the map yields; only the pairing between the slices is
// guaranteed.
func getOrderedLabelValueArrays(labelsWithValue map[string]string) (labels, values []string) {
	labels = make([]string, 0, len(labelsWithValue))
	values = make([]string, 0, len(labelsWithValue))
	for name, value := range labelsWithValue {
		labels = append(labels, name)
		values = append(values, value)
	}
	return labels, values
}
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
// newMinioCollectorNode describes the collector
|
2021-01-18 23:35:38 -05:00
|
|
|
// and returns reference of minioCollector for version 2
|
|
|
|
// It creates the Prometheus Description which is used
|
|
|
|
// to define Metric and help string
|
2021-12-17 13:11:04 -05:00
|
|
|
func newMinioCollectorNode(metricsGroups []*MetricsGroup) *minioNodeCollector {
|
|
|
|
return &minioNodeCollector{
|
|
|
|
metricsGroups: metricsGroups,
|
|
|
|
desc: prometheus.NewDesc("minio_stats", "Statistics exposed by MinIO server per node", nil, nil),
|
2021-01-18 23:35:38 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func metricsServerHandler() http.Handler {
|
|
|
|
registry := prometheus.NewRegistry()
|
|
|
|
|
|
|
|
// Report all other metrics
|
2021-12-17 13:11:04 -05:00
|
|
|
err := registry.Register(clusterCollector)
|
2021-01-18 23:35:38 -05:00
|
|
|
if err != nil {
|
|
|
|
logger.CriticalIf(GlobalContext, err)
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
// DefaultGatherers include golang metrics and process metrics.
|
|
|
|
gatherers := prometheus.Gatherers{
|
|
|
|
registry,
|
|
|
|
}
|
2021-12-17 13:11:04 -05:00
|
|
|
|
2021-01-18 23:35:38 -05:00
|
|
|
// Delegate http serving to Prometheus client library, which will call collector.Collect.
|
|
|
|
return promhttp.InstrumentMetricHandler(
|
|
|
|
registry,
|
|
|
|
promhttp.HandlerFor(gatherers,
|
|
|
|
promhttp.HandlerOpts{
|
|
|
|
ErrorHandling: promhttp.ContinueOnError,
|
|
|
|
}),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
func metricsNodeHandler() http.Handler {
|
|
|
|
registry := prometheus.NewRegistry()
|
|
|
|
|
2021-12-17 13:11:04 -05:00
|
|
|
err := registry.Register(nodeCollector)
|
2021-01-18 23:35:38 -05:00
|
|
|
if err != nil {
|
|
|
|
logger.CriticalIf(GlobalContext, err)
|
|
|
|
}
|
2021-01-22 21:30:16 -05:00
|
|
|
err = registry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
|
|
|
|
Namespace: minioNamespace,
|
|
|
|
ReportErrors: true,
|
|
|
|
}))
|
|
|
|
if err != nil {
|
|
|
|
logger.CriticalIf(GlobalContext, err)
|
|
|
|
}
|
|
|
|
err = registry.Register(prometheus.NewGoCollector())
|
|
|
|
if err != nil {
|
|
|
|
logger.CriticalIf(GlobalContext, err)
|
|
|
|
}
|
2021-01-18 23:35:38 -05:00
|
|
|
gatherers := prometheus.Gatherers{
|
|
|
|
registry,
|
|
|
|
}
|
|
|
|
// Delegate http serving to Prometheus client library, which will call collector.Collect.
|
|
|
|
return promhttp.InstrumentMetricHandler(
|
|
|
|
registry,
|
|
|
|
promhttp.HandlerFor(gatherers,
|
|
|
|
promhttp.HandlerOpts{
|
|
|
|
ErrorHandling: promhttp.ContinueOnError,
|
|
|
|
}),
|
|
|
|
)
|
|
|
|
}
|
2021-10-02 12:31:05 -04:00
|
|
|
|
|
|
|
// toSnake converts a CamelCase identifier into snake_case, e.g.
// "DiskPath" -> "disk_path", "APIName" -> "api_name". Runs of capitals
// (acronyms) are kept together; the input is assumed to contain ASCII
// letters only.
func toSnake(camel string) string {
	var sb strings.Builder
	n := len(camel)
	for idx, r := range camel {
		// A is 65, a is 97: anything at or above 'a' passes through.
		if r >= 'a' {
			sb.WriteRune(r)
			continue
		}
		// r is an uppercase letter here. Insert an underscore at a word
		// boundary: never before the first character, and only when the
		// neighboring character on either side is lowercase.
		prevIsLower := idx > 0 && rune(camel[idx-1]) >= 'a'
		nextIsLower := idx < n-1 && rune(camel[idx+1]) >= 'a'
		if (idx != 0 || idx == n-1) && (prevIsLower || nextIsLower) {
			sb.WriteRune('_')
		}
		// Lowercase the capital letter.
		sb.WriteRune(r + 'a' - 'A')
	}
	return sb.String()
}
|