Refactor health data structure (#11914)

This change simplifies the health data structures and adds versioning support to the health info API.
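As a rough illustration of the versioning support (hypothetical, not part of this commit): a client consuming the streamed health report could check the new Version field before interpreting the payload. The sketch below assumes madmin.HealthInfo exposes a Version field comparable to the madmin.HealthInfoVersion constant used in this change, and that the handler streams JSON-encoded HealthInfo messages (as partialWrite does); the stream is read from stdin purely for illustration.

    // Hypothetical client-side sketch: read streamed health info messages and
    // verify the schema version before using them.
    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "log"
        "os"

        "github.com/minio/madmin-go"
    )

    func main() {
        // For illustration the JSON stream is read from stdin; in practice it
        // would come from GET /minio/admin/v3/healthinfo.
        dec := json.NewDecoder(os.Stdin)
        for {
            var hi madmin.HealthInfo
            if err := dec.Decode(&hi); err == io.EOF {
                break
            } else if err != nil {
                log.Fatal(err)
            }
            // Reject payloads written with a different (unknown) schema version.
            if hi.Version != madmin.HealthInfoVersion {
                log.Fatalf("unsupported health info version %v", hi.Version)
            }
            fmt.Printf("received health info update (%d drive perf reports so far)\n",
                len(hi.Perf.Drives))
        }
    }

A check like this lets older clients fail fast on a future schema revision instead of silently misreading fields.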

Signed-off-by: Bala.FA <bala.gluster@gmail.com>
Bala FA
2021-06-01 21:25:49 +05:30
committed by GitHub
parent 8347db8be3
commit 120951d9e9
9 changed files with 368 additions and 829 deletions

View File

@@ -1366,6 +1366,105 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
writeSuccessResponseJSON(w, resp)
}
func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
kmsStat := fetchKMSStatus()
ldap := madmin.LDAP{}
if globalLDAPConfig.Enabled {
ldapConn, err := globalLDAPConfig.Connect()
if err != nil {
ldap.Status = string(madmin.ItemOffline)
} else if ldapConn == nil {
ldap.Status = "Not Configured"
} else {
// Close ldap connection to avoid leaks.
ldapConn.Close()
ldap.Status = string(madmin.ItemOnline)
}
}
log, audit := fetchLoggerInfo()
// Get the notification target info
notifyTarget := fetchLambdaInfo()
local := getLocalServerProperty(globalEndpoints, r)
servers := globalNotificationSys.ServerInfo()
servers = append(servers, local)
assignPoolNumbers(servers)
var backend interface{}
mode := madmin.ItemInitializing
buckets := madmin.Buckets{}
objects := madmin.Objects{}
usage := madmin.Usage{}
objectAPI := newObjectLayerFn()
if objectAPI != nil {
mode = madmin.ItemOnline
// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
} else {
buckets = madmin.Buckets{Error: err.Error()}
objects = madmin.Objects{Error: err.Error()}
usage = madmin.Usage{Error: err.Error()}
}
// Fetching the backend information
backendInfo := objectAPI.BackendInfo()
if backendInfo.Type == madmin.Erasure {
// Calculate the number of online/offline disks of all nodes
var allDisks []madmin.Disk
for _, s := range servers {
allDisks = append(allDisks, s.Disks...)
}
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(allDisks)
backend = madmin.ErasureBackend{
Type: madmin.ErasureType,
OnlineDisks: onlineDisks.Sum(),
OfflineDisks: offlineDisks.Sum(),
StandardSCParity: backendInfo.StandardSCParity,
RRSCParity: backendInfo.RRSCParity,
}
} else {
backend = madmin.FSBackend{
Type: madmin.FsType,
}
}
}
domain := globalDomainNames
services := madmin.Services{
KMS: kmsStat,
LDAP: ldap,
Logger: log,
Audit: audit,
Notifications: notifyTarget,
}
return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalServerRegion,
SQSARN: globalNotificationSys.GetARNList(false),
DeploymentID: globalDeploymentID,
Buckets: buckets,
Objects: objects,
Usage: usage,
Services: services,
Backend: backend,
Servers: servers,
}
}
// HealthInfoHandler - GET /minio/admin/v3/healthinfo
// ----------
// Get server health info
@@ -1380,7 +1479,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
}
query := r.URL.Query()
healthInfo := madmin.HealthInfo{}
healthInfo := madmin.HealthInfo{Version: madmin.HealthInfoVersion}
healthInfoCh := make(chan madmin.HealthInfo)
enc := json.NewEncoder(w)
@@ -1402,7 +1501,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
logger.LogIf(ctx, enc.Encode(healthInfo))
}
deadline := 3600 * time.Second
deadline := 1 * time.Hour
if dstr := r.URL.Query().Get("deadline"); dstr != "" {
var err error
deadline, err = time.ParseDuration(dstr)
@@ -1426,86 +1525,67 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
go func() {
defer close(healthInfoCh)
if cpu := query.Get("syscpu"); cpu == "true" {
cpuInfo := getLocalCPUInfo(deadlinedCtx, r)
partialWrite(healthInfo) // Write first message with only version populated
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, cpuInfo)
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, globalNotificationSys.CPUInfo(deadlinedCtx)...)
if query.Get("syscpu") == "true" {
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, madmin.GetCPUs(deadlinedCtx, r.Host))
healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, globalNotificationSys.GetCPUs(deadlinedCtx)...)
partialWrite(healthInfo)
}
if diskHw := query.Get("sysdiskhw"); diskHw == "true" {
diskHwInfo := getLocalDiskHwInfo(deadlinedCtx, r)
healthInfo.Sys.DiskHwInfo = append(healthInfo.Sys.DiskHwInfo, diskHwInfo)
healthInfo.Sys.DiskHwInfo = append(healthInfo.Sys.DiskHwInfo, globalNotificationSys.DiskHwInfo(deadlinedCtx)...)
if query.Get("sysdrivehw") == "true" {
healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, madmin.GetPartitions(deadlinedCtx, r.Host))
healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, globalNotificationSys.GetPartitions(deadlinedCtx)...)
partialWrite(healthInfo)
}
if osInfo := query.Get("sysosinfo"); osInfo == "true" {
osInfo := getLocalOsInfo(deadlinedCtx, r)
healthInfo.Sys.OsInfo = append(healthInfo.Sys.OsInfo, osInfo)
healthInfo.Sys.OsInfo = append(healthInfo.Sys.OsInfo, globalNotificationSys.OsInfo(deadlinedCtx)...)
if query.Get("sysosinfo") == "true" {
healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, madmin.GetOSInfo(deadlinedCtx, r.Host))
healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, globalNotificationSys.GetOSInfo(deadlinedCtx)...)
partialWrite(healthInfo)
}
if mem := query.Get("sysmem"); mem == "true" {
memInfo := getLocalMemInfo(deadlinedCtx, r)
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, memInfo)
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, globalNotificationSys.MemInfo(deadlinedCtx)...)
if query.Get("sysmem") == "true" {
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, madmin.GetMemInfo(deadlinedCtx, r.Host))
healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, globalNotificationSys.GetMemInfo(deadlinedCtx)...)
partialWrite(healthInfo)
}
if proc := query.Get("sysprocess"); proc == "true" {
procInfo := getLocalProcInfo(deadlinedCtx, r)
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, procInfo)
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, globalNotificationSys.ProcInfo(deadlinedCtx)...)
if query.Get("sysprocess") == "true" {
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, madmin.GetProcInfo(deadlinedCtx, r.Host))
healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, globalNotificationSys.GetProcInfo(deadlinedCtx)...)
partialWrite(healthInfo)
}
if config := query.Get("minioconfig"); config == "true" {
cfg, err := readServerConfig(ctx, objectAPI)
logger.LogIf(ctx, err)
healthInfo.Minio.Config = cfg
partialWrite(healthInfo)
}
if drive := query.Get("perfdrive"); drive == "true" {
// Get drive perf details from local server's drive(s)
drivePerfSerial := getLocalDrives(deadlinedCtx, false, globalEndpoints, r)
drivePerfParallel := getLocalDrives(deadlinedCtx, true, globalEndpoints, r)
errStr := ""
if drivePerfSerial.Error != "" {
errStr = "serial: " + drivePerfSerial.Error
}
if drivePerfParallel.Error != "" {
errStr = errStr + " parallel: " + drivePerfParallel.Error
if query.Get("minioconfig") == "true" {
config, err := readServerConfig(ctx, objectAPI)
if err != nil {
healthInfo.Minio.Config = madmin.MinioConfig{
Error: err.Error(),
}
} else {
healthInfo.Minio.Config = madmin.MinioConfig{
Config: config,
}
}
partialWrite(healthInfo)
}
driveInfo := madmin.ServerDrivesInfo{
Addr: drivePerfSerial.Addr,
Serial: drivePerfSerial.Serial,
Parallel: drivePerfParallel.Parallel,
Error: errStr,
}
healthInfo.Perf.DriveInfo = append(healthInfo.Perf.DriveInfo, driveInfo)
if query.Get("perfdrive") == "true" {
healthInfo.Perf.Drives = append(healthInfo.Perf.Drives, getDrivePerfInfos(deadlinedCtx, r.Host))
partialWrite(healthInfo)
// Notify all other MinIO peers to report drive perf numbers
driveInfos := globalNotificationSys.DrivePerfInfoChan(deadlinedCtx)
for obd := range driveInfos {
healthInfo.Perf.DriveInfo = append(healthInfo.Perf.DriveInfo, obd)
perfCh := globalNotificationSys.GetDrivePerfInfos(deadlinedCtx)
for perfInfo := range perfCh {
healthInfo.Perf.Drives = append(healthInfo.Perf.Drives, perfInfo)
partialWrite(healthInfo)
}
partialWrite(healthInfo)
}
if net := query.Get("perfnet"); net == "true" && globalIsDistErasure {
healthInfo.Perf.Net = append(healthInfo.Perf.Net, globalNotificationSys.NetInfo(deadlinedCtx))
if globalIsDistErasure && query.Get("perfnet") == "true" {
healthInfo.Perf.Net = append(healthInfo.Perf.Net, globalNotificationSys.GetNetPerfInfo(deadlinedCtx))
partialWrite(healthInfo)
netInfos := globalNotificationSys.DispatchNetPerfChan(deadlinedCtx)
@@ -1515,10 +1595,47 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
}
partialWrite(healthInfo)
healthInfo.Perf.NetParallel = globalNotificationSys.NetPerfParallelInfo(deadlinedCtx)
healthInfo.Perf.NetParallel = globalNotificationSys.GetParallelNetPerfInfo(deadlinedCtx)
partialWrite(healthInfo)
}
if query.Get("minioinfo") == "true" {
infoMessage := getServerInfo(ctx, r)
servers := []madmin.ServerInfo{}
for _, server := range infoMessage.Servers {
servers = append(servers, madmin.ServerInfo{
State: server.State,
Endpoint: server.Endpoint,
Uptime: server.Uptime,
Version: server.Version,
CommitID: server.CommitID,
Network: server.Network,
Drives: server.Disks,
PoolNumber: server.PoolNumber,
MemStats: madmin.MemStats{
Alloc: server.MemStats.Alloc,
TotalAlloc: server.MemStats.TotalAlloc,
Mallocs: server.MemStats.Mallocs,
Frees: server.MemStats.Frees,
HeapAlloc: server.MemStats.HeapAlloc,
},
})
}
healthInfo.Minio.Info = madmin.MinioInfo{
Mode: infoMessage.Mode,
Domain: infoMessage.Domain,
Region: infoMessage.Region,
SQSARN: infoMessage.SQSARN,
DeploymentID: infoMessage.DeploymentID,
Buckets: infoMessage.Buckets,
Objects: infoMessage.Objects,
Usage: infoMessage.Usage,
Services: infoMessage.Services,
Backend: infoMessage.Backend,
Servers: servers,
}
partialWrite(healthInfo)
}
}()
ticker := time.NewTicker(5 * time.Second)
@@ -1616,105 +1733,8 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
return
}
kmsStat := fetchKMSStatus()
ldap := madmin.LDAP{}
if globalLDAPConfig.Enabled {
ldapConn, err := globalLDAPConfig.Connect()
if err != nil {
ldap.Status = string(madmin.ItemOffline)
} else if ldapConn == nil {
ldap.Status = "Not Configured"
} else {
// Close ldap connection to avoid leaks.
ldapConn.Close()
ldap.Status = string(madmin.ItemOnline)
}
}
log, audit := fetchLoggerInfo()
// Get the notification target info
notifyTarget := fetchLambdaInfo()
local := getLocalServerProperty(globalEndpoints, r)
servers := globalNotificationSys.ServerInfo()
servers = append(servers, local)
assignPoolNumbers(servers)
var backend interface{}
mode := madmin.ItemInitializing
buckets := madmin.Buckets{}
objects := madmin.Objects{}
usage := madmin.Usage{}
objectAPI := newObjectLayerFn()
if objectAPI != nil {
mode = madmin.ItemOnline
// Load data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err == nil {
buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
} else {
buckets = madmin.Buckets{Error: err.Error()}
objects = madmin.Objects{Error: err.Error()}
usage = madmin.Usage{Error: err.Error()}
}
// Fetching the backend information
backendInfo := objectAPI.BackendInfo()
if backendInfo.Type == madmin.Erasure {
// Calculate the number of online/offline disks of all nodes
var allDisks []madmin.Disk
for _, s := range servers {
allDisks = append(allDisks, s.Disks...)
}
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(allDisks)
backend = madmin.ErasureBackend{
Type: madmin.ErasureType,
OnlineDisks: onlineDisks.Sum(),
OfflineDisks: offlineDisks.Sum(),
StandardSCParity: backendInfo.StandardSCParity,
RRSCParity: backendInfo.RRSCParity,
}
} else {
backend = madmin.FSBackend{
Type: madmin.FsType,
}
}
}
domain := globalDomainNames
services := madmin.Services{
KMS: kmsStat,
LDAP: ldap,
Logger: log,
Audit: audit,
Notifications: notifyTarget,
}
infoMsg := madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalServerRegion,
SQSARN: globalNotificationSys.GetARNList(false),
DeploymentID: globalDeploymentID,
Buckets: buckets,
Objects: objects,
Usage: usage,
Services: services,
Backend: backend,
Servers: servers,
}
// Marshal API response
jsonBytes, err := json.Marshal(infoMsg)
jsonBytes, err := json.Marshal(getServerInfo(ctx, r))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return

View File

@@ -19,329 +19,95 @@ package cmd
import (
"context"
"fmt"
"net/http"
"math"
"os"
"sync"
"syscall"
"github.com/minio/madmin-go"
"github.com/minio/minio/pkg/disk"
cpuhw "github.com/shirou/gopsutil/v3/cpu"
memhw "github.com/shirou/gopsutil/v3/mem"
"github.com/shirou/gopsutil/v3/process"
)
func getLocalCPUInfo(ctx context.Context, r *http.Request) madmin.ServerCPUInfo {
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
info, err := cpuhw.InfoWithContext(ctx)
if err != nil {
return madmin.ServerCPUInfo{
Addr: addr,
Error: fmt.Sprintf("info: %v", err),
}
}
time, err := cpuhw.TimesWithContext(ctx, false)
if err != nil {
return madmin.ServerCPUInfo{
Addr: addr,
Error: fmt.Sprintf("times: %v", err),
}
}
return madmin.ServerCPUInfo{
Addr: addr,
CPUStat: info,
TimeStat: time,
// round returns value rounding to specified decimal places.
func round(f float64, n int) float64 {
if n <= 0 {
return math.Round(f)
}
p := math.Pow10(n)
return math.Round(f*p) / p
}
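// For example (illustrative values, not in the original source):
// round(3.14159, 3) == 3.142, while round(123.7, 0) == 124 because
// n <= 0 falls back to plain math.Round.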
func getLocalDrives(ctx context.Context, parallel bool, endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerDrivesInfo {
var drivesPerfInfo []madmin.DrivePerfInfo
func getDrivePerfInfo(ctx context.Context, parallel bool) []madmin.DrivePerfInfo {
pools := globalEndpoints
info := []madmin.DrivePerfInfo{}
var wg sync.WaitGroup
for _, ep := range endpointServerPools {
for _, endpoint := range ep.Endpoints {
// Only proceed for local endpoints
if endpoint.IsLocal {
if _, err := os.Stat(endpoint.Path); err != nil {
// Since this drive is not available, add relevant details and proceed
drivesPerfInfo = append(drivesPerfInfo, madmin.DrivePerfInfo{
Path: endpoint.Path,
Error: fmt.Sprintf("stat: %v", err),
})
continue
}
measurePath := pathJoin(minioMetaTmpBucket, mustGetUUID())
measure := func(path string) {
defer wg.Done()
driveInfo := madmin.DrivePerfInfo{
Path: path,
}
latency, throughput, err := disk.GetHealthInfo(ctx, path, pathJoin(path, measurePath))
if err != nil {
driveInfo.Error = fmt.Sprintf("health-info: %v", err)
} else {
driveInfo.Latency = latency
driveInfo.Throughput = throughput
}
drivesPerfInfo = append(drivesPerfInfo, driveInfo)
}
wg.Add(1)
for _, pool := range pools {
for _, endpoint := range pool.Endpoints {
if !endpoint.IsLocal {
continue
}
if parallel {
go measure(endpoint.Path)
if _, err := os.Stat(endpoint.Path); err != nil {
info = append(info, madmin.DrivePerfInfo{
Path: endpoint.Path,
Error: err.Error(),
})
continue
}
getHealthInfo := func(path string) {
defer wg.Done()
latency, throughput, err := disk.GetHealthInfo(
ctx, path, pathJoin(path, minioMetaTmpBucket, mustGetUUID()),
)
if err != nil {
info = append(info, madmin.DrivePerfInfo{
Path: path,
Error: err.Error(),
})
} else {
measure(endpoint.Path)
info = append(info, madmin.DrivePerfInfo{
Path: path,
Latency: madmin.Latency{
Avg: round(latency.Avg, 3),
Max: round(latency.Max, 3),
Min: round(latency.Min, 3),
Percentile50: round(latency.Percentile50, 3),
Percentile90: round(latency.Percentile90, 3),
Percentile99: round(latency.Percentile99, 3),
},
Throughput: madmin.Throughput{
Avg: uint64(round(throughput.Avg, 0)),
Max: uint64(round(throughput.Max, 0)),
Min: uint64(round(throughput.Min, 0)),
Percentile50: uint64(round(throughput.Percentile50, 0)),
Percentile90: uint64(round(throughput.Percentile90, 0)),
Percentile99: uint64(round(throughput.Percentile99, 0)),
},
})
}
}
wg.Add(1)
if parallel {
go getHealthInfo(endpoint.Path)
} else {
getHealthInfo(endpoint.Path)
}
}
}
wg.Wait()
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
if parallel {
return madmin.ServerDrivesInfo{
Addr: addr,
Parallel: drivesPerfInfo,
}
}
return madmin.ServerDrivesInfo{
Addr: addr,
Serial: drivesPerfInfo,
}
return info
}
func getLocalMemInfo(ctx context.Context, r *http.Request) madmin.ServerMemInfo {
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
swap, err := memhw.SwapMemoryWithContext(ctx)
if err != nil {
return madmin.ServerMemInfo{
Addr: addr,
Error: fmt.Sprintf("swap: %v", err),
}
}
vm, err := memhw.VirtualMemoryWithContext(ctx)
if err != nil {
return madmin.ServerMemInfo{
Addr: addr,
Error: fmt.Sprintf("virtual-mem: %v", err),
}
}
return madmin.ServerMemInfo{
Addr: addr,
SwapMem: swap,
VirtualMem: vm,
}
}
func getLocalProcInfo(ctx context.Context, r *http.Request) madmin.ServerProcInfo {
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
errProcInfo := func(tag string, err error) madmin.ServerProcInfo {
return madmin.ServerProcInfo{
Addr: addr,
Error: fmt.Sprintf("%s: %v", tag, err),
}
}
selfPid := int32(syscall.Getpid())
self, err := process.NewProcess(selfPid)
if err != nil {
return errProcInfo("new-process", err)
}
processes := []*process.Process{self}
sysProcs := []madmin.SysProcess{}
for _, proc := range processes {
sysProc := madmin.SysProcess{}
sysProc.Pid = proc.Pid
bg, err := proc.BackgroundWithContext(ctx)
if err != nil {
return errProcInfo("background", err)
}
sysProc.Background = bg
cpuPercent, err := proc.CPUPercentWithContext(ctx)
if err != nil {
return errProcInfo("cpu-percent", err)
}
sysProc.CPUPercent = cpuPercent
children, _ := proc.ChildrenWithContext(ctx)
for _, c := range children {
sysProc.Children = append(sysProc.Children, c.Pid)
}
cmdLine, err := proc.CmdlineWithContext(ctx)
if err != nil {
return errProcInfo("cmdline", err)
}
sysProc.CmdLine = cmdLine
conns, err := proc.ConnectionsWithContext(ctx)
if err != nil {
return errProcInfo("conns", err)
}
sysProc.ConnectionCount = len(conns)
createTime, err := proc.CreateTimeWithContext(ctx)
if err != nil {
return errProcInfo("create-time", err)
}
sysProc.CreateTime = createTime
cwd, err := proc.CwdWithContext(ctx)
if err != nil {
return errProcInfo("cwd", err)
}
sysProc.Cwd = cwd
exe, err := proc.ExeWithContext(ctx)
if err != nil {
return errProcInfo("exe", err)
}
sysProc.Exe = exe
gids, err := proc.GidsWithContext(ctx)
if err != nil {
return errProcInfo("gids", err)
}
sysProc.Gids = gids
ioCounters, err := proc.IOCountersWithContext(ctx)
if err != nil {
return errProcInfo("iocounters", err)
}
sysProc.IOCounters = ioCounters
isRunning, err := proc.IsRunningWithContext(ctx)
if err != nil {
return errProcInfo("is-running", err)
}
sysProc.IsRunning = isRunning
memInfo, err := proc.MemoryInfoWithContext(ctx)
if err != nil {
return errProcInfo("mem-info", err)
}
sysProc.MemInfo = memInfo
memMaps, err := proc.MemoryMapsWithContext(ctx, true)
if err != nil {
return errProcInfo("mem-maps", err)
}
sysProc.MemMaps = memMaps
memPercent, err := proc.MemoryPercentWithContext(ctx)
if err != nil {
return errProcInfo("mem-percent", err)
}
sysProc.MemPercent = memPercent
name, err := proc.NameWithContext(ctx)
if err != nil {
return errProcInfo("name", err)
}
sysProc.Name = name
// NetIOCounters is not collected here; refer to
// https://github.com/shirou/gopsutil/issues/429 for why it is not useful.
nice, err := proc.NiceWithContext(ctx)
if err != nil {
return errProcInfo("nice", err)
}
sysProc.Nice = nice
numCtxSwitches, err := proc.NumCtxSwitchesWithContext(ctx)
if err != nil {
return errProcInfo("num-ctx-switches", err)
}
sysProc.NumCtxSwitches = numCtxSwitches
numFds, err := proc.NumFDsWithContext(ctx)
if err != nil {
return errProcInfo("num-fds", err)
}
sysProc.NumFds = numFds
numThreads, err := proc.NumThreadsWithContext(ctx)
if err != nil {
return errProcInfo("num-threads", err)
}
sysProc.NumThreads = numThreads
pageFaults, err := proc.PageFaultsWithContext(ctx)
if err != nil {
return errProcInfo("page-faults", err)
}
sysProc.PageFaults = pageFaults
parent, err := proc.ParentWithContext(ctx)
if err == nil {
sysProc.Parent = parent.Pid
}
ppid, err := proc.PpidWithContext(ctx)
if err == nil {
sysProc.Ppid = ppid
}
status, err := proc.StatusWithContext(ctx)
if err != nil {
return errProcInfo("status", err)
}
sysProc.Status = status[0]
tgid, err := proc.Tgid()
if err != nil {
return errProcInfo("tgid", err)
}
sysProc.Tgid = tgid
times, err := proc.TimesWithContext(ctx)
if err != nil {
return errProcInfo("times", err)
}
sysProc.Times = times
uids, err := proc.UidsWithContext(ctx)
if err != nil {
return errProcInfo("uids", err)
}
sysProc.Uids = uids
username, err := proc.UsernameWithContext(ctx)
if err != nil {
return errProcInfo("username", err)
}
sysProc.Username = username
sysProcs = append(sysProcs, sysProc)
}
return madmin.ServerProcInfo{
Addr: addr,
Processes: sysProcs,
func getDrivePerfInfos(ctx context.Context, addr string) madmin.DrivePerfInfos {
serialPerf := getDrivePerfInfo(ctx, false)
parallelPerf := getDrivePerfInfo(ctx, true)
return madmin.DrivePerfInfos{
Addr: addr,
SerialPerf: serialPerf,
ParallelPerf: parallelPerf,
}
}

View File

@@ -1,138 +0,0 @@
// +build linux
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/minio/madmin-go"
"github.com/minio/minio/pkg/smart"
diskhw "github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/host"
)
func getLocalOsInfo(ctx context.Context, r *http.Request) madmin.ServerOsInfo {
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
srvrOsInfo := madmin.ServerOsInfo{Addr: addr}
var err error
srvrOsInfo.Info, err = host.InfoWithContext(ctx)
if err != nil {
return madmin.ServerOsInfo{
Addr: addr,
Error: fmt.Sprintf("info: %v", err),
}
}
srvrOsInfo.Sensors, err = host.SensorsTemperaturesWithContext(ctx)
if err != nil {
// Set error only when it's not of WARNINGS type
if _, isWarning := err.(*host.Warnings); !isWarning {
srvrOsInfo.Error = fmt.Sprintf("sensors-temp: %v", err)
}
}
// ignore user err, as it cannot be obtained reliably inside containers
srvrOsInfo.Users, _ = host.UsersWithContext(ctx)
return srvrOsInfo
}
func getLocalDiskHwInfo(ctx context.Context, r *http.Request) madmin.ServerDiskHwInfo {
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
parts, err := diskhw.PartitionsWithContext(ctx, true)
if err != nil {
return madmin.ServerDiskHwInfo{
Addr: addr,
Error: fmt.Sprintf("partitions: %v", err),
}
}
drives := []string{}
paths := []string{}
partitions := []madmin.PartitionStat{}
for _, part := range parts {
device := part.Device
path := part.Mountpoint
if strings.Index(device, "/dev/") == 0 {
if strings.Contains(device, "loop") {
continue
}
if strings.Contains(device, "/dev/fuse") {
continue
}
drives = append(drives, device)
paths = append(paths, path)
smartInfo, err := smart.GetInfo(device)
if err != nil {
smartInfo.Error = fmt.Sprintf("smart: %v", err)
}
partition := madmin.PartitionStat{
Device: part.Device,
Mountpoint: part.Mountpoint,
Fstype: part.Fstype,
Opts: strings.Join(part.Opts, ","),
SmartInfo: smartInfo,
}
partitions = append(partitions, partition)
}
}
ioCounters, err := diskhw.IOCountersWithContext(ctx, drives...)
if err != nil {
return madmin.ServerDiskHwInfo{
Addr: addr,
Error: fmt.Sprintf("iocounters: %v", err),
}
}
usages := []*diskhw.UsageStat{}
for _, path := range paths {
usage, err := diskhw.UsageWithContext(ctx, path)
if err != nil {
return madmin.ServerDiskHwInfo{
Addr: addr,
Error: fmt.Sprintf("usage: %v", err),
}
}
usages = append(usages, usage)
}
return madmin.ServerDiskHwInfo{
Addr: addr,
Usage: usages,
Partitions: partitions,
Counters: ioCounters,
Error: "",
}
}

View File

@@ -1,52 +0,0 @@
// +build !linux
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"net/http"
"runtime"
"github.com/minio/madmin-go"
)
func getLocalDiskHwInfo(ctx context.Context, r *http.Request) madmin.ServerDiskHwInfo {
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
return madmin.ServerDiskHwInfo{
Addr: addr,
Error: "unsupported platform: " + runtime.GOOS,
}
}
func getLocalOsInfo(ctx context.Context, r *http.Request) madmin.ServerOsInfo {
addr := r.Host
if globalIsDistErasure {
addr = globalLocalNodeName
}
return madmin.ServerOsInfo{
Addr: addr,
Error: "unsupported platform: " + runtime.GOOS,
}
}

View File

@@ -831,8 +831,8 @@ func (sys *NotificationSys) Send(args eventArgs) {
sys.targetList.Send(args.ToEvent(true), targetIDSet, sys.targetResCh)
}
// NetInfo - Net information
func (sys *NotificationSys) NetInfo(ctx context.Context) madmin.ServerNetHealthInfo {
// GetNetPerfInfo - Net information
func (sys *NotificationSys) GetNetPerfInfo(ctx context.Context) madmin.NetPerfInfo {
var sortedGlobalEndpoints []string
/*
@@ -889,14 +889,14 @@ func (sys *NotificationSys) NetInfo(ctx context.Context) madmin.ServerNetHealthI
}
}
netInfos := make([]madmin.NetPerfInfo, len(remoteTargets))
netInfos := make([]madmin.PeerNetPerfInfo, len(remoteTargets))
for index, client := range remoteTargets {
if client == nil {
continue
}
var err error
netInfos[index], err = client.NetInfo(ctx)
netInfos[index], err = client.GetNetPerfInfo(ctx)
addr := client.host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
@@ -907,15 +907,15 @@ func (sys *NotificationSys) NetInfo(ctx context.Context) madmin.ServerNetHealthI
netInfos[index].Error = err.Error()
}
}
return madmin.ServerNetHealthInfo{
Net: netInfos,
Addr: globalLocalNodeName,
return madmin.NetPerfInfo{
Addr: globalLocalNodeName,
RemotePeers: netInfos,
}
}
// DispatchNetPerfInfo - Net perf information from other nodes
func (sys *NotificationSys) DispatchNetPerfInfo(ctx context.Context) []madmin.ServerNetHealthInfo {
serverNetInfos := []madmin.ServerNetHealthInfo{}
func (sys *NotificationSys) DispatchNetPerfInfo(ctx context.Context) []madmin.NetPerfInfo {
serverNetInfos := []madmin.NetPerfInfo{}
for index, client := range sys.peerClients {
if client == nil {
@@ -932,8 +932,8 @@ func (sys *NotificationSys) DispatchNetPerfInfo(ctx context.Context) []madmin.Se
}
// DispatchNetPerfChan - Net perf information from other nodes
func (sys *NotificationSys) DispatchNetPerfChan(ctx context.Context) chan madmin.ServerNetHealthInfo {
serverNetInfos := make(chan madmin.ServerNetHealthInfo)
func (sys *NotificationSys) DispatchNetPerfChan(ctx context.Context) chan madmin.NetPerfInfo {
serverNetInfos := make(chan madmin.NetPerfInfo)
wg := sync.WaitGroup{}
wg.Add(1)
@@ -960,9 +960,9 @@ func (sys *NotificationSys) DispatchNetPerfChan(ctx context.Context) chan madmin
return serverNetInfos
}
// NetPerfParallelInfo - Performs Net parallel tests
func (sys *NotificationSys) NetPerfParallelInfo(ctx context.Context) madmin.ServerNetHealthInfo {
netInfos := []madmin.NetPerfInfo{}
// GetParallelNetPerfInfo - Performs Net parallel tests
func (sys *NotificationSys) GetParallelNetPerfInfo(ctx context.Context) madmin.NetPerfInfo {
netInfos := []madmin.PeerNetPerfInfo{}
wg := sync.WaitGroup{}
for index, client := range sys.peerClients {
@@ -972,7 +972,7 @@ func (sys *NotificationSys) NetPerfParallelInfo(ctx context.Context) madmin.Serv
wg.Add(1)
go func(index int) {
netInfo, err := sys.peerClients[index].NetInfo(ctx)
netInfo, err := sys.peerClients[index].GetNetPerfInfo(ctx)
netInfo.Addr = sys.peerClients[index].host.String()
if err != nil {
netInfo.Error = err.Error()
@@ -982,46 +982,15 @@ func (sys *NotificationSys) NetPerfParallelInfo(ctx context.Context) madmin.Serv
}(index)
}
wg.Wait()
return madmin.ServerNetHealthInfo{
Net: netInfos,
Addr: globalLocalNodeName,
return madmin.NetPerfInfo{
Addr: globalLocalNodeName,
RemotePeers: netInfos,
}
}
// DrivePerfInfo - Drive perf information
func (sys *NotificationSys) DrivePerfInfo(ctx context.Context) []madmin.ServerDrivesInfo {
reply := make([]madmin.ServerDrivesInfo, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
if client == nil {
continue
}
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].DriveInfo(ctx)
return err
}, index)
}
for index, err := range g.Wait() {
if err != nil {
addr := sys.peerClients[index].host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
reply[index].Addr = addr
reply[index].Error = err.Error()
}
}
return reply
}
// DrivePerfInfoChan - Drive perf information
func (sys *NotificationSys) DrivePerfInfoChan(ctx context.Context) chan madmin.ServerDrivesInfo {
updateChan := make(chan madmin.ServerDrivesInfo)
// GetDrivePerfInfos - Drive performance information
func (sys *NotificationSys) GetDrivePerfInfos(ctx context.Context) chan madmin.DrivePerfInfos {
updateChan := make(chan madmin.DrivePerfInfos)
wg := sync.WaitGroup{}
for _, client := range sys.peerClients {
@@ -1030,7 +999,7 @@ func (sys *NotificationSys) DrivePerfInfoChan(ctx context.Context) chan madmin.S
}
wg.Add(1)
go func(client *peerRESTClient) {
reply, err := client.DriveInfo(ctx)
reply, err := client.GetDrivePerfInfos(ctx)
addr := client.host.String()
reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
@@ -1055,9 +1024,9 @@ func (sys *NotificationSys) DrivePerfInfoChan(ctx context.Context) chan madmin.S
return updateChan
}
// CPUInfo - CPU information
func (sys *NotificationSys) CPUInfo(ctx context.Context) []madmin.ServerCPUInfo {
reply := make([]madmin.ServerCPUInfo, len(sys.peerClients))
// GetCPUs - Get all CPU information.
func (sys *NotificationSys) GetCPUs(ctx context.Context) []madmin.CPUs {
reply := make([]madmin.CPUs, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
@@ -1067,7 +1036,7 @@ func (sys *NotificationSys) CPUInfo(ctx context.Context) []madmin.ServerCPUInfo
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].CPUInfo(ctx)
reply[index], err = sys.peerClients[index].GetCPUs(ctx)
return err
}, index)
}
@@ -1085,9 +1054,9 @@ func (sys *NotificationSys) CPUInfo(ctx context.Context) []madmin.ServerCPUInfo
return reply
}
// DiskHwInfo - Disk HW information
func (sys *NotificationSys) DiskHwInfo(ctx context.Context) []madmin.ServerDiskHwInfo {
reply := make([]madmin.ServerDiskHwInfo, len(sys.peerClients))
// GetPartitions - Disk partition information
func (sys *NotificationSys) GetPartitions(ctx context.Context) []madmin.Partitions {
reply := make([]madmin.Partitions, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
@@ -1097,7 +1066,7 @@ func (sys *NotificationSys) DiskHwInfo(ctx context.Context) []madmin.ServerDiskH
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].DiskHwInfo(ctx)
reply[index], err = sys.peerClients[index].GetPartitions(ctx)
return err
}, index)
}
@@ -1115,9 +1084,9 @@ func (sys *NotificationSys) DiskHwInfo(ctx context.Context) []madmin.ServerDiskH
return reply
}
// OsInfo - Os information
func (sys *NotificationSys) OsInfo(ctx context.Context) []madmin.ServerOsInfo {
reply := make([]madmin.ServerOsInfo, len(sys.peerClients))
// GetOSInfo - Get operating system's information
func (sys *NotificationSys) GetOSInfo(ctx context.Context) []madmin.OSInfo {
reply := make([]madmin.OSInfo, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
@@ -1127,7 +1096,7 @@ func (sys *NotificationSys) OsInfo(ctx context.Context) []madmin.ServerOsInfo {
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].OsInfo(ctx)
reply[index], err = sys.peerClients[index].GetOSInfo(ctx)
return err
}, index)
}
@@ -1145,9 +1114,9 @@ func (sys *NotificationSys) OsInfo(ctx context.Context) []madmin.ServerOsInfo {
return reply
}
// MemInfo - Mem information
func (sys *NotificationSys) MemInfo(ctx context.Context) []madmin.ServerMemInfo {
reply := make([]madmin.ServerMemInfo, len(sys.peerClients))
// GetMemInfo - Memory information
func (sys *NotificationSys) GetMemInfo(ctx context.Context) []madmin.MemInfo {
reply := make([]madmin.MemInfo, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
@@ -1157,7 +1126,7 @@ func (sys *NotificationSys) MemInfo(ctx context.Context) []madmin.ServerMemInfo
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].MemInfo(ctx)
reply[index], err = sys.peerClients[index].GetMemInfo(ctx)
return err
}, index)
}
@@ -1175,9 +1144,9 @@ func (sys *NotificationSys) MemInfo(ctx context.Context) []madmin.ServerMemInfo
return reply
}
// ProcInfo - Process information
func (sys *NotificationSys) ProcInfo(ctx context.Context) []madmin.ServerProcInfo {
reply := make([]madmin.ServerProcInfo, len(sys.peerClients))
// GetProcInfo - Process information
func (sys *NotificationSys) GetProcInfo(ctx context.Context) []madmin.ProcInfo {
reply := make([]madmin.ProcInfo, len(sys.peerClients))
g := errgroup.WithNErrs(len(sys.peerClients))
for index, client := range sys.peerClients {
@@ -1187,7 +1156,7 @@ func (sys *NotificationSys) ProcInfo(ctx context.Context) []madmin.ServerProcInf
index := index
g.Go(func() error {
var err error
reply[index], err = sys.peerClients[index].ProcInfo(ctx)
reply[index], err = sys.peerClients[index].GetProcInfo(ctx)
return err
}, index)
}

View File

@@ -125,7 +125,7 @@ func (r *nullReader) Read(b []byte) (int, error) {
return len(b), nil
}
func (client *peerRESTClient) doNetTest(ctx context.Context, dataSize int64, threadCount uint) (info madmin.NetPerfInfo, err error) {
func (client *peerRESTClient) doNetTest(ctx context.Context, dataSize int64, threadCount uint) (info madmin.PeerNetPerfInfo, err error) {
var mu sync.Mutex // mutex used to protect these slices in go-routines
latencies := []float64{}
throughputs := []float64{}
@@ -228,12 +228,24 @@ func (client *peerRESTClient) doNetTest(ctx context.Context, dataSize int64, thr
}
latency, throughput, err := xnet.ComputePerfStats(latencies, throughputs)
info = madmin.NetPerfInfo{
Latency: latency,
Throughput: throughput,
}
return info, err
return madmin.PeerNetPerfInfo{
Latency: madmin.Latency{
Avg: round(latency.Avg, 3),
Max: round(latency.Max, 3),
Min: round(latency.Min, 3),
Percentile50: round(latency.Percentile50, 3),
Percentile90: round(latency.Percentile90, 3),
Percentile99: round(latency.Percentile99, 3),
},
Throughput: madmin.Throughput{
Avg: uint64(round(throughput.Avg, 0)),
Max: uint64(round(throughput.Max, 0)),
Min: uint64(round(throughput.Min, 0)),
Percentile50: uint64(round(throughput.Percentile50, 0)),
Percentile90: uint64(round(throughput.Percentile90, 0)),
Percentile99: uint64(round(throughput.Percentile99, 0)),
},
}, nil
}
func maxLatencyForSizeThreads(size int64, threadCount uint) float64 {
@@ -275,8 +287,8 @@ func maxLatencyForSizeThreads(size int64, threadCount uint) float64 {
return math.MaxFloat64
}
// NetInfo - fetch Net information for a remote node.
func (client *peerRESTClient) NetInfo(ctx context.Context) (info madmin.NetPerfInfo, err error) {
// GetNetPerfInfo - fetch network information for a remote node.
func (client *peerRESTClient) GetNetPerfInfo(ctx context.Context) (info madmin.PeerNetPerfInfo, err error) {
// 100 Gbit -> 256 MiB * 50 threads
// 40 Gbit -> 256 MiB * 20 threads
@@ -330,7 +342,7 @@ func (client *peerRESTClient) NetInfo(ctx context.Context) (info madmin.NetPerfI
}
// DispatchNetInfo - dispatch other nodes to run Net info.
func (client *peerRESTClient) DispatchNetInfo(ctx context.Context) (info madmin.ServerNetHealthInfo, err error) {
func (client *peerRESTClient) DispatchNetInfo(ctx context.Context) (info madmin.NetPerfInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodDispatchNetInfo, nil, nil, -1)
if err != nil {
return
@@ -344,8 +356,8 @@ func (client *peerRESTClient) DispatchNetInfo(ctx context.Context) (info madmin.
return
}
// DriveInfo - fetch Drive information for a remote node.
func (client *peerRESTClient) DriveInfo(ctx context.Context) (info madmin.ServerDrivesInfo, err error) {
// GetDrivePerfInfos - fetch all disks' serial/parallel performance information for a remote node.
func (client *peerRESTClient) GetDrivePerfInfos(ctx context.Context) (info madmin.DrivePerfInfos, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodDriveInfo, nil, nil, -1)
if err != nil {
return
@@ -355,8 +367,8 @@ func (client *peerRESTClient) DriveInfo(ctx context.Context) (info madmin.Server
return info, err
}
// CPUInfo - fetch CPU information for a remote node.
func (client *peerRESTClient) CPUInfo(ctx context.Context) (info madmin.ServerCPUInfo, err error) {
// GetCPUs - fetch CPU information for a remote node.
func (client *peerRESTClient) GetCPUs(ctx context.Context) (info madmin.CPUs, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodCPUInfo, nil, nil, -1)
if err != nil {
return
@@ -366,8 +378,8 @@ func (client *peerRESTClient) CPUInfo(ctx context.Context) (info madmin.ServerCP
return info, err
}
// DiskHwInfo - fetch Disk HW information for a remote node.
func (client *peerRESTClient) DiskHwInfo(ctx context.Context) (info madmin.ServerDiskHwInfo, err error) {
// GetPartitions - fetch disk partition information for a remote node.
func (client *peerRESTClient) GetPartitions(ctx context.Context) (info madmin.Partitions, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodDiskHwInfo, nil, nil, -1)
if err != nil {
return
@@ -377,8 +389,8 @@ func (client *peerRESTClient) DiskHwInfo(ctx context.Context) (info madmin.Serve
return info, err
}
// OsInfo - fetch OS information for a remote node.
func (client *peerRESTClient) OsInfo(ctx context.Context) (info madmin.ServerOsInfo, err error) {
// GetOSInfo - fetch OS information for a remote node.
func (client *peerRESTClient) GetOSInfo(ctx context.Context) (info madmin.OSInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodOsInfo, nil, nil, -1)
if err != nil {
return
@@ -388,8 +400,8 @@ func (client *peerRESTClient) OsInfo(ctx context.Context) (info madmin.ServerOsI
return info, err
}
// MemInfo - fetch Memory information for a remote node.
func (client *peerRESTClient) MemInfo(ctx context.Context) (info madmin.ServerMemInfo, err error) {
// GetMemInfo - fetch memory information for a remote node.
func (client *peerRESTClient) GetMemInfo(ctx context.Context) (info madmin.MemInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodMemInfo, nil, nil, -1)
if err != nil {
return
@@ -399,8 +411,8 @@ func (client *peerRESTClient) MemInfo(ctx context.Context) (info madmin.ServerMe
return info, err
}
// ProcInfo - fetch Process information for a remote node.
func (client *peerRESTClient) ProcInfo(ctx context.Context) (info madmin.ServerProcInfo, err error) {
// GetProcInfo - fetch MinIO process information for a remote node.
func (client *peerRESTClient) GetProcInfo(ctx context.Context) (info madmin.ProcInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodProcInfo, nil, nil, -1)
if err != nil {
return

View File

@@ -402,15 +402,15 @@ func (s *peerRESTServer) DispatchNetInfoHandler(w http.ResponseWriter, r *http.R
done := keepHTTPResponseAlive(w)
ctx := newContext(r, w, "DispatchNetInfo")
info := globalNotificationSys.NetInfo(ctx)
info := globalNotificationSys.GetNetPerfInfo(ctx)
done(nil)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
w.(http.Flusher).Flush()
}
// DriveInfoHandler - returns Drive info.
func (s *peerRESTServer) DriveInfoHandler(w http.ResponseWriter, r *http.Request) {
// GetDrivePerfInfosHandler - returns all disks' serial/parallel performance information.
func (s *peerRESTServer) GetDrivePerfInfosHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
@@ -418,28 +418,15 @@ func (s *peerRESTServer) DriveInfoHandler(w http.ResponseWriter, r *http.Request
ctx, cancel := context.WithCancel(newContext(r, w, "DriveInfo"))
defer cancel()
infoSerial := getLocalDrives(ctx, false, globalEndpoints, r)
infoParallel := getLocalDrives(ctx, true, globalEndpoints, r)
errStr := ""
if infoSerial.Error != "" {
errStr = "serial: " + infoSerial.Error
}
if infoParallel.Error != "" {
errStr = errStr + " parallel: " + infoParallel.Error
}
info := madmin.ServerDrivesInfo{
Addr: infoSerial.Addr,
Serial: infoSerial.Serial,
Parallel: infoParallel.Parallel,
Error: errStr,
}
info := getDrivePerfInfos(ctx, r.Host)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}
// CPUInfoHandler - returns CPU info.
func (s *peerRESTServer) CPUInfoHandler(w http.ResponseWriter, r *http.Request) {
// GetCPUsHandler - returns CPU info.
func (s *peerRESTServer) GetCPUsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
@@ -448,14 +435,14 @@ func (s *peerRESTServer) CPUInfoHandler(w http.ResponseWriter, r *http.Request)
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := getLocalCPUInfo(ctx, r)
info := madmin.GetCPUs(ctx, r.Host)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}
// DiskHwInfoHandler - returns Disk HW info.
func (s *peerRESTServer) DiskHwInfoHandler(w http.ResponseWriter, r *http.Request) {
// GetPartitionsHandler - returns disk partition information.
func (s *peerRESTServer) GetPartitionsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
@@ -464,14 +451,14 @@ func (s *peerRESTServer) DiskHwInfoHandler(w http.ResponseWriter, r *http.Reques
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := getLocalDiskHwInfo(ctx, r)
info := madmin.GetPartitions(ctx, r.Host)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}
// OsInfoHandler - returns Os info.
func (s *peerRESTServer) OsInfoHandler(w http.ResponseWriter, r *http.Request) {
// GetOSInfoHandler - returns operating system's information.
func (s *peerRESTServer) GetOSInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
@@ -480,14 +467,14 @@ func (s *peerRESTServer) OsInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := getLocalOsInfo(ctx, r)
info := madmin.GetOSInfo(ctx, r.Host)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}
// ProcInfoHandler - returns Proc info.
func (s *peerRESTServer) ProcInfoHandler(w http.ResponseWriter, r *http.Request) {
// GetProcInfoHandler - returns this MinIO process information.
func (s *peerRESTServer) GetProcInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
@@ -496,14 +483,14 @@ func (s *peerRESTServer) ProcInfoHandler(w http.ResponseWriter, r *http.Request)
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := getLocalProcInfo(ctx, r)
info := madmin.GetProcInfo(ctx, r.Host)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}
// MemInfoHandler - returns Memory info.
func (s *peerRESTServer) MemInfoHandler(w http.ResponseWriter, r *http.Request) {
// GetMemInfoHandler - returns memory information.
func (s *peerRESTServer) GetMemInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
@@ -512,7 +499,7 @@ func (s *peerRESTServer) MemInfoHandler(w http.ResponseWriter, r *http.Request)
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := getLocalMemInfo(ctx, r)
info := madmin.GetMemInfo(ctx, r.Host)
defer w.(http.Flusher).Flush()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
@@ -1117,12 +1104,12 @@ func registerPeerRESTHandlers(router *mux.Router) {
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHealth).HandlerFunc(httpTraceHdrs(server.HealthHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(httpTraceHdrs(server.ProcInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(httpTraceHdrs(server.MemInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodOsInfo).HandlerFunc(httpTraceHdrs(server.OsInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDiskHwInfo).HandlerFunc(httpTraceHdrs(server.DiskHwInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCPUInfo).HandlerFunc(httpTraceHdrs(server.CPUInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDriveInfo).HandlerFunc(httpTraceHdrs(server.DriveInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(httpTraceHdrs(server.GetProcInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(httpTraceHdrs(server.GetMemInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodOsInfo).HandlerFunc(httpTraceHdrs(server.GetOSInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDiskHwInfo).HandlerFunc(httpTraceHdrs(server.GetPartitionsHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCPUInfo).HandlerFunc(httpTraceHdrs(server.GetCPUsHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDriveInfo).HandlerFunc(httpTraceHdrs(server.GetDrivePerfInfosHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetInfo).HandlerFunc(httpTraceHdrs(server.NetInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDispatchNetInfo).HandlerFunc(httpTraceHdrs(server.DispatchNetInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCycleBloom).HandlerFunc(httpTraceHdrs(server.CycleServerBloomFilterHandler))