fix: remove unused PerfInfoHandler code (#9328)
- Remove the PerfInfo admin API, as it is not OBDInfo.
- Keep the drive path without the metaBucket in the OBD global latency map.
- Remove all the unused code related to the PerfInfo API.
- Do not redefine global miB/giB constants; always use humanize.MiByte and humanize.GiByte instead (see the sketch after this list).
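For context, a minimal standalone sketch (not part of this commit) of what the last point looks like in practice: the shared go-humanize IEC constants are used instead of package-local kiB/miB/giB definitions. The example values mirror the 100 Gbit step sizes that appear in the peer-rest-client hunks below.

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// Shared IEC constants from go-humanize replace the package-local
	// kiB/miB/giB definitions that this commit deletes.
	payload := int64(256 * humanize.MiByte)        // per-thread payload used for the 100 Gbit step
	pipe100Gbit := 12.5 * float64(humanize.GiByte) // 100 Gbit/s expressed in bytes/s

	fmt.Println(payload, pipe100Gbit)
}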
@@ -44,12 +44,10 @@ import (
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/logger/message/log"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/cpu"
	"github.com/minio/minio/pkg/event/target"
	"github.com/minio/minio/pkg/handlers"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/mem"
	xnet "github.com/minio/minio/pkg/net"
	trace "github.com/minio/minio/pkg/trace"
)
@@ -378,155 +376,6 @@ func (a adminAPIHandlers) AccountingUsageInfoHandler(w http.ResponseWriter, r *h
	writeSuccessResponseJSON(w, usageInfoJSON)
}

// ServerCPULoadInfo holds informantion about cpu utilization
// of one minio node. It also reports any errors if encountered
// while trying to reach this server.
type ServerCPULoadInfo struct {
	Addr string `json:"addr"`
	Error string `json:"error,omitempty"`
	Load []cpu.Load `json:"load"`
	HistoricLoad []cpu.Load `json:"historicLoad"`
}

// ServerMemUsageInfo holds informantion about memory utilization
// of one minio node. It also reports any errors if encountered
// while trying to reach this server.
type ServerMemUsageInfo struct {
	Addr string `json:"addr"`
	Error string `json:"error,omitempty"`
	Usage []mem.Usage `json:"usage"`
	HistoricUsage []mem.Usage `json:"historicUsage"`
}

// ServerNetReadPerfInfo network read performance information.
type ServerNetReadPerfInfo struct {
	Addr string `json:"addr"`
	ReadThroughput uint64 `json:"readThroughput"`
	Error string `json:"error,omitempty"`
}

// PerfInfoHandler - GET /minio/admin/v3/performance?perfType={perfType}
// ----------
// Get all performance information based on input type
// Supported types = drive
func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PerfInfo")

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.PerfInfoAdminAction)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	switch perfType := vars["perfType"]; perfType {
	case "net":
		var size int64 = defaultNetPerfSize
		if sizeStr, found := vars["size"]; found {
			var err error
			if size, err = strconv.ParseInt(sizeStr, 10, 64); err != nil || size < 0 {
				writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
				return
			}
		}

		if !globalIsDistXL {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
			return
		}

		addr := r.Host
		if globalIsDistXL {
			addr = GetLocalPeer(globalEndpoints)
		}

		infos := map[string][]ServerNetReadPerfInfo{}
		infos[addr] = globalNotificationSys.NetReadPerfInfo(size)
		for peer, info := range globalNotificationSys.CollectNetPerfInfo(size) {
			infos[peer] = info
		}

		// Marshal API response
		jsonBytes, err := json.Marshal(infos)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Reply with performance information (across nodes in a
		// distributed setup) as json.
		writeSuccessResponseJSON(w, jsonBytes)

	case "drive":
		// Drive Perf is only implemented for Erasure coded backends
		if !globalIsXL {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
			return
		}

		var size int64 = madmin.DefaultDrivePerfSize
		if sizeStr, found := vars["size"]; found {
			var err error
			if size, err = strconv.ParseInt(sizeStr, 10, 64); err != nil || size <= 0 {
				writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
				return
			}
		}
		// Get drive performance details from local server's drive(s)
		dp := getLocalDrivesPerf(globalEndpoints, size, r)

		// Notify all other MinIO peers to report drive performance numbers
		dps := globalNotificationSys.DrivePerfInfo(size)
		dps = append(dps, dp)

		// Marshal API response
		jsonBytes, err := json.Marshal(dps)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Reply with performance information (across nodes in a
		// distributed setup) as json.
		writeSuccessResponseJSON(w, jsonBytes)
	case "cpu":
		// Get CPU load details from local server's cpu(s)
		cpu := getLocalCPULoad(globalEndpoints, r)
		// Notify all other MinIO peers to report cpu load numbers
		cpus := globalNotificationSys.CPULoadInfo()
		cpus = append(cpus, cpu)

		// Marshal API response
		jsonBytes, err := json.Marshal(cpus)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Reply with cpu load information (across nodes in a
		// distributed setup) as json.
		writeSuccessResponseJSON(w, jsonBytes)
	case "mem":
		// Get mem usage details from local server(s)
		m := getLocalMemUsage(globalEndpoints, r)
		// Notify all other MinIO peers to report mem usage numbers
		mems := globalNotificationSys.MemUsageInfo()
		mems = append(mems, m)

		// Marshal API response
		jsonBytes, err := json.Marshal(mems)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Reply with mem usage information (across nodes in a
		// distributed setup) as json.
		writeSuccessResponseJSON(w, jsonBytes)
	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
	}
}

func newLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEntry {
	entry := &madmin.LockEntry{
		Timestamp: l.Timestamp,
@@ -1315,63 +1164,6 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
	writeSuccessResponseJSON(w, resp)
}

// ServerHardwareInfoHandler - GET /minio/admin/v3/hardwareinfo?Type={hwType}
// ----------
// Get all hardware information based on input type
// Supported types = cpu
func (a adminAPIHandlers) ServerHardwareInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HardwareInfo")

	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerHardwareInfoAdminAction)
	if objectAPI == nil {
		return
	}

	vars := mux.Vars(r)
	hardware := vars[madmin.HARDWARE]

	switch madmin.HardwareType(hardware) {
	case madmin.CPU:
		// Get CPU hardware details from local server's cpu(s)
		cpu := getLocalCPUInfo(globalEndpoints, r)
		// Notify all other MinIO peers to report cpu hardware
		cpus := globalNotificationSys.CPUInfo()
		cpus = append(cpus, cpu)

		// Marshal API response
		jsonBytes, err := json.Marshal(cpus)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Reply with cpu hardware information (across nodes in a
		// distributed setup) as json.
		writeSuccessResponseJSON(w, jsonBytes)

	case madmin.NETWORK:
		// Get Network hardware details from local server's network(s)
		network := getLocalNetworkInfo(globalEndpoints, r)
		// Notify all other MinIO peers to report network hardware
		networks := globalNotificationSys.NetworkInfo()
		networks = append(networks, network)

		// Marshal API response
		jsonBytes, err := json.Marshal(networks)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Reply with cpu network information (across nodes in a
		// distributed setup) as json.
		writeSuccessResponseJSON(w, jsonBytes)

	default:
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
	}
}

// OBDInfoHandler - GET /minio/admin/v3/obdinfo
// ----------
// Get server on-board diagnostics
@@ -56,8 +56,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)

	// Info operations
	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
	// Harware Info operations
	adminRouter.Methods(http.MethodGet).Path(adminVersion+"/hardware").HandlerFunc(httpTraceAll(adminAPI.ServerHardwareInfoHandler)).Queries("hwType", "{hwType:.*}")

	// StorageInfo operations
	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
@@ -79,8 +77,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
	/// Health operations

	}
	// Performance command - return performance details based on input type
	adminRouter.Methods(http.MethodGet).Path(adminVersion+"/performance").HandlerFunc(httpTraceAll(adminAPI.PerfInfoHandler)).Queries("perfType", "{perfType:.*}")

	// Profiling operations
	adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
@@ -17,176 +17,14 @@
package cmd

import (
	"net"
	"net/http"
	"os"

	"github.com/minio/minio-go/v6/pkg/set"
	"github.com/minio/minio/pkg/cpu"
	"github.com/minio/minio/pkg/disk"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/mem"

	cpuhw "github.com/shirou/gopsutil/cpu"
)

// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
	var memUsages []mem.Usage
	var historicUsages []mem.Usage
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			seenHosts.Add(endpoint.Host)

			// Only proceed for local endpoints
			if endpoint.IsLocal {
				memUsages = append(memUsages, mem.GetUsage())
				historicUsages = append(historicUsages, mem.GetHistoricUsage())
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}
	return ServerMemUsageInfo{
		Addr: addr,
		Usage: memUsages,
		HistoricUsage: historicUsages,
	}
}

// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
	var cpuLoads []cpu.Load
	var historicLoads []cpu.Load
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			seenHosts.Add(endpoint.Host)

			// Only proceed for local endpoints
			if endpoint.IsLocal {
				cpuLoads = append(cpuLoads, cpu.GetLoad())
				historicLoads = append(historicLoads, cpu.GetHistoricLoad())
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}
	return ServerCPULoadInfo{
		Addr: addr,
		Load: cpuLoads,
		HistoricLoad: historicLoads,
	}
}

// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
	var dps []disk.Performance
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				if _, err := os.Stat(endpoint.Path); err != nil {
					// Since this drive is not available, add relevant details and proceed
					dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
					continue
				}
				dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
				dp.Path = endpoint.Path
				dps = append(dps, dp)
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}
	return madmin.ServerDrivesPerfInfo{
		Addr: addr,
		Perf: dps,
	}
}

// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
	var cpuHardwares []cpuhw.InfoStat
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			// Add to the list of visited hosts
			seenHosts.Add(endpoint.Host)
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				cpuHardware, err := cpuhw.Info()
				if err != nil {
					return madmin.ServerCPUHardwareInfo{
						Error: err.Error(),
					}
				}
				cpuHardwares = append(cpuHardwares, cpuHardware...)
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}

	return madmin.ServerCPUHardwareInfo{
		Addr: addr,
		CPUInfo: cpuHardwares,
	}
}

// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
	var networkHardwares []net.Interface
	seenHosts := set.NewStringSet()
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			// Add to the list of visited hosts
			seenHosts.Add(endpoint.Host)
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				networkHardware, err := net.Interfaces()
				if err != nil {
					return madmin.ServerNetworkHardwareInfo{
						Error: err.Error(),
					}
				}
				networkHardwares = append(networkHardwares, networkHardware...)
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpointZones)
	}

	return madmin.ServerNetworkHardwareInfo{
		Addr: addr,
		NetworkInfo: networkHardwares,
	}
}

// getLocalServerProperty - returns ServerDrivesPerfInfo for only the
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
	var disks []madmin.Disk
@@ -830,55 +830,6 @@ func (sys *NotificationSys) PutBucketObjectLockConfig(ctx context.Context, bucke
	}
}

// NetReadPerfInfo - Network read performance information.
func (sys *NotificationSys) NetReadPerfInfo(size int64) []ServerNetReadPerfInfo {
	reply := make([]ServerNetReadPerfInfo, len(sys.peerClients))

	// Execution is done serially.
	for i, client := range sys.peerClients {
		if client == nil {
			continue
		}

		info, err := client.NetReadPerfInfo(size)
		if err != nil {
			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", client.host.String())
			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
			logger.LogIf(ctx, err)

			info.Addr = client.host.String()
			info.Error = err.Error()
		}

		reply[i] = info
	}

	return reply
}

// CollectNetPerfInfo - Collect network performance information of all peers.
func (sys *NotificationSys) CollectNetPerfInfo(size int64) map[string][]ServerNetReadPerfInfo {
	reply := map[string][]ServerNetReadPerfInfo{}

	// Execution is done serially.
	for _, client := range sys.peerClients {
		if client == nil {
			continue
		}

		info, err := client.CollectNetPerfInfo(size)
		if err != nil {
			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", client.host.String())
			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
			logger.LogIf(ctx, err)
		}

		reply[client.host.String()] = info
	}

	return reply
}

// NetOBDInfo - Net OBD information
func (sys *NotificationSys) NetOBDInfo(ctx context.Context) madmin.ServerNetOBDInfo {
	var sortedGlobalEndpoints []string
@@ -1188,142 +1139,6 @@ func (sys *NotificationSys) ProcOBDInfo(ctx context.Context) []madmin.ServerProc
	return reply
}

// DrivePerfInfo - Drive speed (read and write) information
func (sys *NotificationSys) DrivePerfInfo(size int64) []madmin.ServerDrivesPerfInfo {
	reply := make([]madmin.ServerDrivesPerfInfo, len(sys.peerClients))

	g := errgroup.WithNErrs(len(sys.peerClients))
	for index, client := range sys.peerClients {
		if client == nil {
			continue
		}
		index := index
		g.Go(func() error {
			var err error
			reply[index], err = sys.peerClients[index].DrivePerfInfo(size)
			return err
		}, index)
	}

	for index, err := range g.Wait() {
		if err != nil {
			addr := sys.peerClients[index].host.String()
			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
			logger.LogIf(ctx, err)
			reply[index].Addr = addr
			reply[index].Error = err.Error()
		}
	}
	return reply
}

// MemUsageInfo - Mem utilization information
func (sys *NotificationSys) MemUsageInfo() []ServerMemUsageInfo {
	reply := make([]ServerMemUsageInfo, len(sys.peerClients))

	g := errgroup.WithNErrs(len(sys.peerClients))
	for index, client := range sys.peerClients {
		if client == nil {
			continue
		}
		index := index
		g.Go(func() error {
			var err error
			reply[index], err = sys.peerClients[index].MemUsageInfo()
			return err
		}, index)
	}

	for index, err := range g.Wait() {
		if err != nil {
			addr := sys.peerClients[index].host.String()
			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
			logger.LogIf(ctx, err)
			reply[index].Addr = addr
			reply[index].Error = err.Error()
		}
	}
	return reply
}

// CPULoadInfo - CPU utilization information
func (sys *NotificationSys) CPULoadInfo() []ServerCPULoadInfo {
	reply := make([]ServerCPULoadInfo, len(sys.peerClients))

	g := errgroup.WithNErrs(len(sys.peerClients))
	for index, client := range sys.peerClients {
		if client == nil {
			continue
		}
		index := index
		g.Go(func() error {
			var err error
			reply[index], err = sys.peerClients[index].CPULoadInfo()
			return err
		}, index)
	}

	for index, err := range g.Wait() {
		if err != nil {
			addr := sys.peerClients[index].host.String()
			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
			logger.LogIf(ctx, err)
			reply[index].Addr = addr
			reply[index].Error = err.Error()
		}
	}
	return reply
}

// CPUInfo - CPU Hardware info
func (sys *NotificationSys) CPUInfo() []madmin.ServerCPUHardwareInfo {
	reply := make([]madmin.ServerCPUHardwareInfo, len(sys.peerClients))
	var wg sync.WaitGroup
	for i, client := range sys.peerClients {
		if client == nil {
			continue
		}
		wg.Add(1)
		go func(client *peerRESTClient, idx int) {
			defer wg.Done()
			cpui, err := client.CPUInfo()
			if err != nil {
				cpui.Addr = client.host.String()
				cpui.Error = err.Error()
			}
			reply[idx] = cpui
		}(client, i)
	}
	wg.Wait()
	return reply
}

// NetworkInfo - Network Hardware info
func (sys *NotificationSys) NetworkInfo() []madmin.ServerNetworkHardwareInfo {
	reply := make([]madmin.ServerNetworkHardwareInfo, len(sys.peerClients))
	var wg sync.WaitGroup
	for i, client := range sys.peerClients {
		if client == nil {
			continue
		}
		wg.Add(1)
		go func(client *peerRESTClient, idx int) {
			defer wg.Done()
			netinfo, err := client.NetworkInfo()
			if err != nil {
				netinfo.Addr = client.host.String()
				netinfo.Error = err.Error()
			}
			reply[idx] = netinfo
		}(client, i)
	}
	wg.Wait()
	return reply
}

// ServerInfo - calls ServerInfo RPC call on all peers.
func (sys *NotificationSys) ServerInfo() []madmin.ServerProperties {
	reply := make([]madmin.ServerProperties, len(sys.peerClients))
@@ -80,16 +80,16 @@ func getLocalDrivesOBD(ctx context.Context, parallel bool, endpointZones Endpoin
			})
			continue
		}
		measurePath := pathJoin(minioMetaTmpBucket, mustGetUUID())
		measure := func(index int, path string) {
			latency, throughput, err := disk.GetOBDInfo(ctx, pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()))
			driveOBDInfo := madmin.DriveOBDInfo{
				Path: path,
				Latency: latency,
				Throughput: throughput,
			}
			var driveOBDInfo madmin.DriveOBDInfo
			latency, throughput, err := disk.GetOBDInfo(ctx, path, pathJoin(path, measurePath))
			if err != nil {
				driveOBDInfo.Error = err.Error()
			}
			driveOBDInfo.Path = path
			driveOBDInfo.Latency = latency
			driveOBDInfo.Throughput = throughput
			drivesOBDInfo = append(drivesOBDInfo, driveOBDInfo)
			wg.Done()
		}
@@ -24,13 +24,13 @@ import (
	"io"
	"io/ioutil"
	"math"
	"math/rand"
	"net/url"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/rest"
@@ -44,12 +44,6 @@ import (
	trace "github.com/minio/minio/pkg/trace"
)

const (
	kiB int64 = 1 << 10
	miB int64 = kiB << 10
	giB int64 = miB << 10
)

// client to talk to peer Nodes.
type peerRESTClient struct {
	host *xnet.Host
@@ -113,37 +107,6 @@ func (client *peerRESTClient) Close() error {
// GetLocksResp stores various info from the client for each lock that is requested.
type GetLocksResp []map[string][]lockRequesterInfo

// NetReadPerfInfo - fetch network read performance information for a remote node.
func (client *peerRESTClient) NetReadPerfInfo(size int64) (info ServerNetReadPerfInfo, err error) {
	params := make(url.Values)
	params.Set(peerRESTNetPerfSize, strconv.FormatInt(size, 10))
	respBody, err := client.call(
		peerRESTMethodNetReadPerfInfo,
		params,
		rand.New(rand.NewSource(time.Now().UnixNano())),
		size,
	)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// CollectNetPerfInfo - collect network performance information of other peers.
func (client *peerRESTClient) CollectNetPerfInfo(size int64) (info []ServerNetReadPerfInfo, err error) {
	params := make(url.Values)
	params.Set(peerRESTNetPerfSize, strconv.FormatInt(size, 10))
	respBody, err := client.call(peerRESTMethodCollectNetPerfInfo, params, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// GetLocks - fetch older locks for a remote node.
func (client *peerRESTClient) GetLocks() (locks GetLocksResp, err error) {
	respBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)
@@ -166,39 +129,6 @@ func (client *peerRESTClient) ServerInfo() (info madmin.ServerProperties, err er
	return info, err
}

// CPULoadInfo - fetch CPU information for a remote node.
func (client *peerRESTClient) CPULoadInfo() (info ServerCPULoadInfo, err error) {
	respBody, err := client.call(peerRESTMethodCPULoadInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// CPUInfo - fetch CPU hardware information for a remote node.
func (client *peerRESTClient) CPUInfo() (info madmin.ServerCPUHardwareInfo, err error) {
	respBody, err := client.call(peerRESTMethodHardwareCPUInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// NetworkInfo - fetch network hardware information for a remote node.
func (client *peerRESTClient) NetworkInfo() (info madmin.ServerNetworkHardwareInfo, err error) {
	respBody, err := client.call(peerRESTMethodHardwareNetworkInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

type networkOverloadedErr struct{}

var networkOverloaded networkOverloadedErr
@@ -338,11 +268,11 @@ func (client *peerRESTClient) doNetOBDTest(ctx context.Context, dataSize int64,
}

func maxLatencyForSizeThreads(size int64, threadCount uint) float64 {
	Gbit100 := 12.5 * float64(giB)
	Gbit40 := 5.00 * float64(giB)
	Gbit25 := 3.25 * float64(giB)
	Gbit10 := 1.25 * float64(giB)
	// Gbit1 := 0.25 * float64(giB)
	Gbit100 := 12.5 * float64(humanize.GiByte)
	Gbit40 := 5.00 * float64(humanize.GiByte)
	Gbit25 := 3.25 * float64(humanize.GiByte)
	Gbit10 := 1.25 * float64(humanize.GiByte)
	// Gbit1 := 0.25 * float64(humanize.GiByte)

	// Given the current defaults, each combination of size/thread
	// is supposed to fully saturate the intended pipe when all threads are active
@@ -391,23 +321,23 @@ func (client *peerRESTClient) NetOBDInfo(ctx context.Context) (info madmin.NetOB
	}
	steps := []step{
		{ // 100 Gbit
			size: 256 * miB,
			size: 256 * humanize.MiByte,
			threads: 50,
		},
		{ // 40 Gbit
			size: 256 * miB,
			size: 256 * humanize.MiByte,
			threads: 20,
		},
		{ // 25 Gbit
			size: 128 * miB,
			size: 128 * humanize.MiByte,
			threads: 25,
		},
		{ // 10 Gbit
			size: 128 * miB,
			size: 128 * humanize.MiByte,
			threads: 10,
		},
		{ // 1 Gbit
			size: 64 * miB,
			size: 64 * humanize.MiByte,
			threads: 2,
		},
	}
@@ -514,30 +444,6 @@ func (client *peerRESTClient) ProcOBDInfo(ctx context.Context) (info madmin.Serv
	return info, err
}

// DrivePerfInfo - fetch Drive performance information for a remote node.
func (client *peerRESTClient) DrivePerfInfo(size int64) (info madmin.ServerDrivesPerfInfo, err error) {
	params := make(url.Values)
	params.Set(peerRESTDrivePerfSize, strconv.FormatInt(size, 10))
	respBody, err := client.call(peerRESTMethodDrivePerfInfo, params, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// MemUsageInfo - fetch memory usage information for a remote node.
func (client *peerRESTClient) MemUsageInfo() (info ServerMemUsageInfo, err error) {
	respBody, err := client.call(peerRESTMethodMemUsageInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// StartProfiling - Issues profiling command on the peer node.
func (client *peerRESTClient) StartProfiling(profiler string) error {
	values := make(url.Values)
@@ -24,12 +24,7 @@ const (
)

const (
	peerRESTMethodNetReadPerfInfo = "/netreadperfinfo"
	peerRESTMethodCollectNetPerfInfo = "/collectnetperfinfo"
	peerRESTMethodServerInfo = "/serverinfo"
	peerRESTMethodCPULoadInfo = "/cpuloadinfo"
	peerRESTMethodMemUsageInfo = "/memusageinfo"
	peerRESTMethodDrivePerfInfo = "/driveperfinfo"
	peerRESTMethodDriveOBDInfo = "/driveobdinfo"
	peerRESTMethodNetOBDInfo = "/netobdinfo"
	peerRESTMethodCPUOBDInfo = "/cpuobdinfo"
@@ -66,8 +61,6 @@ const (
	peerRESTMethodBucketEncryptionSet = "/setbucketencryption"
	peerRESTMethodBucketEncryptionRemove = "/removebucketencryption"
	peerRESTMethodLog = "/log"
	peerRESTMethodHardwareCPUInfo = "/cpuhardwareinfo"
	peerRESTMethodHardwareNetworkInfo = "/networkhardwareinfo"
	peerRESTMethodPutBucketObjectLockConfig = "/putbucketobjectlockconfig"
	peerRESTMethodBucketObjectLockConfigRemove = "/removebucketobjectlockconfig"
)
@@ -65,84 +65,6 @@ func getServerInfo() (*ServerInfoData, error) {
	}, nil
}

// NetReadPerfInfoHandler - returns network read performance information.
func (s *peerRESTServer) NetReadPerfInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	params := mux.Vars(r)

	sizeStr, found := params[peerRESTNetPerfSize]
	if !found {
		s.writeErrorResponse(w, errors.New("size is missing"))
		return
	}

	size, err := strconv.ParseInt(sizeStr, 10, 64)
	if err != nil || size < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}

	start := time.Now()
	n, err := io.CopyN(ioutil.Discard, r.Body, size)
	end := time.Now()

	if err != nil {
		s.writeErrorResponse(w, err)
		return
	}

	if n != size {
		s.writeErrorResponse(w, fmt.Errorf("short read; expected: %v, got: %v", size, n))
		return
	}

	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(globalEndpoints)
	}

	d := end.Sub(start)
	info := ServerNetReadPerfInfo{
		Addr: addr,
		ReadThroughput: uint64(int64(time.Second) * size / int64(d)),
	}

	ctx := newContext(r, w, "NetReadPerfInfo")
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
	w.(http.Flusher).Flush()
}

// CollectNetPerfInfoHandler - returns network performance information collected from other peers.
func (s *peerRESTServer) CollectNetPerfInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	params := mux.Vars(r)
	sizeStr, found := params[peerRESTNetPerfSize]
	if !found {
		s.writeErrorResponse(w, errors.New("size is missing"))
		return
	}

	size, err := strconv.ParseInt(sizeStr, 10, 64)
	if err != nil || size < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}

	info := globalNotificationSys.NetReadPerfInfo(size)

	ctx := newContext(r, w, "CollectNetPerfInfo")
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
	w.(http.Flusher).Flush()
}

// GetLocksHandler - returns list of older lock from the server.
func (s *peerRESTServer) GetLocksHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
@@ -427,48 +349,6 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r *
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData))
}

// CPULoadInfoHandler - returns CPU Load info.
func (s *peerRESTServer) CPULoadInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	ctx := newContext(r, w, "CPULoadInfo")
	info := getLocalCPULoad(globalEndpoints, r)

	defer w.(http.Flusher).Flush()
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

// CPUInfoHandler - returns CPU Hardware info.
func (s *peerRESTServer) CPUInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	ctx := newContext(r, w, "CPUInfo")
	info := getLocalCPUInfo(globalEndpoints, r)

	defer w.(http.Flusher).Flush()
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

// NetworkInfoHandler - returns Network Hardware info.
func (s *peerRESTServer) NetworkInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	ctx := newContext(r, w, "NetworkInfo")
	info := getLocalNetworkInfo(globalEndpoints, r)

	defer w.(http.Flusher).Flush()
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

// ServerInfoHandler - returns Server Info
func (s *peerRESTServer) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
@@ -634,48 +514,6 @@ func (s *peerRESTServer) MemOBDInfoHandler(w http.ResponseWriter, r *http.Reques
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

// DrivePerfInfoHandler - returns Drive Performance info.
func (s *peerRESTServer) DrivePerfInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}

	params := mux.Vars(r)

	sizeStr, found := params[peerRESTDrivePerfSize]
	if !found {
		s.writeErrorResponse(w, errors.New("size is missing"))
		return
	}

	size, err := strconv.ParseInt(sizeStr, 10, 64)
	if err != nil || size < 0 {
		s.writeErrorResponse(w, errInvalidArgument)
		return
	}

	ctx := newContext(r, w, "DrivePerfInfo")

	info := getLocalDrivesPerf(globalEndpoints, size, r)

	defer w.(http.Flusher).Flush()
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

// MemUsageInfoHandler - returns Memory Usage info.
func (s *peerRESTServer) MemUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
		s.writeErrorResponse(w, errors.New("Invalid request"))
		return
	}
	ctx := newContext(r, w, "MemUsageInfo")
	info := getLocalMemUsage(globalEndpoints, r)

	defer w.(http.Flusher).Flush()
	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
}

// DeleteBucketHandler - Delete notification and policies related to the bucket.
func (s *peerRESTServer) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
	if !s.IsValid(w, r) {
@@ -1303,12 +1141,8 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
func registerPeerRESTHandlers(router *mux.Router) {
	server := &peerRESTServer{}
	subrouter := router.PathPrefix(peerRESTPrefix).Subrouter()
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetReadPerfInfo).HandlerFunc(httpTraceHdrs(server.NetReadPerfInfoHandler)).Queries(restQueries(peerRESTNetPerfSize)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCollectNetPerfInfo).HandlerFunc(httpTraceHdrs(server.CollectNetPerfInfoHandler)).Queries(restQueries(peerRESTNetPerfSize)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCPULoadInfo).HandlerFunc(httpTraceHdrs(server.CPULoadInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemUsageInfo).HandlerFunc(httpTraceHdrs(server.MemUsageInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcOBDInfo).HandlerFunc(httpTraceHdrs(server.ProcOBDInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemOBDInfo).HandlerFunc(httpTraceHdrs(server.MemOBDInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodOsInfoOBDInfo).HandlerFunc(httpTraceHdrs(server.OsOBDInfoHandler))
@@ -1317,9 +1151,6 @@ func registerPeerRESTHandlers(router *mux.Router) {
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDriveOBDInfo).HandlerFunc(httpTraceHdrs(server.DriveOBDInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetOBDInfo).HandlerFunc(httpTraceHdrs(server.NetOBDInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDispatchNetOBDInfo).HandlerFunc(httpTraceHdrs(server.DispatchNetOBDInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDrivePerfInfo).HandlerFunc(httpTraceHdrs(server.DrivePerfInfoHandler)).Queries(restQueries(peerRESTDrivePerfSize)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHardwareCPUInfo).HandlerFunc(httpTraceHdrs(server.CPUInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHardwareNetworkInfo).HandlerFunc(httpTraceHdrs(server.NetworkInfoHandler))
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)
	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerUpdate).HandlerFunc(httpTraceHdrs(server.ServerUpdateHandler)).Queries(restQueries(peerRESTUpdateURL, peerRESTSha256Hex, peerRESTLatestRelease)...)