/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/gob"
	"errors"
	"io"
	"math"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/minio/minio/cmd/http"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/rest"
	"github.com/minio/minio/pkg/bandwidth"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/madmin"
	xnet "github.com/minio/minio/pkg/net"
	trace "github.com/minio/minio/pkg/trace"
)

// peerRESTClient - client to talk to peer nodes.
type peerRESTClient struct {
	host       *xnet.Host
	restClient *rest.Client
}

// Wrapper to restClient.Call to handle network errors; in case of a network error
// the connection is marked disconnected permanently. The only way to restore the
// connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *peerRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
	return client.callWithContext(GlobalContext, method, values, body, length)
}

// Wrapper to restClient.Call to handle network errors; in case of a network error
// the connection is marked disconnected permanently. The only way to restore the
// connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *peerRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
	if values == nil {
		values = make(url.Values)
	}

	respBody, err = client.restClient.Call(ctx, method, values, body, length)
	if err == nil {
		return respBody, nil
	}

	return nil, err
}

// Stringer provides a canonicalized representation of node.
func (client *peerRESTClient) String() string {
	return client.host.String()
}

// Close - marks the client as closed.
func (client *peerRESTClient) Close() error {
	client.restClient.Close()
	return nil
}

// GetLocksResp stores various info from the client for each lock that is requested.
type GetLocksResp []map[string][]lockRequesterInfo

// GetLocks - fetch older locks for a remote node.
func (client *peerRESTClient) GetLocks() (locks GetLocksResp, err error) {
	respBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&locks)
	return locks, err
}

// ServerInfo - fetch server information for a remote node.
func (client *peerRESTClient) ServerInfo() (info madmin.ServerProperties, err error) {
	respBody, err := client.call(peerRESTMethodServerInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}
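
// networkOverloadedErr is returned by the network OBD test once more than 5% of
// the samples exceed the expected latency, i.e. the link is treated as overloaded.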
type networkOverloadedErr struct{}

var networkOverloaded networkOverloadedErr

func (n networkOverloadedErr) Error() string {
	return "network overloaded"
}
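
// nullReader is an io.Reader whose Read reports len(b) bytes read without
// touching the buffer; it supplies an arbitrarily large dummy payload for the
// network OBD upload test.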
type nullReader struct{}

func (r *nullReader) Read(b []byte) (int, error) {
	return len(b), nil
}
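
// doNetOBDTest runs one network OBD round against the peer: threadCount goroutines
// each push dataSize bytes per request, latency and throughput samples are collected,
// and the test aborts with networkOverloaded once too many samples are slow.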
func (client *peerRESTClient) doNetOBDTest(ctx context.Context, dataSize int64, threadCount uint) (info madmin.NetOBDInfo, err error) {
	var mu sync.Mutex // mutex used to protect these slices in go-routines
	latencies := []float64{}
	throughputs := []float64{}

	buflimiter := make(chan struct{}, threadCount)
	errChan := make(chan error, threadCount)

	var totalTransferred int64

	// ensure enough samples to obtain normal distribution
	maxSamples := int(10 * threadCount)

	innerCtx, cancel := context.WithCancel(ctx)

	slowSamples := int32(0)
	maxSlowSamples := int32(maxSamples / 20)
	slowSample := func() {
		if slowSamples > maxSlowSamples { // 5% of total
			return
		}
		if atomic.AddInt32(&slowSamples, 1) >= maxSlowSamples {
			errChan <- networkOverloaded
			cancel()
		}
	}

	var wg sync.WaitGroup
	finish := func() {
		<-buflimiter
		wg.Done()
	}

	for i := 0; i < maxSamples; i++ {
		select {
		case <-ctx.Done():
			return info, ctx.Err()
		case err = <-errChan:
		case buflimiter <- struct{}{}:
			wg.Add(1)

			if innerCtx.Err() != nil {
				finish()
				continue
			}

			go func(i int) {
				start := time.Now()
				before := atomic.LoadInt64(&totalTransferred)

				ctx, cancel := context.WithTimeout(innerCtx, 10*time.Second)
				defer cancel()

				progress := io.LimitReader(&nullReader{}, dataSize)

				// Turn off healthCheckFn for OBD tests to cater for higher load on the peers.
				clnt := newPeerRESTClient(client.host)
				clnt.restClient.HealthCheckFn = nil

				respBody, err := clnt.callWithContext(ctx, peerRESTMethodNetOBDInfo, nil, progress, dataSize)
				if err != nil {
					if errors.Is(err, context.DeadlineExceeded) {
						slowSample()
						finish()
						return
					}

					errChan <- err
					finish()
					return
				}
				http.DrainBody(respBody)

				finish()
				atomic.AddInt64(&totalTransferred, dataSize)
				after := atomic.LoadInt64(&totalTransferred)
				end := time.Now()

				latency := end.Sub(start).Seconds()

				if latency > maxLatencyForSizeThreads(dataSize, threadCount) {
					slowSample()
				}

				/* Throughput = (total data transferred across all threads / time taken) */
				throughput := float64(after-before) / latency

				// Protect updating latencies and throughputs slices from
				// multiple go-routines.
				mu.Lock()
				latencies = append(latencies, latency)
				throughputs = append(throughputs, throughput)
				mu.Unlock()
			}(i)
		}
	}
	wg.Wait()

	if err != nil {
		return info, err
	}

	latency, throughput, err := xnet.ComputeOBDStats(latencies, throughputs)
	info = madmin.NetOBDInfo{
		Latency:    latency,
		Throughput: throughput,
	}
	return info, err
}
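
// maxLatencyForSizeThreads returns the maximum acceptable per-request latency, in
// seconds, for the given payload size and thread count; requests slower than this
// are counted as slow samples by doNetOBDTest.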
func maxLatencyForSizeThreads(size int64, threadCount uint) float64 {
	Gbit100 := 12.5 * float64(humanize.GiByte)
	Gbit40 := 5.00 * float64(humanize.GiByte)
	Gbit25 := 3.25 * float64(humanize.GiByte)
	Gbit10 := 1.25 * float64(humanize.GiByte)
	// Gbit1 := 0.25 * float64(humanize.GiByte)

	// Given the current defaults, each combination of size/thread
	// is supposed to fully saturate the intended pipe when all threads are active
	// i.e. if the test is performed in a perfectly controlled environment, i.e. without
	// CPU scheduling latencies and/or network jitters, then all threads working
	// simultaneously should result in each of them completing in 1s
	//
	// In reality, I've assumed a normal distribution of latency with expected mean of 1s and min of 0s
	// Then, 95% of threads should complete within 2 seconds (2 std. deviations from the mean). The 2s comes
	// from fitting the normal curve such that the mean is 1.
	//
	// i.e. we expect that no more than 5% of threads will take longer than 2s to push the data.
	//
	// throughput | max latency
	//   100 Gbit | 2s
	//    40 Gbit | 2s
	//    25 Gbit | 2s
	//    10 Gbit | 2s
	//     1 Gbit | inf
	throughput := float64(size * int64(threadCount))
	if throughput >= Gbit100 {
		return 2.0
	} else if throughput >= Gbit40 {
		return 2.0
	} else if throughput >= Gbit25 {
		return 2.0
	} else if throughput >= Gbit10 {
		return 2.0
	}
	return math.MaxFloat64
}

// NetOBDInfo - fetch Net OBD information for a remote node.
func (client *peerRESTClient) NetOBDInfo(ctx context.Context) (info madmin.NetOBDInfo, err error) {
	// 100 Gbit -> 256 MiB * 50 threads
	// 40 Gbit  -> 256 MiB * 20 threads
	// 25 Gbit  -> 128 MiB * 25 threads
	// 10 Gbit  -> 128 MiB * 10 threads
	// 1 Gbit   -> 64 MiB  * 2 threads
	type step struct {
		size    int64
		threads uint
	}
	steps := []step{
		{ // 100 Gbit
			size:    256 * humanize.MiByte,
			threads: 50,
		},
		{ // 40 Gbit
			size:    256 * humanize.MiByte,
			threads: 20,
		},
		{ // 25 Gbit
			size:    128 * humanize.MiByte,
			threads: 25,
		},
		{ // 10 Gbit
			size:    128 * humanize.MiByte,
			threads: 10,
		},
		{ // 1 Gbit
			size:    64 * humanize.MiByte,
			threads: 2,
		},
	}
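
	// Try the largest size/thread combination first and fall back to the next
	// smaller step when the test reports networkOverloaded or is cut short by a
	// context cancellation or timeout.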
	for i := range steps {
		size := steps[i].size
		threads := steps[i].threads

		if info, err = client.doNetOBDTest(ctx, size, threads); err != nil {
			if err == networkOverloaded {
				continue
			}

			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
				continue
			}
		}
		return info, err
	}
	return info, err
}

// DispatchNetOBDInfo - dispatch other nodes to run Net OBD.
func (client *peerRESTClient) DispatchNetOBDInfo(ctx context.Context) (info madmin.ServerNetOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodDispatchNetOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	waitReader, err := waitForHTTPResponse(respBody)
	if err != nil {
		return
	}
	err = gob.NewDecoder(waitReader).Decode(&info)
	return
}

// DriveOBDInfo - fetch Drive OBD information for a remote node.
func (client *peerRESTClient) DriveOBDInfo(ctx context.Context) (info madmin.ServerDrivesOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodDriveOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// CPUOBDInfo - fetch CPU OBD information for a remote node.
func (client *peerRESTClient) CPUOBDInfo(ctx context.Context) (info madmin.ServerCPUOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodCPUOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// DiskHwOBDInfo - fetch Disk HW OBD information for a remote node.
func (client *peerRESTClient) DiskHwOBDInfo(ctx context.Context) (info madmin.ServerDiskHwOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodDiskHwOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// OsOBDInfo - fetch OsInfo OBD information for a remote node.
func (client *peerRESTClient) OsOBDInfo(ctx context.Context) (info madmin.ServerOsOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodOsInfoOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// MemOBDInfo - fetch MemInfo OBD information for a remote node.
func (client *peerRESTClient) MemOBDInfo(ctx context.Context) (info madmin.ServerMemOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodMemOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// ProcOBDInfo - fetch ProcInfo OBD information for a remote node.
func (client *peerRESTClient) ProcOBDInfo(ctx context.Context) (info madmin.ServerProcOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodProcOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// LogOBDInfo - fetch Log OBD information for a remote node.
func (client *peerRESTClient) LogOBDInfo(ctx context.Context) (info madmin.ServerLogOBDInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodLogOBDInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// StartProfiling - Issues profiling command on the peer node.
func (client *peerRESTClient) StartProfiling(profiler string) error {
	values := make(url.Values)
	values.Set(peerRESTProfiler, profiler)
	respBody, err := client.call(peerRESTMethodStartProfiling, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// DownloadProfileData - download profiled data from a remote node.
func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err error) {
	respBody, err := client.call(peerRESTMethodDownloadProfilingData, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&data)
	return data, err
}

// LoadBucketMetadata - load bucket metadata
func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
	values := make(url.Values)
	values.Set(peerRESTBucket, bucket)
	respBody, err := client.call(peerRESTMethodLoadBucketMetadata, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// DeleteBucketMetadata - Delete bucket metadata
func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
	values := make(url.Values)
	values.Set(peerRESTBucket, bucket)
	respBody, err := client.call(peerRESTMethodDeleteBucketMetadata, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// ReloadFormat - reload format on the peer node.
func (client *peerRESTClient) ReloadFormat(dryRun bool) error {
	values := make(url.Values)
	values.Set(peerRESTDryRun, strconv.FormatBool(dryRun))

	respBody, err := client.call(peerRESTMethodReloadFormat, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// cycleServerBloomFilter will cycle the bloom filter to start recording to index y if not already.
// The response will contain a bloom filter starting at index x up to, but not including index y.
// If y is 0, the response will not update y, but return the currently recorded information
// from the current x to y-1.
func (client *peerRESTClient) cycleServerBloomFilter(ctx context.Context, req bloomFilterRequest) (*bloomFilterResponse, error) {
	var reader bytes.Buffer
	err := gob.NewEncoder(&reader).Encode(req)
	if err != nil {
		return nil, err
	}
	respBody, err := client.callWithContext(ctx, peerRESTMethodCycleBloom, nil, &reader, -1)
	if err != nil {
		return nil, err
	}
	var resp bloomFilterResponse
	defer http.DrainBody(respBody)
	return &resp, gob.NewDecoder(respBody).Decode(&resp)
}

// DeletePolicy - delete a specific canned policy.
func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTPolicy, policyName)

	respBody, err := client.call(peerRESTMethodDeletePolicy, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadPolicy - reload a specific canned policy.
func (client *peerRESTClient) LoadPolicy(policyName string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTPolicy, policyName)

	respBody, err := client.call(peerRESTMethodLoadPolicy, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadPolicyMapping - reload a specific policy mapping
func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, isGroup bool) error {
	values := make(url.Values)
	values.Set(peerRESTUserOrGroup, userOrGroup)
	if isGroup {
		values.Set(peerRESTIsGroup, "")
	}

	respBody, err := client.call(peerRESTMethodLoadPolicyMapping, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// DeleteUser - delete a specific user.
func (client *peerRESTClient) DeleteUser(accessKey string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)

	respBody, err := client.call(peerRESTMethodDeleteUser, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// DeleteServiceAccount - delete a specific service account.
func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)

	respBody, err := client.call(peerRESTMethodDeleteServiceAccount, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadUser - reload a specific user.
func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)
	values.Set(peerRESTUserTemp, strconv.FormatBool(temp))

	respBody, err := client.call(peerRESTMethodLoadUser, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadServiceAccount - reload a specific service account.
func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)

	respBody, err := client.call(peerRESTMethodLoadServiceAccount, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadGroup - send load group command to peers.
func (client *peerRESTClient) LoadGroup(group string) error {
	values := make(url.Values)
	values.Set(peerRESTGroup, group)
	respBody, err := client.call(peerRESTMethodLoadGroup, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}
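
// serverUpdateInfo is the gob-encoded payload ServerUpdate sends to a peer: the
// download URL, the expected SHA-256 checksum and the release time of the update.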
type serverUpdateInfo struct {
	URL       *url.URL
	Sha256Sum []byte
	Time      time.Time
}

// ServerUpdate - sends server update message to remote peers.
func (client *peerRESTClient) ServerUpdate(ctx context.Context, u *url.URL, sha256Sum []byte, lrTime time.Time) error {
	values := make(url.Values)
	var reader bytes.Buffer
	if err := gob.NewEncoder(&reader).Encode(serverUpdateInfo{
		URL:       u,
		Sha256Sum: sha256Sum,
		Time:      lrTime,
	}); err != nil {
		return err
	}
	respBody, err := client.callWithContext(ctx, peerRESTMethodServerUpdate, values, &reader, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// SignalService - sends signal to peer nodes.
func (client *peerRESTClient) SignalService(sig serviceSignal) error {
	values := make(url.Values)
	values.Set(peerRESTSignal, strconv.Itoa(int(sig)))
	respBody, err := client.call(peerRESTMethodSignalService, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}
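
// BackgroundHealStatus - fetch background heal status from a remote node.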
func (client *peerRESTClient) BackgroundHealStatus() (madmin.BgHealState, error) {
	respBody, err := client.call(peerRESTMethodBackgroundHealStatus, nil, nil, -1)
	if err != nil {
		return madmin.BgHealState{}, err
	}
	defer http.DrainBody(respBody)

	state := madmin.BgHealState{}
	err = gob.NewDecoder(respBody).Decode(&state)
	return state, err
}

// GetLocalDiskIDs - get a peer's local disks' IDs.
func (client *peerRESTClient) GetLocalDiskIDs(ctx context.Context) (diskIDs []string) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodGetLocalDiskIDs, nil, nil, -1)
	if err != nil {
		logger.LogIf(ctx, err)
		return nil
	}
	defer http.DrainBody(respBody)
	if err = gob.NewDecoder(respBody).Decode(&diskIDs); err != nil {
		logger.LogIf(ctx, err)
		return nil
	}
	return diskIDs
}
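
// doTrace streams trace events from the peer and forwards them to traceCh until
// doneCh is closed or the REST connection breaks.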
func (client *peerRESTClient) doTrace(traceCh chan interface{}, doneCh <-chan struct{}, trcAll, trcErr bool) {
	values := make(url.Values)
	values.Set(peerRESTTraceAll, strconv.FormatBool(trcAll))
	values.Set(peerRESTTraceErr, strconv.FormatBool(trcErr))

	// To cancel the REST request in case doneCh gets closed.
	ctx, cancel := context.WithCancel(GlobalContext)

	cancelCh := make(chan struct{})
	defer close(cancelCh)
	go func() {
		select {
		case <-doneCh:
		case <-cancelCh:
			// There was an error in the REST request.
		}
		cancel()
	}()

	respBody, err := client.callWithContext(ctx, peerRESTMethodTrace, values, nil, -1)
	defer http.DrainBody(respBody)

	if err != nil {
		return
	}

	dec := gob.NewDecoder(respBody)
	for {
		var info trace.Info
		if err = dec.Decode(&info); err != nil {
			return
		}
		if len(info.NodeName) > 0 {
			select {
			case traceCh <- info:
			default:
				// Do not block on slow receivers.
			}
		}
	}
}
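
// doListen streams bucket notification events from the peer and forwards them to
// listenCh until doneCh is closed or the REST connection breaks.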
func (client *peerRESTClient) doListen(listenCh chan interface{}, doneCh <-chan struct{}, v url.Values) {
	// To cancel the REST request in case doneCh gets closed.
	ctx, cancel := context.WithCancel(GlobalContext)

	cancelCh := make(chan struct{})
	defer close(cancelCh)
	go func() {
		select {
		case <-doneCh:
		case <-cancelCh:
			// There was an error in the REST request.
		}
		cancel()
	}()

	respBody, err := client.callWithContext(ctx, peerRESTMethodListen, v, nil, -1)
	defer http.DrainBody(respBody)

	if err != nil {
		return
	}

	dec := gob.NewDecoder(respBody)
	for {
		var ev event.Event
		if err = dec.Decode(&ev); err != nil {
			return
		}
		if len(ev.EventVersion) > 0 {
			select {
			case listenCh <- ev:
			default:
				// Do not block on slow receivers.
			}
		}
	}
}

// Listen - listen on peers.
func (client *peerRESTClient) Listen(listenCh chan interface{}, doneCh <-chan struct{}, v url.Values) {
	go func() {
		for {
			client.doListen(listenCh, doneCh, v)
			select {
			case <-doneCh:
				return
			default:
				// There was an error in the REST request; retry after some time as the peer is probably down.
				time.Sleep(5 * time.Second)
			}
		}
	}()
}

// Trace - send http trace request to peer nodes
func (client *peerRESTClient) Trace(traceCh chan interface{}, doneCh <-chan struct{}, trcAll, trcErr bool) {
	go func() {
		for {
			client.doTrace(traceCh, doneCh, trcAll, trcErr)
			select {
			case <-doneCh:
				return
			default:
				// There was an error in the REST request; retry after some time as the peer is probably down.
				time.Sleep(5 * time.Second)
			}
		}
	}()
}

// ConsoleLog - sends request to peer nodes to get console logs
func (client *peerRESTClient) ConsoleLog(logCh chan interface{}, doneCh <-chan struct{}) {
	go func() {
		for {
			// get cancellation context to properly unsubscribe peers
			ctx, cancel := context.WithCancel(GlobalContext)
			respBody, err := client.callWithContext(ctx, peerRESTMethodLog, nil, nil, -1)
			if err != nil {
				// Retry the failed request.
				time.Sleep(5 * time.Second)
			} else {
				dec := gob.NewDecoder(respBody)

				go func() {
					<-doneCh
					cancel()
				}()

				for {
					var log madmin.LogInfo
					if err = dec.Decode(&log); err != nil {
						break
					}
					select {
					case logCh <- log:
					default:
					}
				}
			}

			select {
			case <-doneCh:
				cancel()
				http.DrainBody(respBody)
				return
			default:
				// There was an error in the REST request, retry.
			}
		}
	}()
}
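
// getRemoteHosts returns the parsed hosts of all remote peers in the given
// endpoint zones, skipping any host that fails to parse.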
func getRemoteHosts(endpointZones EndpointZones) []*xnet.Host {
	peers := GetRemotePeers(endpointZones)
	remoteHosts := make([]*xnet.Host, 0, len(peers))
	for _, hostStr := range peers {
		host, err := xnet.ParseHost(hostStr)
		if err != nil {
			logger.LogIf(GlobalContext, err)
			continue
		}
		remoteHosts = append(remoteHosts, host)
	}

	return remoteHosts
}

// newPeerRestClients creates new peer clients.
func newPeerRestClients(endpoints EndpointZones) []*peerRESTClient {
	peerHosts := getRemoteHosts(endpoints)
	restClients := make([]*peerRESTClient, len(peerHosts))
	for i, host := range peerHosts {
		restClients[i] = newPeerRESTClient(host)
	}

	return restClients
}

// Returns a peer rest client.
func newPeerRESTClient(peer *xnet.Host) *peerRESTClient {
	scheme := "http"
	if globalIsSSL {
		scheme = "https"
	}

	serverURL := &url.URL{
		Scheme: scheme,
		Host:   peer.String(),
		Path:   peerRESTPath,
	}

	var tlsConfig *tls.Config
	if globalIsSSL {
		tlsConfig = &tls.Config{
			ServerName: peer.Name,
			RootCAs:    globalRootCAs,
		}
	}

	trFn := newInternodeHTTPTransport(tlsConfig, rest.DefaultTimeout)
	restClient := rest.NewClient(serverURL, trFn, newAuthToken)

	// Construct a new health function.
	restClient.HealthCheckFn = func() bool {
		ctx, cancel := context.WithTimeout(GlobalContext, restClient.HealthCheckTimeout)
		// Instantiate a new rest client for healthcheck
		// to avoid recursive healthCheckFn()
		respBody, err := rest.NewClient(serverURL, trFn, newAuthToken).Call(ctx, peerRESTMethodHealth, nil, nil, -1)
		xhttp.DrainBody(respBody)
		cancel()
		var ne *rest.NetworkError
		return !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &ne)
	}

	return &peerRESTClient{host: peer, restClient: restClient}
}

// MonitorBandwidth - fetch bandwidth monitoring details from a remote node for the given buckets.
func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []string) (*bandwidth.Report, error) {
	values := make(url.Values)
	values.Set(peerRESTBuckets, strings.Join(buckets, ","))
	respBody, err := client.callWithContext(ctx, peerRESTMethodGetBandwidth, values, nil, -1)
	if err != nil {
		return nil, err
	}
	defer http.DrainBody(respBody)

	dec := gob.NewDecoder(respBody)
	var bandwidthReport bandwidth.Report
	err = dec.Decode(&bandwidthReport)
	return &bandwidthReport, err
}