Mirror of https://github.com/minio/minio.git (synced 2025-01-11 15:03:22 -05:00)
Fix bandwidth monitoring to be per remote target (#16360)
commit 1b02e046c2, parent d08e3cc895
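Summary of the change, as reconstructed from the diff below: bucket replication bandwidth used to be throttled and measured per bucket; this commit keys every limit, throttle and moving-average measurement by bucket and remote target ARN, removes the dedicated BandwidthMonitorHandler admin endpoint, and surfaces per-target bandwidth through the bucket replication metrics instead. The report type also moves from madmin.BucketBandwidthReport to an internal bandwidth.BucketBandwidthReport keyed by bucket and then by ARN. The following is a minimal, self-contained sketch (not MinIO code) that only mirrors the new report shape; the bucket name and ARNs are placeholders.

package main

import "fmt"

// Details mirrors internal/bucket/bandwidth.Details added in this commit.
type Details struct {
    LimitInBytesPerSecond            int64   `json:"limitInBits"`
    CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
}

// BucketBandwidthReport mirrors the new report: bucket -> remote target ARN -> Details.
type BucketBandwidthReport struct {
    BucketStats map[string]map[string]Details `json:"bucketStats,omitempty"`
}

func main() {
    rpt := BucketBandwidthReport{BucketStats: map[string]map[string]Details{
        "mybucket": { // placeholder bucket and ARNs
            "arn:minio:replication::id1:target-a": {LimitInBytesPerSecond: 100 << 20, CurrentBandwidthInBytesPerSecond: 42e6},
            "arn:minio:replication::id2:target-b": {LimitInBytesPerSecond: 50 << 20},
        },
    }}
    // Every remote target of a bucket now carries its own limit and measurement.
    for arn, d := range rpt.BucketStats["mybucket"] {
        fmt.Printf("%s limit=%d B/s current=%.0f B/s\n", arn, d.LimitInBytesPerSecond, d.CurrentBandwidthInBytesPerSecond)
    }
}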
@@ -32,7 +32,6 @@ import (
     "hash/crc32"
     "io"
     "math"
-    "math/rand"
     "net/http"
     "net/url"
     "os"
@@ -2341,66 +2340,6 @@ func getTLSInfo() madmin.TLSInfo {
     return tlsInfo
 }
 
-// BandwidthMonitorHandler - GET /minio/admin/v3/bandwidth
-// ----------
-// Get bandwidth consumption information
-func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http.Request) {
-    ctx := newContext(r, w, "BandwidthMonitor")
-
-    defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
-
-    // Validate request signature.
-    _, adminAPIErr := checkAdminRequestAuth(ctx, r, iampolicy.BandwidthMonitorAction, "")
-    if adminAPIErr != ErrNone {
-        writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
-        return
-    }
-
-    rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
-
-    setEventStreamHeaders(w)
-    reportCh := make(chan madmin.BucketBandwidthReport)
-    keepAliveTicker := time.NewTicker(500 * time.Millisecond)
-    defer keepAliveTicker.Stop()
-    bucketsRequestedString := r.Form.Get("buckets")
-    bucketsRequested := strings.Split(bucketsRequestedString, ",")
-    go func() {
-        defer close(reportCh)
-        for {
-            select {
-            case <-ctx.Done():
-                return
-            case reportCh <- globalNotificationSys.GetBandwidthReports(ctx, bucketsRequested...):
-                time.Sleep(time.Duration(rnd.Float64() * float64(2*time.Second)))
-            }
-        }
-    }()
-
-    enc := json.NewEncoder(w)
-    for {
-        select {
-        case report, ok := <-reportCh:
-            if !ok {
-                return
-            }
-            if err := enc.Encode(report); err != nil {
-                return
-            }
-            if len(reportCh) == 0 {
-                // Flush if nothing is queued
-                w.(http.Flusher).Flush()
-            }
-        case <-keepAliveTicker.C:
-            if _, err := w.Write([]byte(" ")); err != nil {
-                return
-            }
-            w.(http.Flusher).Flush()
-        case <-ctx.Done():
-            return
-        }
-    }
-}
-
 // ServerInfoHandler - GET /minio/admin/v3/info
 // ----------
 // Get server information
@@ -303,8 +303,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
         // -- Health API --
         adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
             HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
-        adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
-            HandlerFunc(gz(httpTraceHdrs(adminAPI.BandwidthMonitorHandler)))
     }
 
     // If none of the routes match add default error handler routes
@@ -231,7 +231,19 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseW
     w.Header().Set(xhttp.ContentType, string(mimeJSON))
 
     enc := json.NewEncoder(w)
-    if err = enc.Encode(globalReplicationStats.getLatestReplicationStats(bucket, usageInfo)); err != nil {
+    stats := globalReplicationStats.getLatestReplicationStats(bucket, usageInfo)
+    bwRpt := globalNotificationSys.GetBandwidthReports(ctx, bucket)
+    bwMap := bwRpt.BucketStats[bucket]
+    for arn, st := range stats.Stats {
+        if bwMap != nil {
+            if bw, ok := bwMap[arn]; ok {
+                st.BandWidthLimitInBytesPerSecond = bw.LimitInBytesPerSecond
+                st.CurrentBandwidthInBytesPerSecond = bw.CurrentBandwidthInBytesPerSecond
+                stats.Stats[arn] = st
+            }
+        }
+    }
+    if err = enc.Encode(stats); err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
         return
     }
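With the hunk above, GetBucketReplicationMetricsHandler folds the per-target bandwidth report into the replication stats it returns, so each target ARN entry gains the two fields added to BucketReplicationStat further down. A self-contained sketch of the JSON a client might now see for one target; the struct and its tags are copied from that diff, the values are invented, and note that the limitInBits tag name is kept from the source even though the value is in bytes per second.

package main

import (
    "encoding/json"
    "fmt"
)

// perTargetBandwidth copies only the two new fields and their JSON tags.
type perTargetBandwidth struct {
    BandWidthLimitInBytesPerSecond   int64   `json:"limitInBits"`
    CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
}

func main() {
    b, _ := json.MarshalIndent(perTargetBandwidth{
        BandWidthLimitInBytesPerSecond:   100 << 20, // invented: 100 MiB/s configured limit
        CurrentBandwidthInBytesPerSecond: 37.5e6,    // invented: observed moving average
    }, "", "  ")
    fmt.Println(string(b))
    // Prints:
    // {
    //   "limitInBits": 104857600,
    //   "currentBandwidth": 37500000
    // }
}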
@@ -1144,10 +1144,11 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
 
     opts := &bandwidth.MonitorReaderOptions{
         Bucket:     objInfo.Bucket,
+        TargetARN:  tgt.ARN,
         HeaderSize: headerSize,
     }
     newCtx := ctx
-    if globalBucketMonitor.IsThrottled(bucket) {
+    if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) {
         var cancel context.CancelFunc
         newCtx, cancel = context.WithTimeout(ctx, throttleDeadline)
         defer cancel()
@@ -1344,10 +1345,11 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
 
     opts := &bandwidth.MonitorReaderOptions{
         Bucket:     objInfo.Bucket,
+        TargetARN:  tgt.ARN,
         HeaderSize: headerSize,
     }
     newCtx := ctx
-    if globalBucketMonitor.IsThrottled(bucket) {
+    if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) {
         var cancel context.CancelFunc
         newCtx, cancel = context.WithTimeout(ctx, throttleDeadline)
         defer cancel()
@@ -120,6 +120,10 @@ type BucketReplicationStat struct {
     FailedCount int64 `json:"failedReplicationCount"`
     // Replication latency information
     Latency ReplicationLatency `json:"replicationLatency"`
+    // bandwidth limit for target
+    BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"`
+    // current bandwidth reported
+    CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
 }
 
 func (bs *BucketReplicationStat) hasReplicationUsage() bool {

@@ -89,6 +89,18 @@ func (z *BucketReplicationStat) DecodeMsg(dc *msgp.Reader) (err error) {
                     }
                 }
             }
+        case "BandWidthLimitInBytesPerSecond":
+            z.BandWidthLimitInBytesPerSecond, err = dc.ReadInt64()
+            if err != nil {
+                err = msgp.WrapError(err, "BandWidthLimitInBytesPerSecond")
+                return
+            }
+        case "CurrentBandwidthInBytesPerSecond":
+            z.CurrentBandwidthInBytesPerSecond, err = dc.ReadFloat64()
+            if err != nil {
+                err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
+                return
+            }
         default:
             err = dc.Skip()
             if err != nil {
@@ -102,9 +114,9 @@ func (z *BucketReplicationStat) DecodeMsg(dc *msgp.Reader) (err error) {
 
 // EncodeMsg implements msgp.Encodable
 func (z *BucketReplicationStat) EncodeMsg(en *msgp.Writer) (err error) {
-    // map header, size 7
+    // map header, size 9
     // write "PendingSize"
-    err = en.Append(0x87, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
+    err = en.Append(0x89, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
     if err != nil {
         return
     }
@@ -179,15 +191,35 @@ func (z *BucketReplicationStat) EncodeMsg(en *msgp.Writer) (err error) {
         err = msgp.WrapError(err, "Latency", "UploadHistogram")
         return
     }
+    // write "BandWidthLimitInBytesPerSecond"
+    err = en.Append(0xbe, 0x42, 0x61, 0x6e, 0x64, 0x57, 0x69, 0x64, 0x74, 0x68, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
+    if err != nil {
+        return
+    }
+    err = en.WriteInt64(z.BandWidthLimitInBytesPerSecond)
+    if err != nil {
+        err = msgp.WrapError(err, "BandWidthLimitInBytesPerSecond")
+        return
+    }
+    // write "CurrentBandwidthInBytesPerSecond"
+    err = en.Append(0xd9, 0x20, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
+    if err != nil {
+        return
+    }
+    err = en.WriteFloat64(z.CurrentBandwidthInBytesPerSecond)
+    if err != nil {
+        err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
+        return
+    }
     return
 }
 
 // MarshalMsg implements msgp.Marshaler
 func (z *BucketReplicationStat) MarshalMsg(b []byte) (o []byte, err error) {
     o = msgp.Require(b, z.Msgsize())
-    // map header, size 7
+    // map header, size 9
     // string "PendingSize"
-    o = append(o, 0x87, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
+    o = append(o, 0x89, 0xab, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x7a, 0x65)
     o = msgp.AppendInt64(o, z.PendingSize)
     // string "ReplicatedSize"
     o = append(o, 0xae, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65)
@@ -214,6 +246,12 @@ func (z *BucketReplicationStat) MarshalMsg(b []byte) (o []byte, err error) {
         err = msgp.WrapError(err, "Latency", "UploadHistogram")
         return
     }
+    // string "BandWidthLimitInBytesPerSecond"
+    o = append(o, 0xbe, 0x42, 0x61, 0x6e, 0x64, 0x57, 0x69, 0x64, 0x74, 0x68, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
+    o = msgp.AppendInt64(o, z.BandWidthLimitInBytesPerSecond)
+    // string "CurrentBandwidthInBytesPerSecond"
+    o = append(o, 0xd9, 0x20, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
+    o = msgp.AppendFloat64(o, z.CurrentBandwidthInBytesPerSecond)
     return
 }
 
@@ -300,6 +338,18 @@ func (z *BucketReplicationStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
                     }
                 }
             }
+        case "BandWidthLimitInBytesPerSecond":
+            z.BandWidthLimitInBytesPerSecond, bts, err = msgp.ReadInt64Bytes(bts)
+            if err != nil {
+                err = msgp.WrapError(err, "BandWidthLimitInBytesPerSecond")
+                return
+            }
+        case "CurrentBandwidthInBytesPerSecond":
+            z.CurrentBandwidthInBytesPerSecond, bts, err = msgp.ReadFloat64Bytes(bts)
+            if err != nil {
+                err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
+                return
+            }
         default:
             bts, err = msgp.Skip(bts)
             if err != nil {
@@ -314,7 +364,7 @@ func (z *BucketReplicationStat) UnmarshalMsg(bts []byte) (o []byte, err error) {
 
 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *BucketReplicationStat) Msgsize() (s int) {
-    s = 1 + 12 + msgp.Int64Size + 15 + msgp.Int64Size + 12 + msgp.Int64Size + 11 + msgp.Int64Size + 13 + msgp.Int64Size + 12 + msgp.Int64Size + 8 + 1 + 16 + z.Latency.UploadHistogram.Msgsize()
+    s = 1 + 12 + msgp.Int64Size + 15 + msgp.Int64Size + 12 + msgp.Int64Size + 11 + msgp.Int64Size + 13 + msgp.Int64Size + 12 + msgp.Int64Size + 8 + 1 + 16 + z.Latency.UploadHistogram.Msgsize() + 31 + msgp.Int64Size + 34 + msgp.Float64Size
     return
 }
 
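The serializer hunks above (DecodeMsg, EncodeMsg, MarshalMsg, UnmarshalMsg, Msgsize) look like regenerated msgp output picking up the two new struct fields; the MessagePack map header grows from 7 to 9 entries, which is why the first byte changes from 0x87 to 0x89. A hypothetical round-trip check, not part of the commit, assuming it lives in the same package as BucketReplicationStat (MinIO's cmd package) so the generated methods and fmt are in scope:

// roundTripBucketReplicationStat is a sketch only.
func roundTripBucketReplicationStat() error {
    in := BucketReplicationStat{
        BandWidthLimitInBytesPerSecond:   100 << 20, // invented values
        CurrentBandwidthInBytesPerSecond: 12.5e6,
    }
    buf, err := in.MarshalMsg(nil) // generated above: appends MessagePack to buf
    if err != nil {
        return err
    }
    var out BucketReplicationStat
    if _, err := out.UnmarshalMsg(buf); err != nil { // generated above: decodes buf
        return err
    }
    if out.BandWidthLimitInBytesPerSecond != in.BandWidthLimitInBytesPerSecond ||
        out.CurrentBandwidthInBytesPerSecond != in.CurrentBandwidthInBytesPerSecond {
        return fmt.Errorf("bandwidth fields not preserved across msgp round trip")
    }
    return nil
}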
@@ -269,18 +269,18 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m
 
     sys.targetsMap[bucket] = newtgts
     sys.arnRemotesMap[tgt.Arn] = clnt
-    sys.updateBandwidthLimit(bucket, tgt.BandwidthLimit)
+    sys.updateBandwidthLimit(bucket, tgt.Arn, tgt.BandwidthLimit)
     return nil
 }
 
-func (sys *BucketTargetSys) updateBandwidthLimit(bucket string, limit int64) {
+func (sys *BucketTargetSys) updateBandwidthLimit(bucket, arn string, limit int64) {
     if limit == 0 {
-        globalBucketMonitor.DeleteBucket(bucket)
+        globalBucketMonitor.DeleteBucketThrottle(bucket, arn)
         return
     }
     // Setup bandwidth throttling
 
-    globalBucketMonitor.SetBandwidthLimit(bucket, limit)
+    globalBucketMonitor.SetBandwidthLimit(bucket, arn, limit)
 }
 
 // RemoveTarget - removes a remote bucket target for this source bucket.
@@ -332,7 +332,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
     }
     sys.targetsMap[bucket] = targets
     delete(sys.arnRemotesMap, arnStr)
-    sys.updateBandwidthLimit(bucket, 0)
+    sys.updateBandwidthLimit(bucket, arnStr, 0)
     return nil
 }
 
@@ -402,7 +402,7 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT
 
     // No need for more if not adding anything
     if tgts == nil || tgts.Empty() {
-        sys.updateBandwidthLimit(bucket, 0)
+        globalBucketMonitor.DeleteBucket(bucket)
         return
     }
 
@@ -415,7 +415,7 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT
             continue
         }
         sys.arnRemotesMap[tgt.Arn] = tgtClient
-        sys.updateBandwidthLimit(bucket, tgt.BandwidthLimit)
+        sys.updateBandwidthLimit(bucket, tgt.Arn, tgt.BandwidthLimit)
     }
     sys.targetsMap[bucket] = tgts.Targets
 }
@@ -438,7 +438,7 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
            continue
        }
        sys.arnRemotesMap[tgt.Arn] = tgtClient
-       sys.updateBandwidthLimit(bucket.Name, tgt.BandwidthLimit)
+       sys.updateBandwidthLimit(bucket.Name, tgt.Arn, tgt.BandwidthLimit)
    }
    sys.targetsMap[bucket.Name] = cfg.Targets
 }
@@ -33,7 +33,7 @@ import (
     "github.com/cespare/xxhash/v2"
     "github.com/klauspost/compress/zip"
     "github.com/minio/madmin-go/v2"
-    bucketBandwidth "github.com/minio/minio/internal/bucket/bandwidth"
+    "github.com/minio/minio/internal/bucket/bandwidth"
     "github.com/minio/minio/internal/logger"
     "github.com/minio/minio/internal/sync/errgroup"
     xnet "github.com/minio/pkg/net"
@@ -1104,8 +1104,8 @@ func NewNotificationSys(endpoints EndpointServerPools) *NotificationSys {
 }
 
 // GetBandwidthReports - gets the bandwidth report from all nodes including self.
-func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...string) madmin.BucketBandwidthReport {
-    reports := make([]*madmin.BucketBandwidthReport, len(sys.peerClients))
+func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...string) bandwidth.BucketBandwidthReport {
+    reports := make([]*bandwidth.BucketBandwidthReport, len(sys.peerClients))
     g := errgroup.WithNErrs(len(sys.peerClients))
     for index := range sys.peerClients {
         if sys.peerClients[index] == nil {
@@ -1125,9 +1125,9 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...
             ctx := logger.SetReqInfo(ctx, reqInfo)
             logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String())
         }
-    reports = append(reports, globalBucketMonitor.GetReport(bucketBandwidth.SelectBuckets(buckets...)))
-    consolidatedReport := madmin.BucketBandwidthReport{
-        BucketStats: make(map[string]madmin.BandwidthDetails),
+    reports = append(reports, globalBucketMonitor.GetReport(bandwidth.SelectBuckets(buckets...)))
+    consolidatedReport := bandwidth.BucketBandwidthReport{
+        BucketStats: make(map[string]map[string]bandwidth.Details),
     }
     for _, report := range reports {
         if report == nil || report.BucketStats == nil {
@@ -1136,15 +1136,26 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...
         for bucket := range report.BucketStats {
             d, ok := consolidatedReport.BucketStats[bucket]
             if !ok {
-                consolidatedReport.BucketStats[bucket] = madmin.BandwidthDetails{}
+                consolidatedReport.BucketStats[bucket] = make(map[string]bandwidth.Details)
                 d = consolidatedReport.BucketStats[bucket]
-                d.LimitInBytesPerSecond = report.BucketStats[bucket].LimitInBytesPerSecond
+                for arn := range d {
+                    d[arn] = bandwidth.Details{
+                        LimitInBytesPerSecond: report.BucketStats[bucket][arn].LimitInBytesPerSecond,
+                    }
+                }
             }
-            if d.LimitInBytesPerSecond < report.BucketStats[bucket].LimitInBytesPerSecond {
-                d.LimitInBytesPerSecond = report.BucketStats[bucket].LimitInBytesPerSecond
+            for arn, st := range report.BucketStats[bucket] {
+                bwDet := bandwidth.Details{}
+                if bw, ok := d[arn]; ok {
+                    bwDet = bw
+                }
+                if bwDet.LimitInBytesPerSecond < st.LimitInBytesPerSecond {
+                    bwDet.LimitInBytesPerSecond = st.LimitInBytesPerSecond
+                }
+                bwDet.CurrentBandwidthInBytesPerSecond += st.CurrentBandwidthInBytesPerSecond
+                d[arn] = bwDet
+                consolidatedReport.BucketStats[bucket] = d
             }
-            d.CurrentBandwidthInBytesPerSecond += report.BucketStats[bucket].CurrentBandwidthInBytesPerSecond
-            consolidatedReport.BucketStats[bucket] = d
         }
     }
     return consolidatedReport
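GetBandwidthReports above now returns the internal bandwidth.BucketBandwidthReport and merges the per-node reports per bucket and per target ARN: the configured limit is taken as the largest value reported by any node, while the measured bandwidth is summed across nodes. A self-contained sketch of just that merge rule, with stand-in types and invented numbers rather than the MinIO ones:

package main

import "fmt"

type details struct {
    limit   int64   // configured limit, bytes/sec
    current float64 // measured bandwidth, bytes/sec
}

type report map[string]map[string]details // bucket -> target ARN -> details

func consolidate(perNode []report) report {
    out := report{}
    for _, r := range perNode {
        for bucket, targets := range r {
            if out[bucket] == nil {
                out[bucket] = map[string]details{}
            }
            for arn, st := range targets {
                d := out[bucket][arn]
                if d.limit < st.limit {
                    d.limit = st.limit // limits agree across nodes; keep the max
                }
                d.current += st.current // measured bandwidth is additive across nodes
                out[bucket][arn] = d
            }
        }
    }
    return out
}

func main() {
    node1 := report{"mybucket": {"arn-1": {limit: 100 << 20, current: 20e6}}}
    node2 := report{"mybucket": {"arn-1": {limit: 100 << 20, current: 15e6}}}
    fmt.Printf("%+v\n", consolidate([]report{node1, node2}))
}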
@@ -30,6 +30,7 @@ import (
     "time"
 
     "github.com/minio/madmin-go/v2"
+    "github.com/minio/minio/internal/bucket/bandwidth"
     "github.com/minio/minio/internal/event"
     xhttp "github.com/minio/minio/internal/http"
     "github.com/minio/minio/internal/logger"
@@ -814,7 +815,7 @@ func newPeerRESTClient(peer *xnet.Host) *peerRESTClient {
 }
 
 // MonitorBandwidth - send http trace request to peer nodes
-func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []string) (*madmin.BucketBandwidthReport, error) {
+func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []string) (*bandwidth.BucketBandwidthReport, error) {
     values := make(url.Values)
     values.Set(peerRESTBuckets, strings.Join(buckets, ","))
     respBody, err := client.callWithContext(ctx, peerRESTMethodGetBandwidth, values, nil, -1)
@@ -824,7 +825,7 @@ func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []st
     defer xhttp.DrainBody(respBody)
 
     dec := gob.NewDecoder(respBody)
-    var bandwidthReport madmin.BucketBandwidthReport
+    var bandwidthReport bandwidth.BucketBandwidthReport
     err = dec.Decode(&bandwidthReport)
     return &bandwidthReport, err
 }

go.mod: 2 changed lines
@@ -50,7 +50,7 @@ require (
     github.com/minio/highwayhash v1.0.2
     github.com/minio/kes v0.22.2
     github.com/minio/madmin-go/v2 v2.0.7
-    github.com/minio/minio-go/v7 v7.0.45
+    github.com/minio/minio-go/v7 v7.0.46-0.20230104182320-4eab739c18fd
     github.com/minio/pkg v1.5.8
     github.com/minio/selfupdate v0.5.0
     github.com/minio/sha256-simd v1.0.0

go.sum: 2 changed lines
@@ -779,6 +779,8 @@ github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEp
 github.com/minio/minio-go/v7 v7.0.41/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
 github.com/minio/minio-go/v7 v7.0.45 h1:g4IeM9M9pW/Lo8AGGNOjBZYlvmtlE1N5TQEYWXRWzIs=
 github.com/minio/minio-go/v7 v7.0.45/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
+github.com/minio/minio-go/v7 v7.0.46-0.20230104182320-4eab739c18fd h1:0KBrQiZnIfb56iUEYGy4AmOvcIcy5Flqz3om3gmx5P8=
+github.com/minio/minio-go/v7 v7.0.46-0.20230104182320-4eab739c18fd/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
 github.com/minio/pkg v1.5.4/go.mod h1:2MOaRFdmFKULD+uOLc3qHLGTQTuxCNPKNPfLBTxC8CA=
 github.com/minio/pkg v1.5.8 h1:ryx23f28havoidUezmYRNgaZpbyn4y3m2yp/vfasFy0=
 github.com/minio/pkg v1.5.8/go.mod h1:EiGlHS2xaooa2VMxhJsxxAZHDObHVUB3HwtuoEXOCVE=
@@ -22,7 +22,6 @@ import (
     "sync"
     "time"
 
-    "github.com/minio/madmin-go/v2"
     "golang.org/x/time/rate"
 )
 
@@ -34,19 +33,19 @@ type throttle struct {
 // Monitor holds the state of the global bucket monitor
 type Monitor struct {
     tlock sync.RWMutex // mutex for bucketThrottle
-    bucketThrottle map[string]*throttle
+    bucketThrottle map[string]map[string]*throttle
     mlock sync.RWMutex // mutex for activeBuckets map
-    activeBuckets map[string]*bucketMeasurement // Buckets with objects in flight
+    activeBuckets map[string]map[string]*bucketMeasurement // Buckets with objects in flight
     bucketMovingAvgTicker *time.Ticker // Ticker for calculating moving averages
     ctx context.Context // Context for generate
     NodeCount uint64
 }
 
 // NewMonitor returns a monitor with defaults.
 func NewMonitor(ctx context.Context, numNodes uint64) *Monitor {
     m := &Monitor{
-        activeBuckets: make(map[string]*bucketMeasurement),
-        bucketThrottle: make(map[string]*throttle),
+        activeBuckets: make(map[string]map[string]*bucketMeasurement),
+        bucketThrottle: make(map[string]map[string]*throttle),
         bucketMovingAvgTicker: time.NewTicker(2 * time.Second),
         ctx: ctx,
         NodeCount: numNodes,
@@ -55,12 +54,19 @@ func NewMonitor(ctx context.Context, numNodes uint64) *Monitor {
     return m
 }
 
-func (m *Monitor) updateMeasurement(bucket string, bytes uint64) {
+func (m *Monitor) updateMeasurement(bucket, arn string, bytes uint64) {
     m.mlock.Lock()
     defer m.mlock.Unlock()
-    if m, ok := m.activeBuckets[bucket]; ok {
-        m.incrementBytes(bytes)
+    tm, ok := m.activeBuckets[bucket]
+    if !ok {
+        tm = make(map[string]*bucketMeasurement)
     }
+    measurement, ok := tm[arn]
+    if !ok {
+        measurement = &bucketMeasurement{}
+    }
+    measurement.incrementBytes(bytes)
+    m.activeBuckets[bucket][arn] = measurement
 }
 
 // SelectionFunction for buckets
@@ -83,27 +89,44 @@ func SelectBuckets(buckets ...string) SelectionFunction {
     }
 }
 
+// Details for the measured bandwidth
+type Details struct {
+    LimitInBytesPerSecond            int64   `json:"limitInBits"`
+    CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"`
+}
+
+// BucketBandwidthReport captures the details for all buckets.
+type BucketBandwidthReport struct {
+    BucketStats map[string]map[string]Details `json:"bucketStats,omitempty"`
+}
+
 // GetReport gets the report for all bucket bandwidth details.
-func (m *Monitor) GetReport(selectBucket SelectionFunction) *madmin.BucketBandwidthReport {
+func (m *Monitor) GetReport(selectBucket SelectionFunction) *BucketBandwidthReport {
     m.mlock.RLock()
     defer m.mlock.RUnlock()
     return m.getReport(selectBucket)
 }
 
-func (m *Monitor) getReport(selectBucket SelectionFunction) *madmin.BucketBandwidthReport {
-    report := &madmin.BucketBandwidthReport{
-        BucketStats: make(map[string]madmin.BandwidthDetails),
+func (m *Monitor) getReport(selectBucket SelectionFunction) *BucketBandwidthReport {
+    report := &BucketBandwidthReport{
+        BucketStats: make(map[string]map[string]Details),
     }
-    for bucket, bucketMeasurement := range m.activeBuckets {
+    for bucket, bucketMeasurementMap := range m.activeBuckets {
         if !selectBucket(bucket) {
             continue
         }
         m.tlock.RLock()
-        bucketThrottle, ok := m.bucketThrottle[bucket]
-        if ok {
-            report.BucketStats[bucket] = madmin.BandwidthDetails{
-                LimitInBytesPerSecond: bucketThrottle.NodeBandwidthPerSec * int64(m.NodeCount),
-                CurrentBandwidthInBytesPerSecond: bucketMeasurement.getExpMovingAvgBytesPerSecond(),
+        report.BucketStats[bucket] = make(map[string]Details)
+        if tgtThrottle, ok := m.bucketThrottle[bucket]; ok {
+            for arn, throttle := range tgtThrottle {
+                var currBw float64
+                if bucketMeasurement, ok := bucketMeasurementMap[arn]; ok {
+                    currBw = bucketMeasurement.getExpMovingAvgBytesPerSecond()
+                }
+                report.BucketStats[bucket][arn] = Details{
+                    LimitInBytesPerSecond:            throttle.NodeBandwidthPerSec * int64(m.NodeCount),
+                    CurrentBandwidthInBytesPerSecond: currBw,
+                }
             }
         }
         m.tlock.RUnlock()
@@ -127,24 +150,27 @@ func (m *Monitor) updateMovingAvg() {
     m.mlock.Lock()
     defer m.mlock.Unlock()
     for _, bucketMeasurement := range m.activeBuckets {
-        bucketMeasurement.updateExponentialMovingAverage(time.Now())
+        for _, measurement := range bucketMeasurement {
+            measurement.updateExponentialMovingAverage(time.Now())
+        }
     }
 }
 
-func (m *Monitor) getBucketMeasurement(bucket string, initTime time.Time) *bucketMeasurement {
+func (m *Monitor) getBucketMeasurement(bucket, arn string, initTime time.Time) map[string]*bucketMeasurement {
     bucketTracker, ok := m.activeBuckets[bucket]
     if !ok {
-        bucketTracker = newBucketMeasurement(initTime)
+        bucketTracker = make(map[string]*bucketMeasurement)
+        bucketTracker[arn] = newBucketMeasurement(initTime)
         m.activeBuckets[bucket] = bucketTracker
     }
     return bucketTracker
 }
 
 // track returns the measurement object for bucket
-func (m *Monitor) track(bucket string) {
+func (m *Monitor) track(bucket, arn string) {
     m.mlock.Lock()
     defer m.mlock.Unlock()
-    m.getBucketMeasurement(bucket, time.Now())
+    m.getBucketMeasurement(bucket, arn, time.Now())
 }
 
 // DeleteBucket deletes monitoring the 'bucket'
@@ -157,34 +183,54 @@ func (m *Monitor) DeleteBucket(bucket string) {
     m.mlock.Unlock()
 }
 
+// DeleteBucketThrottle deletes monitoring for a bucket's target
+func (m *Monitor) DeleteBucketThrottle(bucket, arn string) {
+    m.tlock.Lock()
+    delete(m.bucketThrottle, bucket)
+    m.tlock.Unlock()
+    m.mlock.Lock()
+    delete(m.activeBuckets, bucket)
+    m.mlock.Unlock()
+}
+
 // throttle returns currently configured throttle for this bucket
-func (m *Monitor) throttle(bucket string) *throttle {
+func (m *Monitor) throttle(bucket, arn string) *throttle {
     m.tlock.RLock()
     defer m.tlock.RUnlock()
-    return m.bucketThrottle[bucket]
+    return m.bucketThrottle[bucket][arn]
 }
 
 // SetBandwidthLimit sets the bandwidth limit for a bucket
-func (m *Monitor) SetBandwidthLimit(bucket string, limit int64) {
+func (m *Monitor) SetBandwidthLimit(bucket, arn string, limit int64) {
     m.tlock.Lock()
     defer m.tlock.Unlock()
     bw := limit / int64(m.NodeCount)
-    t, ok := m.bucketThrottle[bucket]
+    tgtMap, ok := m.bucketThrottle[bucket]
     if !ok {
-        t = &throttle{
+        tgtMap = make(map[string]*throttle)
+        tgtMap[arn] = &throttle{
             NodeBandwidthPerSec: bw,
         }
     }
-    t.NodeBandwidthPerSec = bw
-    newlimit := rate.Every(time.Second / time.Duration(t.NodeBandwidthPerSec))
-    t.Limiter = rate.NewLimiter(newlimit, int(t.NodeBandwidthPerSec))
-    m.bucketThrottle[bucket] = t
+    th, ok := tgtMap[arn]
+    if !ok {
+        th = &throttle{}
+    }
+    th.NodeBandwidthPerSec = bw
+    tgtMap[arn] = th
+    newlimit := rate.Every(time.Second / time.Duration(tgtMap[arn].NodeBandwidthPerSec))
+    tgtMap[arn].Limiter = rate.NewLimiter(newlimit, int(tgtMap[arn].NodeBandwidthPerSec))
+    m.bucketThrottle[bucket] = tgtMap
 }
 
 // IsThrottled returns true if a bucket has bandwidth throttling enabled.
-func (m *Monitor) IsThrottled(bucket string) bool {
+func (m *Monitor) IsThrottled(bucket, arn string) bool {
     m.tlock.RLock()
     defer m.tlock.RUnlock()
-    _, ok := m.bucketThrottle[bucket]
+    th, ok := m.bucketThrottle[bucket]
+    if !ok {
+        return ok
+    }
+    _, ok = th[arn]
     return ok
 }
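SetBandwidthLimit above keeps one throttle per (bucket, target ARN): the configured limit is divided by the node count and turned into a golang.org/x/time/rate limiter whose refill rate and burst are that per-node share. A self-contained sketch of just that construction; the helper name is local to the sketch, and, as in the diff, callers only reach this path with a non-zero limit (a zero limit deletes the throttle instead).

package main

import (
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func perTargetLimiter(limitBytesPerSec int64, nodeCount uint64) *rate.Limiter {
    bw := limitBytesPerSec / int64(nodeCount) // each node enforces its share
    // One token per byte: refill at bw bytes/sec with a burst of bw bytes,
    // mirroring rate.Every(time.Second/time.Duration(bw)) in the diff.
    return rate.NewLimiter(rate.Every(time.Second/time.Duration(bw)), int(bw))
}

func main() {
    l := perTargetLimiter(100<<20, 4) // e.g. a 100 MiB/s limit split across 4 nodes
    fmt.Println("per-node refill:", l.Limit(), "burst:", l.Burst())
}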
@@ -21,8 +21,6 @@ import (
     "reflect"
     "testing"
     "time"
-
-    "github.com/minio/madmin-go/v2"
 )
 
 const (
@@ -31,7 +29,7 @@ const (
 
 func TestMonitor_GetReport(t *testing.T) {
     type fields struct {
-        activeBuckets map[string]*bucketMeasurement
+        activeBuckets map[string]map[string]*bucketMeasurement
         endTime time.Time
         update2 uint64
         endTime2 time.Time
@@ -44,44 +42,52 @@ func TestMonitor_GetReport(t *testing.T) {
     tests := []struct {
         name string
         fields fields
-        want *madmin.BucketBandwidthReport
-        want2 *madmin.BucketBandwidthReport
+        want *BucketBandwidthReport
+        want2 *BucketBandwidthReport
     }{
         {
             name: "ZeroToOne",
             fields: fields{
-                activeBuckets: map[string]*bucketMeasurement{
-                    "bucket": m0,
+                activeBuckets: map[string]map[string]*bucketMeasurement{
+                    "bucket": {
+                        "arn": m0,
+                    },
                 },
                 endTime: start.Add(1 * time.Second),
                 update2: oneMiB,
                 endTime2: start.Add(2 * time.Second),
             },
-            want: &madmin.BucketBandwidthReport{
-                BucketStats: map[string]madmin.BandwidthDetails{"bucket": {LimitInBytesPerSecond: 1024 * 1024, CurrentBandwidthInBytesPerSecond: 0}},
+            want: &BucketBandwidthReport{
+                BucketStats: map[string]map[string]Details{
+                    "bucket": {
+                        "arn": Details{LimitInBytesPerSecond: 1024 * 1024, CurrentBandwidthInBytesPerSecond: 0},
+                    },
+                },
             },
-            want2: &madmin.BucketBandwidthReport{
-                BucketStats: map[string]madmin.BandwidthDetails{"bucket": {LimitInBytesPerSecond: 1024 * 1024, CurrentBandwidthInBytesPerSecond: (1024 * 1024) / start.Add(2*time.Second).Sub(start.Add(1*time.Second)).Seconds()}},
+            want2: &BucketBandwidthReport{
+                BucketStats: map[string]map[string]Details{"bucket": {"arn": Details{LimitInBytesPerSecond: 1024 * 1024, CurrentBandwidthInBytesPerSecond: (1024 * 1024) / start.Add(2*time.Second).Sub(start.Add(1*time.Second)).Seconds()}}},
             },
         },
         {
             name: "OneToTwo",
             fields: fields{
-                activeBuckets: map[string]*bucketMeasurement{
-                    "bucket": m1MiBPS,
+                activeBuckets: map[string]map[string]*bucketMeasurement{
+                    "bucket": {
+                        "arn": m1MiBPS,
+                    },
                 },
                 endTime: start.Add(1 * time.Second),
                 update2: 2 * oneMiB,
                 endTime2: start.Add(2 * time.Second),
             },
-            want: &madmin.BucketBandwidthReport{
-                BucketStats: map[string]madmin.BandwidthDetails{"bucket": {LimitInBytesPerSecond: 1024 * 1024, CurrentBandwidthInBytesPerSecond: float64(oneMiB)}},
+            want: &BucketBandwidthReport{
+                BucketStats: map[string]map[string]Details{"bucket": {"arn": Details{LimitInBytesPerSecond: 1024 * 1024, CurrentBandwidthInBytesPerSecond: float64(oneMiB)}}},
             },
-            want2: &madmin.BucketBandwidthReport{
-                BucketStats: map[string]madmin.BandwidthDetails{"bucket": {
+            want2: &BucketBandwidthReport{
+                BucketStats: map[string]map[string]Details{"bucket": {"arn": Details{
                     LimitInBytesPerSecond: 1024 * 1024,
                     CurrentBandwidthInBytesPerSecond: exponentialMovingAverage(betaBucket, float64(oneMiB), 2*float64(oneMiB)),
-                }},
+                }}},
             },
         },
     }
@@ -92,21 +98,23 @@ func TestMonitor_GetReport(t *testing.T) {
             thr := throttle{
                 NodeBandwidthPerSec: 1024 * 1024,
             }
+            th := make(map[string]map[string]*throttle)
+            th["bucket"] = map[string]*throttle{"arn": &thr}
             m := &Monitor{
                 activeBuckets: tt.fields.activeBuckets,
-                bucketThrottle: map[string]*throttle{"bucket": &thr},
+                bucketThrottle: th,
                 NodeCount: 1,
             }
-            m.activeBuckets["bucket"].updateExponentialMovingAverage(tt.fields.endTime)
+            m.activeBuckets["bucket"]["arn"].updateExponentialMovingAverage(tt.fields.endTime)
             got := m.GetReport(SelectBuckets())
             if !reflect.DeepEqual(got, tt.want) {
                 t.Errorf("GetReport() = %v, want %v", got, tt.want)
             }
-            m.activeBuckets["bucket"].incrementBytes(tt.fields.update2)
-            m.activeBuckets["bucket"].updateExponentialMovingAverage(tt.fields.endTime2)
+            m.activeBuckets["bucket"]["arn"].incrementBytes(tt.fields.update2)
+            m.activeBuckets["bucket"]["arn"].updateExponentialMovingAverage(tt.fields.endTime2)
             got = m.GetReport(SelectBuckets())
-            if !reflect.DeepEqual(got, tt.want2) {
-                t.Errorf("GetReport() = %v, want %v", got, tt.want2)
+            if !reflect.DeepEqual(got.BucketStats, tt.want2.BucketStats) {
+                t.Errorf("GetReport() = %v, want %v", got.BucketStats, tt.want2.BucketStats)
             }
         })
     }
@@ -36,6 +36,7 @@ type MonitoredReader struct {
 // MonitorReaderOptions provides configurable options for monitor reader implementation.
 type MonitorReaderOptions struct {
     Bucket string
+    TargetARN string
     HeaderSize int
 }
 
@@ -79,7 +80,7 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) {
         r.lastErr = err
         return
     }
-    r.m.updateMeasurement(r.opts.Bucket, uint64(tokens))
+    r.m.updateMeasurement(r.opts.Bucket, r.opts.TargetARN, uint64(tokens))
     return
 }
 
@@ -88,11 +89,11 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) {
 func NewMonitoredReader(ctx context.Context, m *Monitor, r io.Reader, opts *MonitorReaderOptions) *MonitoredReader {
     reader := MonitoredReader{
         r: r,
-        throttle: m.throttle(opts.Bucket),
+        throttle: m.throttle(opts.Bucket, opts.TargetARN),
         m: m,
         opts: opts,
         ctx: ctx,
     }
-    reader.m.track(opts.Bucket)
+    reader.m.track(opts.Bucket, opts.TargetARN)
     return &reader
 }
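The MonitoredReader changes at the end tag every read with both the bucket and the target ARN, so the monitor can attribute bytes to the right remote target. Below is a toy io.Reader wrapper, not the MinIO implementation, that only illustrates the per-(bucket, ARN) accounting the updated updateMeasurement(bucket, arn, n) call performs:

package main

import (
    "fmt"
    "io"
    "strings"
)

type byteCounts map[string]map[string]uint64 // bucket -> target ARN -> bytes

func (c byteCounts) add(bucket, arn string, n uint64) {
    if c[bucket] == nil {
        c[bucket] = map[string]uint64{}
    }
    c[bucket][arn] += n
}

type countingReader struct {
    r           io.Reader
    bucket, arn string
    counts      byteCounts
}

func (cr *countingReader) Read(p []byte) (int, error) {
    n, err := cr.r.Read(p)
    if n > 0 {
        cr.counts.add(cr.bucket, cr.arn, uint64(n)) // attribute bytes to this target
    }
    return n, err
}

func main() {
    counts := byteCounts{}
    cr := &countingReader{r: strings.NewReader("replicated payload"), bucket: "mybucket", arn: "arn-1", counts: counts}
    io.Copy(io.Discard, cr)
    fmt.Println(counts["mybucket"]["arn-1"], "bytes attributed to arn-1")
}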