// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"encoding/binary"
	"math"
	"sync"
	"time"

	"github.com/minio/minio/internal/bucket/replication"
)
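
// hasReplicationUsage returns true if any per-ARN stat recorded for this bucket
// reports replication activity.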
func (b *BucketReplicationStats) hasReplicationUsage() bool {
	for _, s := range b.Stats {
		if s.hasReplicationUsage() {
			return true
		}
	}
	return false
}

// ReplicationStats holds the global in-memory replication stats
type ReplicationStats struct {
	Cache             map[string]*BucketReplicationStats
	UsageCache        map[string]*BucketReplicationStats
	mostRecentStats   BucketStatsMap
	sync.RWMutex                   // mutex for Cache
	ulock             sync.RWMutex // mutex for UsageCache
	mostRecentStatsMu sync.Mutex   // mutex for mostRecentStats
}

// Delete deletes in-memory replication statistics for a bucket.
func (r *ReplicationStats) Delete(bucket string) {
	if r == nil {
		return
	}

	r.Lock()
	defer r.Unlock()
	delete(r.Cache, bucket)

	r.ulock.Lock()
	defer r.ulock.Unlock()
	delete(r.UsageCache, bucket)
}

// UpdateReplicaStat updates in-memory replica statistics with new values.
func (r *ReplicationStats) UpdateReplicaStat(bucket string, n int64) {
	if r == nil {
		return
	}

	r.Lock()
	defer r.Unlock()
	bs, ok := r.Cache[bucket]
	if !ok {
		bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
	}
	bs.ReplicaSize += n
	r.Cache[bucket] = bs
}

// Update updates in-memory replication statistics with new values.
func (r *ReplicationStats) Update(bucket string, arn string, n int64, duration time.Duration, status, prevStatus replication.StatusType, opType replication.Type) {
	if r == nil {
		return
	}
	r.Lock()
	defer r.Unlock()

	bs, ok := r.Cache[bucket]
	if !ok {
		bs = &BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
		r.Cache[bucket] = bs
	}
	b, ok := bs.Stats[arn]
	if !ok {
		b = &BucketReplicationStat{}
		bs.Stats[arn] = b
	}
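	// Adjust this target's counters for the prevStatus -> status transition so an
	// object's size is accounted only once per target.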
	switch status {
	case replication.Pending:
		if opType.IsDataReplication() && prevStatus != status {
			b.PendingSize += n
			b.PendingCount++
		}
	case replication.Completed:
		switch prevStatus { // adjust counters based on previous state
		case replication.Pending:
			b.PendingCount--
		case replication.Failed:
			b.FailedCount--
		}
		if opType.IsDataReplication() {
			b.ReplicatedSize += n
			switch prevStatus {
			case replication.Pending:
				b.PendingSize -= n
			case replication.Failed:
				b.FailedSize -= n
			}
			if duration > 0 {
				b.Latency.update(n, duration)
			}
		}
	case replication.Failed:
		if opType.IsDataReplication() {
			if prevStatus == replication.Pending {
				b.FailedSize += n
				b.FailedCount++
				b.PendingSize -= n
				b.PendingCount--
			}
		}
	case replication.Replica:
		if opType == replication.ObjectReplicationType {
			b.ReplicaSize += n
		}
	}
}
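
// A typical caller records a completed replication of n bytes to a target ARN
// roughly as follows (illustrative sketch only; real call sites in the replication
// workers pass their own measured size, duration and previous status):
//
//	globalReplicationStats.Update(bucket, arn, size, timeTaken,
//		replication.Completed, replication.Pending, replication.ObjectReplicationType)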

// GetInitialUsage gets replication metrics available at the time of cluster initialization
func (r *ReplicationStats) GetInitialUsage(bucket string) BucketReplicationStats {
	if r == nil {
		return BucketReplicationStats{}
	}
	r.ulock.RLock()
	defer r.ulock.RUnlock()
	st, ok := r.UsageCache[bucket]
	if !ok {
		return BucketReplicationStats{}
	}
	return st.Clone()
}

// GetAll returns replication metrics for all buckets at once.
func (r *ReplicationStats) GetAll() map[string]BucketReplicationStats {
	if r == nil {
		return map[string]BucketReplicationStats{}
	}

	r.RLock()
	defer r.RUnlock()

	bucketReplicationStats := make(map[string]BucketReplicationStats, len(r.Cache))
	for k, v := range r.Cache {
		bucketReplicationStats[k] = v.Clone()
	}

	return bucketReplicationStats
}

// Get replication metrics for a bucket from this node since this node came up.
func (r *ReplicationStats) Get(bucket string) BucketReplicationStats {
	if r == nil {
		return BucketReplicationStats{Stats: make(map[string]*BucketReplicationStat)}
	}

	r.RLock()
	defer r.RUnlock()

	st, ok := r.Cache[bucket]
	if !ok {
		return BucketReplicationStats{}
	}
	return st.Clone()
}

// NewReplicationStats initializes in-memory replication statistics
func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *ReplicationStats {
	return &ReplicationStats{
		Cache:      make(map[string]*BucketReplicationStats),
		UsageCache: make(map[string]*BucketReplicationStats),
	}
}

// loadInitialReplicationMetrics loads replication metrics at cluster start from the latest
// replication stats saved in .minio.sys/buckets/replication/node-name.stats, falling back to
// the replication stats in data usage to remain backward compatible.
func (r *ReplicationStats) loadInitialReplicationMetrics(ctx context.Context) {
	m := make(map[string]*BucketReplicationStats)
	if stats, err := globalReplicationPool.loadStatsFromDisk(); err == nil {
		for b, st := range stats {
			c := st.Clone()
			m[b] = &c
		}
		r.ulock.Lock()
		r.UsageCache = m
		r.ulock.Unlock()
		return
	}
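	// No saved stats on disk; poll the data usage backend every five seconds until
	// it is available (or the context is canceled) and seed the usage cache from it.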
	rTimer := time.NewTimer(time.Second * 5)
	defer rTimer.Stop()
	var (
		dui DataUsageInfo
		err error
	)
outer:
	for {
		select {
		case <-ctx.Done():
			return
		case <-rTimer.C:
			dui, err = loadDataUsageFromBackend(GlobalContext, newObjectLayerFn())
			// Data usage is available once it loads from the backend without error.
			if err == nil {
				break outer
			}
			rTimer.Reset(time.Second * 5)
		}
	}
	for bucket, usage := range dui.BucketsUsage {
		b := &BucketReplicationStats{
			Stats: make(map[string]*BucketReplicationStat, len(usage.ReplicationInfo)),
		}
		for arn, uinfo := range usage.ReplicationInfo {
			b.Stats[arn] = &BucketReplicationStat{
				FailedSize:     int64(uinfo.ReplicationFailedSize),
				ReplicatedSize: int64(uinfo.ReplicatedSize),
				ReplicaSize:    int64(uinfo.ReplicaSize),
				FailedCount:    int64(uinfo.ReplicationFailedCount),
			}
		}
		b.ReplicaSize += int64(usage.ReplicaSize)
		if b.hasReplicationUsage() {
			m[bucket] = b
		}
	}
	r.ulock.Lock()
	r.UsageCache = m
	r.ulock.Unlock()
}

// serializeStats will serialize the current stats.
// Will return (nil, nil) if no data.
func (r *ReplicationStats) serializeStats() ([]byte, error) {
	if r == nil {
		return nil, nil
	}
	r.mostRecentStatsMu.Lock()
	defer r.mostRecentStatsMu.Unlock()
	if len(r.mostRecentStats.Stats) == 0 {
		return nil, nil
	}
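	// Serialized layout: bytes [0:2) hold the format identifier, bytes [2:4) the
	// version, followed by the msgp-encoded BucketStatsMap.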
	data := make([]byte, 4, 4+r.mostRecentStats.Msgsize())
	// Add the replication stats meta header.
	binary.LittleEndian.PutUint16(data[0:2], replStatsMetaFormat)
	binary.LittleEndian.PutUint16(data[2:4], replStatsVersion)
	// Add data
	return r.mostRecentStats.MarshalMsg(data)
}
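
// getAllLatest returns the latest replication stats for every bucket by merging the
// in-memory stats gathered from all peers with the supplied data usage snapshot.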
func (r *ReplicationStats) getAllLatest(bucketsUsage map[string]BucketUsageInfo) (bucketsReplicationStats map[string]BucketReplicationStats) {
	peerBucketStatsList := globalNotificationSys.GetClusterAllBucketStats(GlobalContext)
	bucketsReplicationStats = make(map[string]BucketReplicationStats, len(bucketsUsage))

	for bucket, u := range bucketsUsage {
		bucketStats := make([]BucketStats, len(peerBucketStatsList))
		for i, peerBucketStats := range peerBucketStatsList {
			bucketStat, ok := peerBucketStats.Stats[bucket]
			if !ok {
				continue
			}
			bucketStats[i] = bucketStat
		}
		bucketsReplicationStats[bucket] = r.calculateBucketReplicationStats(bucket, u, bucketStats)
	}
	return bucketsReplicationStats
}
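
// calculateBucketReplicationStats aggregates per-target replication stats for a bucket
// across all peers, folds in the usage recorded at cluster start, and normalizes the
// result against the latest data usage for the bucket.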
func (r *ReplicationStats) calculateBucketReplicationStats(bucket string, u BucketUsageInfo, bucketStats []BucketStats) (s BucketReplicationStats) {
	if r == nil {
		s = BucketReplicationStats{
			Stats: make(map[string]*BucketReplicationStat),
		}
		return s
	}

	// accumulate cluster bucket stats
	stats := make(map[string]*BucketReplicationStat)
	var totReplicaSize int64
	for _, bucketStat := range bucketStats {
		totReplicaSize += bucketStat.ReplicationStats.ReplicaSize
		for arn, stat := range bucketStat.ReplicationStats.Stats {
			oldst := stats[arn]
			if oldst == nil {
				oldst = &BucketReplicationStat{}
			}
			stats[arn] = &BucketReplicationStat{
				FailedCount:    stat.FailedCount + oldst.FailedCount,
				FailedSize:     stat.FailedSize + oldst.FailedSize,
				ReplicatedSize: stat.ReplicatedSize + oldst.ReplicatedSize,
				Latency:        stat.Latency.merge(oldst.Latency),
				PendingCount:   stat.PendingCount + oldst.PendingCount,
				PendingSize:    stat.PendingSize + oldst.PendingSize,
			}
		}
	}

	// add initial usage stat to cluster stats
	usageStat := globalReplicationStats.GetInitialUsage(bucket)

	totReplicaSize += usageStat.ReplicaSize
	for arn, stat := range usageStat.Stats {
		st, ok := stats[arn]
		if !ok {
			st = &BucketReplicationStat{}
			stats[arn] = st
		}
		st.ReplicatedSize += stat.ReplicatedSize
		st.FailedSize += stat.FailedSize
		st.FailedCount += stat.FailedCount
		st.PendingSize += stat.PendingSize
		st.PendingCount += stat.PendingCount
	}

	s = BucketReplicationStats{
		Stats: make(map[string]*BucketReplicationStat, len(stats)),
	}

	var latestTotReplicatedSize int64
	for _, st := range u.ReplicationInfo {
		latestTotReplicatedSize += int64(st.ReplicatedSize)
	}

	// normalize computed real time stats with the latest usage stat
	for arn, tgtstat := range stats {
		st := BucketReplicationStat{}
		bu, ok := u.ReplicationInfo[arn]
		if !ok {
			bu = BucketTargetUsageInfo{}
		}
		// use in-memory replication stats if they are ahead of the usage info.
		st.ReplicatedSize = int64(bu.ReplicatedSize)
		if tgtstat.ReplicatedSize >= int64(bu.ReplicatedSize) {
			st.ReplicatedSize = tgtstat.ReplicatedSize
		}
		s.ReplicatedSize += st.ReplicatedSize
		// Reset FailedSize and FailedCount to 0 for negative overflows, which can
		// happen since the data usage picture can lag behind the actual usage state at the time of cluster start.
		st.FailedSize = int64(math.Max(float64(tgtstat.FailedSize), 0))
		st.FailedCount = int64(math.Max(float64(tgtstat.FailedCount), 0))
		st.PendingSize = int64(math.Max(float64(tgtstat.PendingSize), 0))
		st.PendingCount = int64(math.Max(float64(tgtstat.PendingCount), 0))
		st.Latency = tgtstat.Latency

		s.Stats[arn] = &st
		s.FailedSize += st.FailedSize
		s.FailedCount += st.FailedCount
		s.PendingCount += st.PendingCount
		s.PendingSize += st.PendingSize
	}
	// normalize overall stats
	s.ReplicaSize = int64(math.Max(float64(totReplicaSize), float64(u.ReplicaSize)))
	s.ReplicatedSize = int64(math.Max(float64(s.ReplicatedSize), float64(latestTotReplicatedSize)))
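	// Cache the computed stats as the most recent snapshot so serializeStats can
	// serialize them later.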
	r.mostRecentStatsMu.Lock()
	if len(r.mostRecentStats.Stats) == 0 {
		r.mostRecentStats = BucketStatsMap{Stats: make(map[string]BucketStats, 1), Timestamp: UTCNow()}
	}
	if len(s.Stats) > 0 {
		r.mostRecentStats.Stats[bucket] = BucketStats{ReplicationStats: s}
	}
	r.mostRecentStats.Timestamp = UTCNow()
	r.mostRecentStatsMu.Unlock()
	return s
}

// getLatestReplicationStats returns the most current of the in-memory replication stats
// and the data usage info from the crawler.
func (r *ReplicationStats) getLatestReplicationStats(bucket string, u BucketUsageInfo) (s BucketReplicationStats) {
	bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket)
	return r.calculateBucketReplicationStats(bucket, u, bucketStats)
}