diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go
index f220f9c79..739152974 100644
--- a/cmd/data-usage-cache.go
+++ b/cmd/data-usage-cache.go
@@ -522,7 +522,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 		dataUsageBucket,
 		name,
 		NewPutObjReader(r),
-		ObjectOptions{})
+		ObjectOptions{NoLock: true})
 	if isErrBucketNotFound(err) {
 		return nil
 	}
diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go
index 57acdaa8f..3954c653b 100644
--- a/cmd/erasure-sets.go
+++ b/cmd/erasure-sets.go
@@ -357,9 +357,14 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 
 	mutex := newNSLock(globalIsDistErasure)
 
+	// Number of buffers, max 2GB.
+	n := setCount * setDriveCount
+	if n > 100 {
+		n = 100
+	}
 	// Initialize byte pool once for all sets, bpool size is set to
 	// setCount * setDriveCount with each memory upto blockSizeV1.
-	bp := bpool.NewBytePoolCap(setCount*setDriveCount, blockSizeV1, blockSizeV1*2)
+	bp := bpool.NewBytePoolCap(n, blockSizeV1, blockSizeV1*2)
 
 	for i := 0; i < setCount; i++ {
 		s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
diff --git a/cmd/erasure.go b/cmd/erasure.go
index 0749e4df8..1a848a69a 100644
--- a/cmd/erasure.go
+++ b/cmd/erasure.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math/rand"
 	"sort"
 	"sync"
 	"time"
@@ -345,7 +346,8 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 	var saverWg sync.WaitGroup
 	saverWg.Add(1)
 	go func() {
-		const updateTime = 30 * time.Second
+		// Add jitter to the update time so multiple sets don't sync up.
+		var updateTime = 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
 		t := time.NewTicker(updateTime)
 		defer t.Stop()
 		defer saverWg.Done()
@@ -429,11 +431,15 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 		if r := cache.root(); r != nil {
 			root = cache.flatten(*r)
 		}
+		t := time.Now()
 		bucketResults <- dataUsageEntryInfo{
 			Name:   cache.Info.Name,
 			Parent: dataUsageRoot,
 			Entry:  root,
 		}
+		// We want to avoid synchronizing up all writes in case
+		// the results are piled up.
+		time.Sleep(time.Duration(float64(time.Since(t)) * rand.Float64()))
 		// Save cache
 		logger.LogIf(ctx, cache.save(ctx, er, cacheName))
 	}