diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go
index a7bcfe607..fa9d66ca7 100644
--- a/cmd/erasure-sets.go
+++ b/cmd/erasure-sets.go
@@ -375,9 +375,14 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
 
 	mutex := newNSLock(globalIsDistErasure)
 
+	// Number of buffers, max 2GB.
+	n := setCount * setDriveCount
+	if n > 100 {
+		n = 100
+	}
 	// Initialize byte pool once for all sets, bpool size is set to
 	// setCount * setDriveCount with each memory upto blockSizeV1.
-	bp := bpool.NewBytePoolCap(setCount*setDriveCount, blockSizeV1, blockSizeV1*2)
+	bp := bpool.NewBytePoolCap(n, blockSizeV1, blockSizeV1*2)
 
 	for i := 0; i < setCount; i++ {
 		s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
diff --git a/cmd/erasure.go b/cmd/erasure.go
index 9c081a87d..223bf9d4f 100644
--- a/cmd/erasure.go
+++ b/cmd/erasure.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"math/rand"
 	"sort"
 	"sync"
 	"time"
@@ -294,7 +295,8 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 	var saverWg sync.WaitGroup
 	saverWg.Add(1)
 	go func() {
-		const updateTime = 30 * time.Second
+		// Add jitter to the update time so multiple sets don't sync up.
+		var updateTime = 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
 		t := time.NewTicker(updateTime)
 		defer t.Stop()
 		defer saverWg.Done()
@@ -377,11 +379,15 @@ func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []Buc
 		if r := cache.root(); r != nil {
 			root = cache.flatten(*r)
 		}
+		t := time.Now()
 		bucketResults <- dataUsageEntryInfo{
 			Name:   cache.Info.Name,
 			Parent: dataUsageRoot,
 			Entry:  root,
 		}
+		// We want to avoid synchronizing up all writes in case
+		// the results are piled up.
+		time.Sleep(time.Duration(float64(time.Since(t)) * rand.Float64()))
 		// Save cache
 		logger.LogIf(ctx, cache.save(ctx, er, cacheName))
 	}
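
For context on the first hunk: the shared pool can retain up to `n` buffers of `blockSizeV1*2` bytes each, so on large deployments `setCount * setDriveCount` buffers would grow with the drive count; capping the count at 100 keeps the worst case near 2 GiB. A minimal standalone sketch of that capping arithmetic (the `blockSize` and `maxBuffers` values below are hypothetical stand-ins, not MinIO's constants):

```go
// Sketch of the sizing logic from the first hunk, as a standalone program.
package main

import "fmt"

// cappedBufferCount returns setCount*setDriveCount, limited to maxBuffers,
// so the buffer pool's worst-case retained memory stays bounded.
func cappedBufferCount(setCount, setDriveCount, maxBuffers int) int {
	n := setCount * setDriveCount
	if n > maxBuffers {
		n = maxBuffers
	}
	return n
}

func main() {
	const (
		blockSize  = 10 << 20 // assumed 10 MiB block, standing in for blockSizeV1
		maxBuffers = 100      // cap used in the diff
	)
	// A 16x16 deployment would otherwise ask for 256 buffers.
	n := cappedBufferCount(16, 16, maxBuffers)
	fmt.Printf("buffers=%d worst-case pool memory=%d MiB\n", n, n*(blockSize*2)/(1<<20))
}
```

With 100 buffers at 20 MiB apiece the worst case works out to 2000 MiB, which is where the `max 2GB` comment in the diff comes from.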
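
The `cmd/erasure.go` hunks apply the same idea twice: desynchronize periodic work across erasure sets by adding randomness, first up to 10 seconds of jitter on the 30-second update ticker, then a sleep of a random fraction of however long the result send took before saving the cache. A self-contained sketch of that jitter pattern (the `sendResult` helper and durations are illustrative, not MinIO's crawler code):

```go
// Sketch of the jitter pattern used in cmd/erasure.go. Only the
// randomization technique mirrors the diff; everything else is a stand-in.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// sendResult stands in for the bucketResults channel send in the diff.
func sendResult() {
	time.Sleep(50 * time.Millisecond)
}

func main() {
	// Base interval plus up to 10s of random jitter: tickers started at the
	// same moment on different sets will fire at different times.
	updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
	t := time.NewTicker(updateTime)
	defer t.Stop()
	fmt.Println("ticker interval:", updateTime)

	// Sleep a random fraction of how long the previous step took, so writers
	// that pile up behind a slow consumer drift apart instead of saving in
	// lockstep.
	start := time.Now()
	sendResult()
	elapsed := time.Since(start)
	time.Sleep(time.Duration(float64(elapsed) * rand.Float64()))
	fmt.Println("post-send sleep bounded by:", elapsed)
}
```

Because the sleep is bounded by the observed send time, a fast send adds almost no delay, while a backed-up channel naturally spreads the subsequent cache saves apart.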