Remove locks on usage cache (#16786)

commit a547bf517d (parent b984bf8d1a)
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math/rand"
 	"net/http"
 	"path"
 	"path/filepath"
@@ -847,37 +848,54 @@ type objectIO interface {
 // load the cache content with name from minioMetaBackgroundOpsBucket.
 // Only backend errors are returned as errors.
+// The loader is optimistic and has no locking, but tries 5 times before giving up.
 // If the object is not found or unable to deserialize d is cleared and nil error is returned.
 func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) error {
 	// Abandon if more than 5 minutes, so we don't hold up scanner.
 	ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
 	defer cancel()
 
-	r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, readLock, ObjectOptions{})
-	if err != nil {
-		switch err.(type) {
-		case ObjectNotFound:
-		case BucketNotFound:
-		case InsufficientReadQuorum:
-		case StorageErr:
-		default:
-			return toObjectErr(err, dataUsageBucket, name)
-		}
-		*d = dataUsageCache{}
-		return nil
-	}
-	defer r.Close()
-	if err := d.deserialize(r); err != nil {
-		*d = dataUsageCache{}
-		logger.LogOnceIf(ctx, err, err.Error())
-	}
-	return nil
-}
+	// Caches are read+written without locks,
+	retries := 0
+	for retries < 5 {
+		r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, noLock, ObjectOptions{NoLock: true})
+		if err != nil {
+			switch err.(type) {
+			case ObjectNotFound, BucketNotFound:
+			case InsufficientReadQuorum, StorageErr:
+				retries++
+				time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
+				continue
+			default:
+				return toObjectErr(err, dataUsageBucket, name)
+			}
+			*d = dataUsageCache{}
+			return nil
+		}
+		if err := d.deserialize(r); err != nil {
+			*d = dataUsageCache{}
+			logger.LogOnceIf(ctx, err, err.Error())
+			r.Close()
+			retries++
+			time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
+			continue
+		}
+		r.Close()
+		return nil
+	}
+	*d = dataUsageCache{}
+	return nil
+}
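The rewritten loader is a bounded optimistic read: no lock is taken, and transient failures (read quorum, storage errors, a failed deserialize) are retried up to 5 times with a random sub-second sleep so concurrent scanners do not retry in lockstep. A minimal standalone sketch of that retry-with-jitter shape, with fetch() and errTransient as hypothetical stand-ins for GetObjectNInfo and the quorum/storage error classes:

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

var errTransient = errors.New("transient: insufficient quorum")

// fetch is a hypothetical stand-in for an unlocked backend read.
func fetch() ([]byte, error) { return nil, errTransient }

// loadOptimistic tries up to 5 times, sleeping a random interval
// below one second between attempts, mirroring the loop above.
func loadOptimistic() ([]byte, error) {
	for retries := 0; retries < 5; retries++ {
		b, err := fetch()
		if err == nil {
			return b, nil
		}
		if !errors.Is(err, errTransient) {
			return nil, err // permanent errors surface immediately
		}
		time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
	}
	// Like the cache loader: give up quietly and let the caller
	// start from an empty state.
	return nil, nil
}

func main() { fmt.Println(loadOptimistic()) }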
 
 // Maximum running concurrent saves on server.
 var maxConcurrentScannerSaves = make(chan struct{}, 4)
 
 // save the content of the cache to minioMetaBackgroundOpsBucket with the provided name.
+// Note that no locking is done when saving.
 func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) error {
 	var r io.Reader
 
+	maxConcurrentScannerSaves <- struct{}{}
+	defer func() {
+		<-maxConcurrentScannerSaves
+	}()
 	// If big, do streaming...
 	size := int64(-1)
 	if len(d.Cache) > 10000 {
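maxConcurrentScannerSaves is a buffered-channel semaphore: sending into the channel acquires one of 4 slots (blocking when all are taken), receiving releases it, and the defer guarantees release on every return path. A self-contained sketch of the pattern, with the save body reduced to a hypothetical placeholder:

package main

import (
	"fmt"
	"sync"
	"time"
)

// At most 4 saves run at once, as in the diff.
var saveSlots = make(chan struct{}, 4)

func save(id int) {
	saveSlots <- struct{}{}        // acquire: blocks while 4 saves are in flight
	defer func() { <-saveSlots }() // release on every return path

	fmt.Println("saving", id)
	time.Sleep(50 * time.Millisecond) // stand-in for the real object write
}

func main() {
	var wg sync.WaitGroup
	wg.Add(10)
	for i := 0; i < 10; i++ {
		go func(i int) {
			defer wg.Done()
			save(i)
		}(i)
	}
	wg.Wait()
}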
@@ -909,7 +927,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 		dataUsageBucket,
 		name,
 		NewPutObjReader(hr),
-		ObjectOptions{})
+		ObjectOptions{NoLock: true})
 	if isErrBucketNotFound(err) {
 		return nil
 	}
@@ -449,6 +449,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
 	// Start one scanner per disk
 	var wg sync.WaitGroup
+	wg.Add(len(disks))
 
 	for i := range disks {
 		go func(i int) {
 			defer wg.Done()
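The added wg.Add(len(disks)) registers every scanner before any goroutine is launched; moving registration into the goroutines themselves could let wg.Wait observe a zero counter before all workers exist. A minimal sketch of the launch pattern, with scanDisk as a hypothetical worker:

package main

import (
	"fmt"
	"sync"
)

func scanDisk(i int) { fmt.Println("scanning disk", i) }

func main() {
	disks := make([]int, 8) // hypothetical: one entry per disk

	// Start one scanner per disk.
	var wg sync.WaitGroup
	wg.Add(len(disks)) // register the full count before launching

	for i := range disks {
		go func(i int) {
			defer wg.Done()
			scanDisk(i)
		}(i)
	}
	wg.Wait() // safe: the counter is no longer racing upward
}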
@@ -518,7 +519,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
 			if r := cache.root(); r != nil {
 				root = cache.flatten(*r)
 			}
-			t := time.Now()
 			select {
 			case <-ctx.Done():
 				return
@@ -528,9 +528,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
 				Entry: root,
 			}:
 			}
-			// We want to avoid synchronizing up all writes in case
-			// the results are piled up.
-			time.Sleep(time.Duration(float64(time.Since(t)) * rand.Float64()))
+
 			// Save cache
 			logger.LogIf(ctx, cache.save(ctx, er, cacheName))
 		}
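The deleted lines were a proportional-jitter throttle: time how long publishing a result took, then sleep a random fraction of that so piled-up writers drift apart rather than saving in bursts. Presumably the new channel semaphore on save makes this per-iteration pacing unnecessary. For reference, the removed technique as a standalone sketch, with publish as a hypothetical stand-in for sending a scan result:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// publish is a hypothetical stand-in for delivering a scan result.
func publish(i int) { fmt.Println("result", i) }

func main() {
	for i := 0; i < 3; i++ {
		t := time.Now()
		publish(i)
		// Sleep a random fraction of the elapsed time, so concurrent
		// workers desynchronize instead of writing in lockstep.
		time.Sleep(time.Duration(float64(time.Since(t)) * rand.Float64()))
	}
}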