mirror of https://github.com/minio/minio.git
fix: cleanup locking, cancel context upon lock timeout (#12183)
Upon errors to acquire the lock, the context would still leak, since cancel would never be called. Since the lock is never acquired, proactively cancel it before returning.
@@ -91,19 +91,19 @@ func (s *safeDuration) Get() time.Duration {
 // runDataScanner will start a data scanner.
 // The function will block until the context is canceled.
 // There should only ever be one scanner running per cluster.
-func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
+func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
 	// Make sure only 1 scanner is running on the cluster.
 	locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
+	var ctx context.Context
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	for {
-		var err error
-		var cancel context.CancelFunc
-		ctx, cancel, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
+		lkctx, err := locker.GetLock(pctx, dataScannerLeaderLockTimeout)
 		if err != nil {
 			time.Sleep(time.Duration(r.Float64() * float64(scannerCycle.Get())))
 			continue
 		}
-		defer cancel()
+		ctx = lkctx.Context()
+		defer lkctx.Cancel()
 		break
 		// No unlock for "leader" lock.
 	}
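
For illustration, here is a minimal, self-contained Go sketch of the pattern this commit enforces. The tryLock helper, its acquired flag, and errLockTimeout are hypothetical stand-ins, not MinIO's actual NSLock/GetLock API: the point is that a helper which derives a cancellable context from its parent must cancel that context itself on the error path, because the caller never receives the cancel func there.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errLockTimeout = errors.New("lock acquisition timed out")

// tryLock mirrors the shape of the fixed code: on success the caller owns
// the derived context and its cancel func; on failure tryLock cancels the
// derived context itself before returning, so nothing leaks.
// The acquired flag is a stand-in for whether the lock attempt succeeded.
func tryLock(parent context.Context, timeout time.Duration, acquired bool) (context.Context, context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(parent, timeout)
	if !acquired {
		// The fix: proactively cancel, since the caller never gets cancel
		// on this path and could not release the context's resources.
		cancel()
		return nil, nil, errLockTimeout
	}
	return ctx, cancel, nil
}

func main() {
	// Failure path: the derived context is cancelled inside tryLock.
	if _, _, err := tryLock(context.Background(), time.Second, false); err != nil {
		fmt.Println("lock failed:", err)
	}

	// Success path: the caller is responsible for cancelling.
	ctx, cancel, err := tryLock(context.Background(), time.Second, true)
	if err == nil {
		defer cancel()
		fmt.Println("lock held; context still live:", ctx.Err() == nil)
	}
}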