fix: cleanup locking, cancel context upon lock timeout (#12183)

Upon errors acquiring the lock, the context would still
leak, since cancel would never be called. Since the lock
is never acquired, proactively cancel it before returning.
Harshavardhana
2021-04-29 20:55:21 -07:00
committed by GitHub
parent 0faa4e6187
commit 64f6020854
13 changed files with 175 additions and 128 deletions
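
To make the leak concrete, here is a minimal sketch of the pre-fix pattern (illustrative code, not MinIO's actual implementation; tryAcquire, errLockTimeout, and acquireLeaderLock are hypothetical names):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errLockTimeout = errors.New("lock acquisition timed out") // hypothetical

func tryAcquire(ctx context.Context) bool { return false } // hypothetical stub

// acquireLeaderLock sketches the pre-fix shape: a child context is derived,
// but on failure the function returns without calling cancel, leaking the
// context's timer until the parent context itself is canceled.
func acquireLeaderLock(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc, error) {
	ctx, cancel := context.WithTimeout(parent, timeout)
	if !tryAcquire(ctx) {
		// BUG: the lock was never acquired, so no caller will ever
		// invoke cancel(); the fix cancels here, before returning.
		return nil, nil, errLockTimeout
	}
	return ctx, cancel, nil
}

func main() {
	_, _, err := acquireLeaderLock(context.Background(), time.Second)
	fmt.Println(err) // lock acquisition timed out -- and ctx has leaked
}

The fix inverts the responsibility: on timeout, GetLock itself cancels the context before returning the error, so call sites no longer carry a cancel they might forget.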

@@ -91,19 +91,19 @@ func (s *safeDuration) Get() time.Duration {
 // runDataScanner will start a data scanner.
 // The function will block until the context is canceled.
 // There should only ever be one scanner running per cluster.
-func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
+func runDataScanner(pctx context.Context, objAPI ObjectLayer) {
 	// Make sure only 1 scanner is running on the cluster.
 	locker := objAPI.NewNSLock(minioMetaBucket, "runDataScanner.lock")
+	var ctx context.Context
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 	for {
-		var err error
-		var cancel context.CancelFunc
-		ctx, cancel, err = locker.GetLock(ctx, dataScannerLeaderLockTimeout)
+		lkctx, err := locker.GetLock(pctx, dataScannerLeaderLockTimeout)
 		if err != nil {
 			time.Sleep(time.Duration(r.Float64() * float64(scannerCycle.Get())))
 			continue
 		}
-		defer cancel()
+		ctx = lkctx.Context()
+		defer lkctx.Cancel()
 		break
 		// No unlock for "leader" lock.
 	}
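
The rewritten call site depends on the LockContext value this commit introduces. The diff confirms the Context() and Cancel() methods; the struct body below is a reconstruction under that assumption, not a verbatim copy of the commit:

// LockContext bundles the lock-scoped context with the cancel function
// that releases its resources; canceling it lets the lock expire.
type LockContext struct {
	ctx    context.Context
	cancel context.CancelFunc
}

// Context returns the context tied to the lock's lifetime.
func (l LockContext) Context() context.Context {
	return l.ctx
}

// Cancel cancels the lock context. The nil check makes it safe to call
// even when GetLock failed and attached no cancel function.
func (l LockContext) Cancel() {
	if l.cancel != nil {
		l.cancel()
	}
}

With this shape, GetLock can cancel internally on timeout and return only the error, which is what lets runDataScanner drop its own cancel bookkeeping in the hunk above.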