From e05886561db7703662b94582a6159446d5427063 Mon Sep 17 00:00:00 2001
From: Anis Elleuch
Date: Fri, 27 Aug 2021 21:07:55 +0100
Subject: [PATCH] lock: Fix Refresh logic with multi resources lock (#13092)

A multi resources lock is a single lock UID with multiple associated
resources. This is created, for example, by a multi-object delete
operation. This commit changes the behavior of Refresh() to iterate
over all locks having the same UID and refresh them.

Bonus: Fix showing top locks for multi delete objects
---
 cmd/admin-handlers.go      |  4 ++--
 cmd/local-locker.go        | 32 ++++++++++++++------------------
 internal/dsync/drwmutex.go | 12 ++++--------
 3 files changed, 20 insertions(+), 28 deletions(-)

diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go
index 1573a2f1a..84d32b3a1 100644
--- a/cmd/admin-handlers.go
+++ b/cmd/admin-handlers.go
@@ -369,10 +369,10 @@ func topLockEntries(peerLocks []*PeerLocks, stale bool) madmin.LockEntries {
 		}
 		for k, v := range peerLock.Locks {
 			for _, lockReqInfo := range v {
-				if val, ok := entryMap[lockReqInfo.UID]; ok {
+				if val, ok := entryMap[lockReqInfo.Name]; ok {
 					val.ServerList = append(val.ServerList, peerLock.Addr)
 				} else {
-					entryMap[lockReqInfo.UID] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
+					entryMap[lockReqInfo.Name] = lriToLockEntry(lockReqInfo, k, peerLock.Addr)
 				}
 			}
 		}
diff --git a/cmd/local-locker.go b/cmd/local-locker.go
index 5dd951fbc..f24ecad74 100644
--- a/cmd/local-locker.go
+++ b/cmd/local-locker.go
@@ -234,15 +234,17 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
 			return true, nil
 		}
 
+		lockFound := false
 		for _, lris := range l.lockMap {
 			for _, lri := range lris {
 				if lri.UID == args.UID {
-					l.removeEntry(lri.Name, dsync.LockArgs{Owner: lri.Owner, UID: lri.UID}, &lris)
-					return true, nil
+					l.removeEntry(lri.Name, dsync.LockArgs{UID: lri.UID}, &lris)
+					lockFound = true
 				}
 			}
 		}
-		return false, nil
+
+		return lockFound, nil
 	}
 }
 
@@ -254,24 +256,18 @@ func (l *localLocker) Refresh(ctx context.Context, args dsync.LockArgs) (refresh
 		l.mutex.Lock()
 		defer l.mutex.Unlock()
 
-		resource := args.Resources[0] // refresh check is always per resource.
-
-		// Lock found, proceed to verify if belongs to given uid.
-		lri, ok := l.lockMap[resource]
-		if !ok {
-			// lock doesn't exist yet, return false
-			return false, nil
-		}
-
-		// Check whether uid is still active
-		for i := range lri {
-			if lri[i].UID == args.UID && lri[i].Owner == args.Owner {
-				lri[i].TimeLastRefresh = UTCNow()
-				return true, nil
+		lockFound := false
+		for _, lri := range l.lockMap {
+			// Check whether uid is still active
+			for i := range lri {
+				if lri[i].UID == args.UID {
+					lri[i].TimeLastRefresh = UTCNow()
+					lockFound = true
+				}
 			}
 		}
-		return false, nil
+
+		return lockFound, nil
 	}
 }
 
diff --git a/internal/dsync/drwmutex.go b/internal/dsync/drwmutex.go
index fceffc3b5..fb3ff5764 100644
--- a/internal/dsync/drwmutex.go
+++ b/internal/dsync/drwmutex.go
@@ -238,7 +238,7 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 			case <-refreshTimer.C:
 				refreshTimer.Reset(drwMutexRefreshInterval)
 
-				refreshed, err := refresh(ctx, dm.clnt, id, source, quorum, dm.Names...)
+				refreshed, err := refresh(ctx, dm.clnt, id, source, quorum)
 				if err == nil && !refreshed {
 					// Clean the lock locally and in remote nodes
 					forceUnlock(ctx, dm.clnt, id)
@@ -279,8 +279,8 @@ type refreshResult struct {
 	succeeded bool
 }
 
-func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int, lockNames ...string) (bool, error) {
-	restClnts, owner := ds.GetLockers()
+func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int) (bool, error) {
+	restClnts, _ := ds.GetLockers()
 
 	// Create buffered channel of size equal to total number of nodes.
 	ch := make(chan refreshResult, len(restClnts))
@@ -298,11 +298,7 @@ func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int, lock
 		}
 
 		args := LockArgs{
-			Owner:     owner,
-			UID:       id,
-			Resources: lockNames,
-			Source:    source,
-			Quorum:    quorum,
+			UID: id,
 		}
 
 		ctx, cancel := context.WithTimeout(ctx, drwMutexRefreshCallTimeout)
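
Editor's note: the sketch below is a minimal, standalone Go illustration of the refresh-by-UID behavior this patch introduces; it is not the MinIO code itself, and the locker type, field set, and resource names used here are hypothetical. The point it demonstrates: because one lock UID can cover several resources (e.g. a multi-object delete), a refresh must walk every resource entry in the lock map and touch all entries carrying that UID, instead of checking only the first requested resource.

// refreshsketch.go: illustrative only; all names are assumptions, not MinIO APIs.
package main

import (
	"fmt"
	"time"
)

// lockRequesterInfo is a simplified stand-in for a per-resource lock entry.
type lockRequesterInfo struct {
	Name            string    // resource name
	UID             string    // lock UID, shared by all resources of one call
	TimeLastRefresh time.Time // last time this entry was refreshed
}

// locker maps a resource name to the lock entries currently held on it.
type locker struct {
	lockMap map[string][]lockRequesterInfo
}

// refresh touches every entry, across all resources, whose UID matches, and
// reports whether at least one such entry was found. This mirrors the patched
// behavior of localLocker.Refresh: iterate the whole map rather than looking
// only at the first resource of the request.
func (l *locker) refresh(uid string) bool {
	found := false
	for _, entries := range l.lockMap {
		for i := range entries {
			if entries[i].UID == uid {
				entries[i].TimeLastRefresh = time.Now().UTC()
				found = true
			}
		}
	}
	return found
}

func main() {
	// A single multi-object delete holds one UID over two objects.
	l := &locker{lockMap: map[string][]lockRequesterInfo{
		"bucket/object-1": {{Name: "bucket/object-1", UID: "uid-123"}},
		"bucket/object-2": {{Name: "bucket/object-2", UID: "uid-123"}},
	}}

	fmt.Println(l.refresh("uid-123")) // true: both entries are refreshed
	fmt.Println(l.refresh("uid-999")) // false: unknown UID, nothing refreshed
}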