locks: Ensure local lock removal after a failed refresh (#12979)

When a lock fails to be refreshed in the cluster, it is automatically
removed by the subsequent cleanup routine for non-refreshed locks.
However, that routine did not clean up the lock on the local server,
leaving the same stale lock entries in place.

This commit removes the lock locally as well as on the remote nodes. If
removing the lock from a remote node fails, it will still be removed
later by the stale locks cleanup routine.
Anis Elleuch 2021-08-27 16:59:36 +01:00 committed by GitHub
parent ae8f7f11d5
commit 06b71c99ee
2 changed files with 41 additions and 5 deletions
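
For orientation, here is a minimal, self-contained sketch (toy code, not MinIO's implementation; all names are illustrative) of the behavior this change aims for: once a refresh round reports the lock as gone from the cluster, the holder drops its local entry too, best-effort removes it on the remotes, and only then fires the lock-loss callback.

package main

import "fmt"

// toyLocker models just enough of a lock server for this sketch.
type toyLocker struct {
	locks map[string]bool // lock UID -> still held
}

func (l *toyLocker) Refresh(uid string) bool { return l.locks[uid] }
func (l *toyLocker) ForceUnlock(uid string)  { delete(l.locks, uid) }

// onFailedRefresh mirrors the intent of the patch: when the cluster no longer
// knows the lock, remove it locally and on the remotes, then notify the caller.
// A failed remote removal is tolerated; the periodic cleanup handles leftovers.
func onFailedRefresh(local *toyLocker, remotes []*toyLocker, uid string, lost func()) {
	local.ForceUnlock(uid)
	for _, r := range remotes {
		r.ForceUnlock(uid)
	}
	lost()
}

func main() {
	local := &toyLocker{locks: map[string]bool{"uid-1": true}}
	remote := &toyLocker{locks: map[string]bool{}} // the cluster has lost the lock

	if !remote.Refresh("uid-1") {
		onFailedRefresh(local, []*toyLocker{remote}, "uid-1", func() {
			fmt.Println("lock lost: caller notified")
		})
	}
	fmt.Println("stale local entry left behind:", local.Refresh("uid-1")) // false
}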


@@ -227,14 +227,23 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
 	default:
 		l.mutex.Lock()
 		defer l.mutex.Unlock()
-		if len(args.UID) != 0 {
-			return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
+		if len(args.UID) == 0 {
+			for _, resource := range args.Resources {
+				delete(l.lockMap, resource) // Remove the lock (irrespective of write or read lock)
+			}
+			return true, nil
 		}
-		for _, resource := range args.Resources {
-			delete(l.lockMap, resource) // Remove the lock (irrespective of write or read lock)
+
+		for _, lris := range l.lockMap {
+			for _, lri := range lris {
+				if lri.UID == args.UID {
+					l.removeEntry(lri.Name, dsync.LockArgs{Owner: lri.Owner, UID: lri.UID}, &lris)
+					return true, nil
+				}
+			}
 		}
-		return true, nil
+		return false, nil
 	}
 }
 
 func (l *localLocker) Refresh(ctx context.Context, args dsync.LockArgs) (refreshed bool, err error) {
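
As a side note (toy code, not part of this commit): the patched ForceUnlock distinguishes two modes. With an empty UID it deletes the named resources outright, as before; with a non-empty UID it scans the local lock map and drops the entry held under that UID, which is what allows a stale lock to be purged locally. A self-contained sketch of that lookup logic, with illustrative names:

package main

import "fmt"

// lockInfo stands in for a local lock map entry.
type lockInfo struct{ Name, Owner, UID string }

// forceUnlock sketches the two removal modes: by resource name (empty UID)
// or by lock UID (non-empty UID).
func forceUnlock(lockMap map[string][]lockInfo, resources []string, uid string) bool {
	if uid == "" {
		for _, r := range resources {
			delete(lockMap, r) // remove irrespective of write or read lock
		}
		return true
	}
	for name, entries := range lockMap {
		for _, e := range entries {
			if e.UID == uid {
				delete(lockMap, name) // simplified: drop the whole entry
				return true
			}
		}
	}
	return false
}

func main() {
	locks := map[string][]lockInfo{
		"bucket/object": {{Name: "bucket/object", Owner: "node1", UID: "stale-uid"}},
	}
	fmt.Println(forceUnlock(locks, nil, "stale-uid")) // true: stale entry removed
	fmt.Println(len(locks))                           // 0
}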


@@ -51,6 +51,9 @@ const drwMutexRefreshCallTimeout = 5 * time.Second
 // dRWMutexUnlockTimeout - timeout for the unlock call
 const drwMutexUnlockCallTimeout = 30 * time.Second
 
+// dRWMutexForceUnlockTimeout - timeout for the unlock call
+const drwMutexForceUnlockCallTimeout = 30 * time.Second
+
 // dRWMutexRefreshInterval - the interval between two refresh calls
 const drwMutexRefreshInterval = 10 * time.Second
 
@@ -237,6 +240,9 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 			refreshed, err := refresh(ctx, dm.clnt, id, source, quorum, dm.Names...)
 			if err == nil && !refreshed {
+				// Clean the lock locally and in remote nodes
+				forceUnlock(ctx, dm.clnt, id)
+				// Execute the caller lock loss callback
 				if lockLossCallback != nil {
 					lockLossCallback()
 				}
 
@@ -247,6 +253,27 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 	}()
 }
 
+func forceUnlock(ctx context.Context, ds *Dsync, id string) {
+	ctx, cancel := context.WithTimeout(ctx, drwMutexForceUnlockCallTimeout)
+	defer cancel()
+
+	restClnts, _ := ds.GetLockers()
+
+	var wg sync.WaitGroup
+	for index, c := range restClnts {
+		wg.Add(1)
+		// Send refresh request to all nodes
+		go func(index int, c NetLocker) {
+			defer wg.Done()
+			args := LockArgs{
+				UID: id,
+			}
+			c.ForceUnlock(ctx, args)
+		}(index, c)
+	}
+	wg.Wait()
+}
+
 type refreshResult struct {
 	offline   bool
 	succeeded bool
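
The new forceUnlock helper fans the request out to all lockers in parallel and deliberately ignores per-node errors: an unreachable node is acceptable because the stale-lock cleanup routine will remove the leftover later. A self-contained sketch of that best-effort fan-out pattern (toy code, not MinIO's client plumbing; names are illustrative):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// node stands in for a lock server client; only the calling pattern matters here.
type node struct{ name string }

func (n node) ForceUnlock(ctx context.Context, uid string) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend network round trip
		fmt.Println(n.name, "released", uid)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// forceUnlockAll contacts every node concurrently under a shared timeout and
// waits for all attempts to finish, ignoring individual failures (best effort).
func forceUnlockAll(ctx context.Context, nodes []node, uid string) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	var wg sync.WaitGroup
	for _, n := range nodes {
		wg.Add(1)
		go func(n node) {
			defer wg.Done()
			_ = n.ForceUnlock(ctx, uid) // error intentionally ignored
		}(n)
	}
	wg.Wait()
}

func main() {
	forceUnlockAll(context.Background(), []node{{"node1"}, {"node2"}, {"node3"}}, "uid-1")
}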