locks: Ensure local lock removal after a failed refresh (#12979)

When a lock is not refreshed in the cluster, it will be removed
automatically by the subsequent cleanup routine for non-refreshed
locks; however, that routine did not also clean up the local server,
leaving the same stale locks present there.

This commit removes the lock locally as well as on remote nodes; if
removing a lock from a remote node fails, it will still be removed
later by the locks cleanup routine.
This commit is contained in:
Anis Elleuch
2021-08-27 16:59:36 +01:00
committed by GitHub
parent ae8f7f11d5
commit 06b71c99ee
2 changed files with 41 additions and 5 deletions

View File

@@ -227,13 +227,22 @@ func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (rep
default:
l.mutex.Lock()
defer l.mutex.Unlock()
if len(args.UID) != 0 {
return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
if len(args.UID) == 0 {
for _, resource := range args.Resources {
delete(l.lockMap, resource) // Remove the lock (irrespective of write or read lock)
}
return true, nil
}
for _, resource := range args.Resources {
delete(l.lockMap, resource) // Remove the lock (irrespective of write or read lock)
for _, lris := range l.lockMap {
for _, lri := range lris {
if lri.UID == args.UID {
l.removeEntry(lri.Name, dsync.LockArgs{Owner: lri.Owner, UID: lri.UID}, &lris)
return true, nil
}
}
}
return true, nil
return false, nil
}
}