lock: Fix decision when a lock needs to be removed (#14095)

The code was not properly deciding whether a lock needs to be removed
when it no longer has quorum. After this commit, a lock will be
forcefully unlocked if enough nodes report that they cannot find the
lock internally, breaking its quorum.

Simplify the code as well.
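
In other words, with len(restClnts) lockers and a required quorum, the lock
cannot still exist on quorum nodes once more than len(restClnts)-quorum of
them answer that they do not hold it. A minimal sketch of that decision rule
in Go (the standalone helper below is hypothetical; in the diff the check
appears inline in refreshLock):

    // Hypothetical helper mirroring the check this commit introduces
    // inline in refreshLock.
    //   n:            total number of lockers (len(restClnts))
    //   quorum:       nodes that must agree for the lock to be valid
    //   lockNotFound: nodes that responded but could not find the lock
    func lockQuorumBroken(n, quorum, lockNotFound int) bool {
    	// Once more than n-quorum nodes are missing the lock, fewer than
    	// quorum nodes can possibly still hold it, so quorum is broken.
    	return lockNotFound > n-quorum
    }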
Anis Elleuch
2022-01-14 19:33:08 +01:00
committed by GitHub
parent 0df31f63ab
commit b106b1c131
2 changed files with 63 additions and 31 deletions

@@ -234,8 +234,8 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 		case <-refreshTimer.C:
 			refreshTimer.Reset(drwMutexRefreshInterval)
-			refreshed, err := refresh(ctx, dm.clnt, id, source, quorum)
-			if err == nil && !refreshed {
+			noQuorum, err := refreshLock(ctx, dm.clnt, id, source, quorum)
+			if err == nil && noQuorum {
 				// Clean the lock locally and in remote nodes
 				forceUnlock(ctx, dm.clnt, id)
 				// Execute the caller lock loss callback
@@ -273,10 +273,12 @@ func forceUnlock(ctx context.Context, ds *Dsync, id string) {
 type refreshResult struct {
 	offline   bool
-	succeeded bool
+	refreshed bool
 }
 
-func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int) (bool, error) {
+// Refresh the given lock in all nodes, return true to indicate if a lock
+// does not exist in enough quorum nodes.
+func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int) (bool, error) {
 	restClnts, _ := ds.GetLockers()
 
 	// Create buffered channel of size equal to total number of nodes.
@@ -302,16 +304,12 @@ func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int) (boo
 			defer cancel()
 
 			refreshed, err := c.Refresh(ctx, args)
-			if refreshed && err == nil {
-				ch <- refreshResult{succeeded: true}
+			if err != nil {
+				ch <- refreshResult{offline: true}
+				log("dsync: Unable to call Refresh failed with %s for %#v at %s\n", err, args, c)
 			} else {
-				if err != nil {
-					ch <- refreshResult{offline: true}
-					log("dsync: Unable to call Refresh failed with %s for %#v at %s\n", err, args, c)
-				} else {
-					ch <- refreshResult{succeeded: false}
-					log("dsync: Refresh returned false for %#v at %s\n", args, c)
-				}
+				ch <- refreshResult{refreshed: refreshed}
+				log("dsync: Refresh returned false for %#v at %s\n", args, c)
 			}
 		}(index, c)
 	}
@@ -322,39 +320,32 @@ func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int) (boo
 	// b) received too many refreshed for quorum to be still possible
 	// c) timed out
 	//
-	i, refreshFailed, refreshSucceeded := 0, 0, 0
+	lockNotFound, lockRefreshed := 0, 0
 	done := false
 
-	for ; i < len(restClnts); i++ {
+	for i := 0; i < len(restClnts); i++ {
 		select {
-		case refresh := <-ch:
-			if refresh.offline {
+		case refreshResult := <-ch:
+			if refreshResult.offline {
 				continue
 			}
-			if refresh.succeeded {
-				refreshSucceeded++
+			if refreshResult.refreshed {
+				lockRefreshed++
 			} else {
-				refreshFailed++
+				lockNotFound++
 			}
-			if refreshFailed > quorum {
-				// We know that we are not going to succeed with refresh
+			if lockRefreshed >= quorum || lockNotFound > len(restClnts)-quorum {
 				done = true
 			}
 		case <-ctx.Done():
 			// Refreshing is canceled
 			return false, ctx.Err()
 		}
 		if done {
 			break
 		}
 	}
 
-	refreshQuorum := refreshSucceeded >= quorum
-	if !refreshQuorum {
-		refreshQuorum = refreshFailed < quorum
-	}
-
 	// We may have some unused results in ch, release them async.
 	go func() {
 		wg.Wait()
@@ -363,7 +354,8 @@ func refresh(ctx context.Context, ds *Dsync, id, source string, quorum int) (boo
 		}
 	}()
 
-	return refreshQuorum, nil
+	noQuorum := lockNotFound > len(restClnts)-quorum
+	return noQuorum, nil
 }
 
 // lock tries to acquire the distributed lock, returning true or false.
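
As a quick sanity check of the new condition (a hypothetical example, not
part of the commit): with 4 lockers and quorum 3, a single "lock not found"
response is tolerated, while a second one breaks quorum and triggers
forceUnlock. The snippet below evaluates the same expression refreshLock now
returns:

    package main

    import "fmt"

    func main() {
    	n, quorum := 4, 3 // assumed cluster size and quorum
    	for lockNotFound := 0; lockNotFound <= n; lockNotFound++ {
    		noQuorum := lockNotFound > n-quorum // the check refreshLock returns
    		fmt.Printf("lockNotFound=%d -> noQuorum=%v\n", lockNotFound, noQuorum)
    	}
    }

Note that offline nodes are deliberately skipped in the tally
(refreshResult.offline leads to continue), so an unreachable node never
counts toward forced removal; only explicit "lock not found" answers do.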