fix: add lock ownership to expire locks (#10571)
- Add owner information for expiry, locking, and unlocking a resource.
- TopLocks now returns locks held in quorum by default, and provides a way to capture stale locks as well with `?stale=true`.
- Simplify the quorum handling for locks so it no longer depends on storage class, since keeping the two consistent across all situations proved challenging.
- Other small simplifications to resetting locks.
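For orientation before the diff: the change threads an owner identity through every lock RPC so a lock server can later tell whether a held lock is stale. A minimal sketch of that idea in Go, with hypothetical names (lockRequesterInfo, isOwnerAlive) that are illustrative only and not part of this diff:

// Hypothetical server-side view of owner-based expiry.
type lockRequesterInfo struct {
	Owner string // identity of the requesting instance (the new field below)
	UID   string // unique ID of this particular lock request
}

// expireStale drops lock entries whose owner can no longer be reached.
// held maps resource name -> current lock requesters on this server.
func expireStale(held map[string][]lockRequesterInfo, isOwnerAlive func(owner string) bool) {
	for resource, lris := range held {
		alive := lris[:0]
		for _, lri := range lris {
			if isOwnerAlive(lri.Owner) {
				alive = append(alive, lri)
			}
		}
		if len(alive) == 0 {
			delete(held, resource) // every requester's owner is gone: stale
		} else {
			held[resource] = alive
		}
	}
}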
@@ -71,8 +71,9 @@ func isLocked(uid string) bool {
 // NewDRWMutex - initializes a new dsync RW mutex.
 func NewDRWMutex(clnt *Dsync, names ...string) *DRWMutex {
+	restClnts, _ := clnt.GetLockers()
 	return &DRWMutex{
-		writeLocks: make([]string, len(clnt.GetLockersFn())),
+		writeLocks: make([]string, len(restClnts)),
 		Names:      names,
 		clnt:       clnt,
 	}
@@ -141,28 +142,19 @@ const (
 // algorithm until either the lock is acquired successfully or more
 // time has elapsed than the timeout value.
 func (dm *DRWMutex) lockBlocking(ctx context.Context, id, source string, isReadLock bool, opts Options) (locked bool) {
-	restClnts := dm.clnt.GetLockersFn()
+	restClnts, _ := dm.clnt.GetLockers()
 
 	r := rand.New(rand.NewSource(time.Now().UnixNano()))
 
 	// Create lock array to capture the successful lockers
 	locks := make([]string, len(restClnts))
 
-	cleanLocks := func(locks []string) {
-		for i := range locks {
-			locks[i] = ""
-		}
-	}
-
 	log("lockBlocking %s/%s for %#v: lockType readLock(%t), additional opts: %#v\n", id, source, dm.Names, isReadLock, opts)
 	retryCtx, cancel := context.WithTimeout(ctx, opts.Timeout)
 
 	defer cancel()
 
 	for {
-		// cleanup any older state, re-use the lock slice.
-		cleanLocks(locks)
-
 		select {
 		case <-retryCtx.Done():
 			log("lockBlocking canceled %s/%s for %#v: lockType readLock(%t), additional opts: %#v\n", id, source, dm.Names, isReadLock, opts)
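A hedged usage sketch of the blocking acquire path above, assuming the exported GetLock forwards its parameters (ctx, id, source, opts) to lockBlocking as the code suggests; the uuid request ID and resource name are illustrative:

// Acquire the write lock with a bounded retry window, then release it.
func doWithLock(ds *Dsync) {
	dm := NewDRWMutex(ds, "bucket/object")
	id := uuid.New().String() // request UID, as in the tests further down
	if dm.GetLock(context.Background(), id, "caller()@client.go", Options{Timeout: 5 * time.Second}) {
		defer dm.Unlock()
		// ... critical section ...
	}
}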
@@ -195,8 +187,11 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, id, source string, isReadL
 // lock tries to acquire the distributed lock, returning true or false.
 func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, isReadLock bool, tolerance int, lockNames ...string) bool {
+	for i := range *locks {
+		(*locks)[i] = ""
+	}
+
-	restClnts := ds.GetLockersFn()
+	restClnts, owner := ds.GetLockers()
 
 	// Tolerance is not set, defaults to half of the locker clients.
 	if tolerance == 0 {
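The hunk ends at the tolerance default named in the comment; a sketch of the quorum arithmetic this leads into, consistent with the hunk (the write-lock bump avoids split brain when tolerance is exactly half the lockers):

// tolerance defaults to half the lockers; quorum is how many must grant.
if tolerance == 0 {
	tolerance = len(restClnts) / 2
}
quorum := len(restClnts) - tolerance
if !isReadLock && quorum == tolerance {
	quorum++ // e.g. 8 lockers: tolerance 4, so a write lock needs 5 grants
}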
@@ -237,6 +232,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is
 	}
 
 	args := LockArgs{
+		Owner:     owner,
 		UID:       id,
 		Resources: lockNames,
 		Source:    source,
@@ -293,7 +289,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is
 				done = true
 				// Increment the number of grants received from the buffered channel.
 				i++
-				releaseAll(ds, locks, isReadLock, restClnts, lockNames...)
+				releaseAll(ds, owner, locks, isReadLock, restClnts, lockNames...)
 			}
 		}
 	case <-timeout:
@@ -302,7 +298,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is
 			// number of locks to check whether we have quorum or not
 			if !checkQuorumMet(locks, quorum) {
 				log("Quorum not met after timeout\n")
-				releaseAll(ds, locks, isReadLock, restClnts, lockNames...)
+				releaseAll(ds, owner, locks, isReadLock, restClnts, lockNames...)
 			} else {
 				log("Quorum met after timeout\n")
 			}
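checkQuorumMet, called above, reduces to counting grants; a minimal sketch assuming each locker's slot in the slice holds the grant UID when that locker granted the lock and the empty string otherwise (which is what isLocked tests):

// checkQuorumMet reports whether enough lockers granted the request.
func checkQuorumMet(locks *[]string, quorum int) bool {
	count := 0
	for _, uid := range *locks {
		if isLocked(uid) { // isLocked(uid) is effectively uid != ""
			count++
		}
	}
	return count >= quorum
}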
@@ -327,6 +323,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is
 		if grantToBeReleased.isLocked() {
 			// release lock
 			sendRelease(ds, restClnts[grantToBeReleased.index],
+				owner,
 				grantToBeReleased.lockUID, isReadLock, lockNames...)
 		}
 	}
@@ -350,10 +347,10 @@ func checkQuorumMet(locks *[]string, quorum int) bool {
 }
 
 // releaseAll releases all locks that are marked as locked
-func releaseAll(ds *Dsync, locks *[]string, isReadLock bool, restClnts []NetLocker, lockNames ...string) {
+func releaseAll(ds *Dsync, owner string, locks *[]string, isReadLock bool, restClnts []NetLocker, lockNames ...string) {
 	for lock := range restClnts {
 		if isLocked((*locks)[lock]) {
-			sendRelease(ds, restClnts[lock], (*locks)[lock], isReadLock, lockNames...)
+			sendRelease(ds, restClnts[lock], owner, (*locks)[lock], isReadLock, lockNames...)
 			(*locks)[lock] = ""
 		}
 	}
@@ -364,7 +361,7 @@ func releaseAll(ds *Dsync, locks *[]string, isReadLock bool, restClnts []NetLock
 // It is a run-time error if dm is not locked on entry to Unlock.
 func (dm *DRWMutex) Unlock() {
 
-	restClnts := dm.clnt.GetLockersFn()
+	restClnts, owner := dm.clnt.GetLockers()
 	// create temp array on stack
 	locks := make([]string, len(restClnts))
@@ -391,7 +388,7 @@ func (dm *DRWMutex) Unlock() {
 	}
 
 	isReadLock := false
-	unlock(dm.clnt, locks, isReadLock, restClnts, dm.Names...)
+	unlock(dm.clnt, owner, locks, isReadLock, restClnts, dm.Names...)
 }
 
 // RUnlock releases a read lock held on dm.
@@ -400,7 +397,7 @@ func (dm *DRWMutex) Unlock() {
 func (dm *DRWMutex) RUnlock() {
 
 	// create temp array on stack
-	restClnts := dm.clnt.GetLockersFn()
+	restClnts, owner := dm.clnt.GetLockers()
 
 	locks := make([]string, len(restClnts))
 	{
@@ -416,10 +413,10 @@ func (dm *DRWMutex) RUnlock() {
 	}
 
 	isReadLock := true
-	unlock(dm.clnt, locks, isReadLock, restClnts, dm.Names...)
+	unlock(dm.clnt, owner, locks, isReadLock, restClnts, dm.Names...)
 }
 
-func unlock(ds *Dsync, locks []string, isReadLock bool, restClnts []NetLocker, names ...string) {
+func unlock(ds *Dsync, owner string, locks []string, isReadLock bool, restClnts []NetLocker, names ...string) {
 
 	// We don't need to synchronously wait until we have released all the locks (or the quorum)
 	// (a subsequent lock will retry automatically in case it would fail to get quorum)
@@ -428,19 +425,20 @@ func unlock(ds *Dsync, locks []string, isReadLock bool, restClnts []NetLocker, n
 		if isLocked(locks[index]) {
 			// broadcast lock release to all nodes that granted the lock
-			sendRelease(ds, c, locks[index], isReadLock, names...)
+			sendRelease(ds, c, owner, locks[index], isReadLock, names...)
 		}
 	}
 }
 
 // sendRelease sends a release message to a node that previously granted a lock
-func sendRelease(ds *Dsync, c NetLocker, uid string, isReadLock bool, names ...string) {
+func sendRelease(ds *Dsync, c NetLocker, owner string, uid string, isReadLock bool, names ...string) {
 	if c == nil {
 		log("Unable to call RUnlock failed with %s\n", errors.New("netLocker is offline"))
 		return
 	}
 
 	args := LockArgs{
+		Owner:     owner,
 		UID:       uid,
 		Resources: names,
 	}
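After building args, sendRelease hands the release to the granting node; a sketch of that dispatch, assuming NetLocker exposes Unlock and RUnlock methods taking LockArgs and returning (bool, error):

// Fire the release at the remote locker; read and write locks use
// different RPCs, and failures are only logged (release is best effort).
if isReadLock {
	if _, err := c.RUnlock(args); err != nil {
		log("dsync: unable to call RUnlock for %#v at %s: %s\n", args, c, err)
	}
} else {
	if _, err := c.Unlock(args); err != nil {
		log("dsync: unable to call Unlock for %#v at %s: %s\n", args, c, err)
	}
}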
@@ -20,5 +20,5 @@ package dsync
 // authenticated clients, used to initiate lock REST calls.
 type Dsync struct {
 	// List of rest client objects, one per lock server.
-	GetLockersFn func() []NetLocker
+	GetLockers func() ([]NetLocker, string)
 }
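A hedged wiring sketch for the new field; buildLockers and nodeID are hypothetical stand-ins for however the caller assembles its lock clients and a stable per-process identity:

// The second return value of GetLockers is the owner identity that
// every lock RPC issued through this Dsync will now carry.
lockers, nodeID := buildLockers() // hypothetical helper
ds := &Dsync{
	GetLockers: func() ([]NetLocker, string) { return lockers, nodeID },
}
dm := NewDRWMutex(ds, "my-resource")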
@@ -31,6 +31,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/uuid"
 	. "github.com/minio/minio/pkg/dsync"
 )
@@ -78,7 +79,7 @@ func TestMain(m *testing.M) {
 	}
 
 	ds = &Dsync{
-		GetLockersFn: func() []NetLocker { return clnts },
+		GetLockers: func() ([]NetLocker, string) { return clnts, uuid.New().String() },
 	}
 
 	startRPCServers(nodes)
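Note the test closure mints a fresh UUID on every call, which is fine for exercising the plumbing; a production caller would presumably return one stable identity per process so owner-based staleness checks mean something, e.g.:

// Generate the owner once and capture it, so every lock RPC from this
// process reports the same owner.
owner := uuid.New().String()
ds = &Dsync{
	GetLockers: func() ([]NetLocker, string) { return clnts, owner },
}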
@@ -29,6 +29,10 @@ type LockArgs struct {
 	// Source contains the line number, function and file name of the code
 	// on the client node that requested the lock.
 	Source string
+
+	// Owner represents unique ID for this instance, an owner who originally requested
+	// the locked resource, useful primarily in figuring out stale locks.
+	Owner string
 }
 
 // NetLocker is dsync compatible locker interface.