mirror of https://github.com/minio/minio.git
preserve context per request for local locks (#9828)
In the current code we were re-using the context from previously granted lockers; this would lead to lock timeouts for existing valid read or write locks, i.e. premature timeout of locks that were still in use. The bug affects only local lockers in FS or standalone erasure-coded mode. The issue is rather historical: it has been present in lsync for some time, but we were lucky not to hit it. Similar changes are made in dsync as well to keep the code consistent.

Fixes #9827
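The fix follows a simple pattern: the shared mutex no longer caches any context; instead each caller passes its own request context at lock-acquisition time, so cancelling or timing out one request cannot disturb locks already granted to others. A minimal, self-contained sketch of that pattern (illustrative only, not MinIO or lsync code; all names below are made up):

package main

import (
	"context"
	"fmt"
	"time"
)

// sharedLock is shared across requests; it deliberately stores no context,
// so one caller's cancellation can never affect another caller's wait.
type sharedLock struct {
	ch chan struct{} // buffered with capacity 1; a filled slot means the lock is held
}

func newSharedLock() *sharedLock {
	return &sharedLock{ch: make(chan struct{}, 1)}
}

// GetLock takes the per-request context at acquisition time, mirroring the
// change in this commit where ctx moved from NewLRWMutex/NewDRWMutex into
// the GetLock/GetRLock calls.
func (l *sharedLock) GetLock(ctx context.Context, timeout time.Duration) bool {
	t := time.NewTimer(timeout)
	defer t.Stop()
	select {
	case l.ch <- struct{}{}:
		return true
	case <-ctx.Done(): // only this request's context matters here
		return false
	case <-t.C:
		return false
	}
}

func (l *sharedLock) Unlock() { <-l.ch }

func main() {
	lk := newSharedLock()

	// Request A acquires the lock with its own context.
	ctxA := context.Background()
	if lk.GetLock(ctxA, time.Second) {
		defer lk.Unlock()
	}

	// Request B times out independently; A's granted lock is unaffected.
	ctxB, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println("request B got lock:", lk.GetLock(ctxB, time.Second))
}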
@@ -79,7 +79,7 @@ func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSo
 	nsLk, found := n.lockMap[resource]
 	if !found {
 		nsLk = &nsLock{
-			LRWMutex: lsync.NewLRWMutex(ctx),
+			LRWMutex: lsync.NewLRWMutex(),
 		}
 		// Add a count to indicate that a parallel unlock doesn't clear this entry.
 	}
@@ -89,9 +89,9 @@ func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSo
 
 	// Locking here will block (until timeout).
 	if readLock {
-		locked = nsLk.GetRLock(opsID, lockSource, timeout)
+		locked = nsLk.GetRLock(ctx, opsID, lockSource, timeout)
 	} else {
-		locked = nsLk.GetLock(opsID, lockSource, timeout)
+		locked = nsLk.GetLock(ctx, opsID, lockSource, timeout)
 	}
 
 	if !locked { // We failed to get the lock
@@ -139,6 +139,7 @@ func (n *nsLockMap) unlock(volume string, path string, readLock bool) {
 type distLockInstance struct {
 	rwMutex *dsync.DRWMutex
 	opsID   string
+	ctx     context.Context
 }
 
 // Lock - block until write lock is taken or timeout has occurred.
@@ -146,7 +147,7 @@ func (di *distLockInstance) GetLock(timeout *dynamicTimeout) (timedOutErr error)
 	lockSource := getSource(2)
 	start := UTCNow()
 
-	if !di.rwMutex.GetLock(di.opsID, lockSource, timeout.Timeout()) {
+	if !di.rwMutex.GetLock(di.ctx, di.opsID, lockSource, timeout.Timeout()) {
 		timeout.LogFailure()
 		return OperationTimedOut{}
 	}
@@ -163,7 +164,7 @@ func (di *distLockInstance) Unlock() {
 func (di *distLockInstance) GetRLock(timeout *dynamicTimeout) (timedOutErr error) {
 	lockSource := getSource(2)
 	start := UTCNow()
-	if !di.rwMutex.GetRLock(di.opsID, lockSource, timeout.Timeout()) {
+	if !di.rwMutex.GetRLock(di.ctx, di.opsID, lockSource, timeout.Timeout()) {
 		timeout.LogFailure()
 		return OperationTimedOut{}
 	}
@@ -191,10 +192,10 @@ type localLockInstance struct {
 func (n *nsLockMap) NewNSLock(ctx context.Context, lockersFn func() []dsync.NetLocker, volume string, paths ...string) RWLocker {
 	opsID := mustGetUUID()
 	if n.isDistErasure {
-		drwmutex := dsync.NewDRWMutex(ctx, &dsync.Dsync{
+		drwmutex := dsync.NewDRWMutex(&dsync.Dsync{
 			GetLockersFn: lockersFn,
 		}, pathsJoinPrefix(volume, paths...)...)
-		return &distLockInstance{drwmutex, opsID}
+		return &distLockInstance{drwmutex, opsID, ctx}
 	}
 	sort.Strings(paths)
 	return &localLockInstance{ctx, n, volume, paths, opsID}