fix: Speed up multi-object delete by taking bulk locks (#8974)
Change distributed locking to allow taking bulk locks
across objects, reducing what is usually 1000 calls to 1.

It also handles the case where multiple clients send
delete requests for objects with the following names
```
{1,2,3,4,5}
```
```
{5,4,3,2,1}
```
so that the requests block on one another and do not
fail each other.
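To illustrate the behavior described above, here is a minimal standalone sketch of an all-or-nothing bulk lock (illustrative code only, not the MinIO API; all names below are made up): two clients locking the same five objects in opposite orders simply serialize instead of failing each other.

```go
package main

import (
	"fmt"
	"sync"
)

// bulkLocker is an illustrative model of an all-or-nothing multi-resource
// write lock; it is not the MinIO implementation.
type bulkLocker struct {
	mu     sync.Mutex
	cond   *sync.Cond
	locked map[string]bool
}

func newBulkLocker() *bulkLocker {
	b := &bulkLocker{locked: make(map[string]bool)}
	b.cond = sync.NewCond(&b.mu)
	return b
}

// Lock blocks until every requested resource is free, then claims all of
// them at once, so overlapping bulk requests serialize instead of failing.
func (b *bulkLocker) Lock(resources ...string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for !b.canTakeLock(resources...) {
		b.cond.Wait()
	}
	for _, r := range resources {
		b.locked[r] = true
	}
}

// canTakeLock reports whether none of the resources is currently held,
// mirroring the all-or-nothing check introduced by this change.
func (b *bulkLocker) canTakeLock(resources ...string) bool {
	for _, r := range resources {
		if b.locked[r] {
			return false
		}
	}
	return true
}

// Unlock releases every resource and wakes any blocked callers.
func (b *bulkLocker) Unlock(resources ...string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, r := range resources {
		delete(b.locked, r)
	}
	b.cond.Broadcast()
}

func main() {
	l := newBulkLocker()
	var wg sync.WaitGroup

	// Two clients delete the same objects in opposite order; the second
	// bulk lock simply blocks until the first finishes.
	for _, names := range [][]string{{"1", "2", "3", "4", "5"}, {"5", "4", "3", "2", "1"}} {
		wg.Add(1)
		go func(names []string) {
			defer wg.Done()
			l.Lock(names...)
			defer l.Unlock(names...)
			fmt.Println("deleting", names)
		}(names)
	}
	wg.Wait()
}
```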
```diff
@@ -49,12 +49,42 @@ func (l *localLocker) String() string {
 	return l.endpoint.String()
 }
 
+func (l *localLocker) canTakeUnlock(resources ...string) bool {
+	var lkCnt int
+	for _, resource := range resources {
+		isWriteLockTaken := isWriteLock(l.lockMap[resource])
+		if isWriteLockTaken {
+			lkCnt++
+		}
+	}
+	return lkCnt == len(resources)
+}
+
+func (l *localLocker) canTakeLock(resources ...string) bool {
+	var noLkCnt int
+	for _, resource := range resources {
+		_, lockTaken := l.lockMap[resource]
+		if !lockTaken {
+			noLkCnt++
+		}
+	}
+	return noLkCnt == len(resources)
+}
+
 func (l *localLocker) Lock(args dsync.LockArgs) (reply bool, err error) {
 	l.mutex.Lock()
 	defer l.mutex.Unlock()
-	_, isLockTaken := l.lockMap[args.Resource]
-	if !isLockTaken { // No locks held on the given name, so claim write lock
-		l.lockMap[args.Resource] = []lockRequesterInfo{
+
+	if !l.canTakeLock(args.Resources...) {
+		// Not all locks can be taken on resources,
+		// reject it completely.
+		return false, nil
+	}
+
+	// No locks held on the all resources, so claim write
+	// lock on all resources at once.
+	for _, resource := range args.Resources {
+		l.lockMap[resource] = []lockRequesterInfo{
 			{
 				Writer: true,
 				Source: args.Source,
```
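The upshot of this hunk: `Lock` no longer answers per object; it either claims a write lock on every name in `args.Resources` or rejects the whole call with `reply=false` and lets the caller retry. A toy model of the `canTakeLock` check over a plain map (hypothetical object names, not the MinIO types):

```go
package main

import "fmt"

func main() {
	// All-or-nothing check: the bulk write lock is granted only when
	// none of the requested names currently holds a lock.
	lockMap := map[string]bool{"obj-2": true} // hypothetical: obj-2 is already locked
	resources := []string{"obj-1", "obj-2", "obj-3"}

	var noLkCnt int
	for _, r := range resources {
		if !lockMap[r] {
			noLkCnt++
		}
	}
	fmt.Println("granted:", noLkCnt == len(resources)) // granted: false
}
```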
```diff
@@ -64,24 +94,22 @@ func (l *localLocker) Lock(args dsync.LockArgs) (reply bool, err error) {
 			},
 		}
 	}
 	// return reply=true if lock was granted.
-	return !isLockTaken, nil
+	return true, nil
 }
 
 func (l *localLocker) Unlock(args dsync.LockArgs) (reply bool, err error) {
 	l.mutex.Lock()
 	defer l.mutex.Unlock()
-	var lri []lockRequesterInfo
-	if lri, reply = l.lockMap[args.Resource]; !reply {
-		// No lock is held on the given name
-		return reply, fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resource)
+
+	if !l.canTakeUnlock(args.Resources...) {
+		// Unless it is a write lock reject it.
+		return reply, fmt.Errorf("Unlock attempted on a read locked entity: %s", args.Resources)
 	}
-	if reply = isWriteLock(lri); !reply {
-		// Unless it is a write lock
-		return reply, fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resource, len(lri))
-	}
-	if !l.removeEntry(args.Resource, args.UID, &lri) {
-		return false, fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
+	for _, resource := range args.Resources {
+		lri := l.lockMap[resource]
+		if !l.removeEntry(resource, args.UID, &lri) {
+			return false, fmt.Errorf("Unlock unable to find corresponding lock for uid: %s on resource %s", args.UID, resource)
+		}
 	}
+	return true, nil
```
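`Unlock` is the mirror image: `canTakeUnlock` requires that a write lock is currently held on every name in `args.Resources` before any entry is removed, so a bulk unlock never partially releases. A toy model of that check (again illustrative, not the MinIO types):

```go
package main

import "fmt"

func main() {
	// Mirror check: the bulk unlock proceeds only when every requested
	// name currently holds a write lock.
	writeLocked := map[string]bool{"obj-1": true, "obj-2": true} // hypothetical lock state
	resources := []string{"obj-1", "obj-2", "obj-3"}             // obj-3 holds no write lock

	var lkCnt int
	for _, r := range resources {
		if writeLocked[r] {
			lkCnt++
		}
	}
	fmt.Println("can unlock:", lkCnt == len(resources)) // can unlock: false
}
```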
```diff
@@ -120,14 +148,15 @@ func (l *localLocker) RLock(args dsync.LockArgs) (reply bool, err error) {
 		Timestamp: UTCNow(),
 		TimeLastCheck: UTCNow(),
 	}
-	if lri, ok := l.lockMap[args.Resource]; ok {
+	resource := args.Resources[0]
+	if lri, ok := l.lockMap[resource]; ok {
 		if reply = !isWriteLock(lri); reply {
 			// Unless there is a write lock
-			l.lockMap[args.Resource] = append(l.lockMap[args.Resource], lrInfo)
+			l.lockMap[resource] = append(l.lockMap[resource], lrInfo)
 		}
 	} else {
 		// No locks held on the given name, so claim (first) read lock
-		l.lockMap[args.Resource] = []lockRequesterInfo{lrInfo}
+		l.lockMap[resource] = []lockRequesterInfo{lrInfo}
 		reply = true
 	}
 	return reply, nil
```
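Note that only write locks become bulk operations in this change: `RLock` (and `RUnlock` below) still act on a single name, taken from `args.Resources[0]`.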
```diff
@@ -137,15 +166,17 @@ func (l *localLocker) RUnlock(args dsync.LockArgs) (reply bool, err error) {
 	l.mutex.Lock()
 	defer l.mutex.Unlock()
 	var lri []lockRequesterInfo
-	if lri, reply = l.lockMap[args.Resource]; !reply {
+
+	resource := args.Resources[0]
+	if lri, reply = l.lockMap[resource]; !reply {
 		// No lock is held on the given name
-		return reply, fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resource)
+		return reply, fmt.Errorf("RUnlock attempted on an unlocked entity: %s", resource)
 	}
 	if reply = !isWriteLock(lri); !reply {
 		// A write-lock is held, cannot release a read lock
-		return reply, fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resource)
+		return reply, fmt.Errorf("RUnlock attempted on a write locked entity: %s", resource)
 	}
-	if !l.removeEntry(args.Resource, args.UID, &lri) {
+	if !l.removeEntry(resource, args.UID, &lri) {
 		return false, fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
 	}
 	return reply, nil
```
```diff
@@ -176,11 +207,13 @@ func (l *localLocker) Expired(args dsync.LockArgs) (expired bool, err error) {
 	defer l.mutex.Unlock()
 
 	// Lock found, proceed to verify if belongs to given uid.
-	if lri, ok := l.lockMap[args.Resource]; ok {
-		// Check whether uid is still active
-		for _, entry := range lri {
-			if entry.UID == args.UID {
-				return false, nil
+	for _, resource := range args.Resources {
+		if lri, ok := l.lockMap[resource]; ok {
+			// Check whether uid is still active
+			for _, entry := range lri {
+				if entry.UID == args.UID {
+					return false, nil
+				}
 			}
 		}
 	}
```
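`Expired` now walks every name in `args.Resources` and reports the lock as not expired as soon as the UID is found on any of them, so a bulk lock expires (or survives) as a single unit.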