fix: Speed up multi-object delete by taking bulk locks (#8974)

Change distributed locking to allow taking bulk locks across objects,
reducing what is usually ~1000 lock calls to a single call.
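
As a rough, hypothetical sketch of the intended call pattern (not the author's exact server code), assuming the new variadic form of `NewDRWMutex` visible in the diff below, where the resource names follow the `ds` locker argument; the `deleteObjects` helper and object names are made up for illustration:

```go
package dsync // placement inside the dsync package is illustrative only

import "context"

// deleteObjects is a hypothetical helper showing the new call pattern:
// one bulk DRWMutex over every object name in the request, instead of
// roughly one lock/unlock round trip per object. ds, id and source are
// assumed to be set up the way the package tests below set them up.
func deleteObjects(objects []string) {
	dm := NewDRWMutex(context.Background(), ds, objects...)
	dm.Lock(id, source)
	defer dm.Unlock()

	for _, object := range objects {
		_ = object // ... delete object while all names are held under the one lock ...
	}
}
```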

Also handles situations where multiple clients send delete requests
for the same objects, one client ordering the names as

```
{1,2,3,4,5}
```

and another as

```
{5,4,3,2,1}
```

The two requests will now block on each other rather than fail
each other's lock acquisition.
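
For example, a minimal sketch of that scenario under the same assumptions (variadic `NewDRWMutex`; `ds`, `id` and `source` as in the tests below): whichever request takes its bulk lock second simply blocks until the first releases it, instead of the two requests failing each other.

```go
package dsync // illustrative sketch only

import (
	"context"
	"time"
)

// reversedOrderDeletes is a hypothetical illustration: two clients take
// bulk locks over the same five names in opposite order. The second
// caller blocks until the first unlocks, so neither request fails.
func reversedOrderDeletes() {
	dmA := NewDRWMutex(context.Background(), ds, "1", "2", "3", "4", "5")
	dmB := NewDRWMutex(context.Background(), ds, "5", "4", "3", "2", "1")

	dmA.Lock(id, source) // client A acquires its bulk lock first

	done := make(chan struct{})
	go func() {
		dmB.Lock(id, source) // client B blocks here instead of failing
		dmB.Unlock()
		close(done)
	}()

	time.Sleep(50 * time.Millisecond) // client A performing its deletes
	dmA.Unlock()                      // releasing the bulk lock unblocks client B
	<-done
}
```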
Author: Harshavardhana
Date: 2020-02-21 11:29:57 +05:30
Committed by: GitHub
Parent: 852fb320f7
Commit: ab7d3cd508
24 changed files with 305 additions and 580 deletions


```diff
@@ -89,7 +89,7 @@ func TestMain(m *testing.M) {
 func TestSimpleLock(t *testing.T) {
-	dm := NewDRWMutex(context.Background(), "test", ds)
+	dm := NewDRWMutex(context.Background(), ds, "test")
 	dm.Lock(id, source)
@@ -101,7 +101,7 @@ func TestSimpleLock(t *testing.T) {
 func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
-	dm := NewDRWMutex(context.Background(), "test", ds)
+	dm := NewDRWMutex(context.Background(), ds, "test")
 	dm.Lock(id, source)
 	time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
@@ -127,8 +127,8 @@ func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
 // Test two locks for same resource, one succeeds, one fails (after timeout)
 func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
-	dm1st := NewDRWMutex(context.Background(), "aap", ds)
-	dm2nd := NewDRWMutex(context.Background(), "aap", ds)
+	dm1st := NewDRWMutex(context.Background(), ds, "aap")
+	dm2nd := NewDRWMutex(context.Background(), ds, "aap")
 	dm1st.Lock(id, source)
@@ -151,9 +151,9 @@ func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
 // Test three locks for same resource, one succeeds, one fails (after timeout)
 func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
-	dm1st := NewDRWMutex(context.Background(), "aap", ds)
-	dm2nd := NewDRWMutex(context.Background(), "aap", ds)
-	dm3rd := NewDRWMutex(context.Background(), "aap", ds)
+	dm1st := NewDRWMutex(context.Background(), ds, "aap")
+	dm2nd := NewDRWMutex(context.Background(), ds, "aap")
+	dm3rd := NewDRWMutex(context.Background(), ds, "aap")
 	dm1st.Lock(id, source)
@@ -216,8 +216,8 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
 // Test two locks for different resources, both succeed
 func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
-	dm1 := NewDRWMutex(context.Background(), "aap", ds)
-	dm2 := NewDRWMutex(context.Background(), "noot", ds)
+	dm1 := NewDRWMutex(context.Background(), ds, "aap")
+	dm2 := NewDRWMutex(context.Background(), ds, "noot")
 	dm1.Lock(id, source)
 	dm2.Lock(id, source)
@@ -243,7 +243,7 @@ func HammerMutex(m *DRWMutex, loops int, cdone chan bool) {
 // Borrowed from mutex_test.go
 func TestMutex(t *testing.T) {
 	c := make(chan bool)
-	m := NewDRWMutex(context.Background(), "test", ds)
+	m := NewDRWMutex(context.Background(), ds, "test")
 	for i := 0; i < 10; i++ {
 		go HammerMutex(m, 1000, c)
 	}
@@ -257,7 +257,7 @@ func BenchmarkMutexUncontended(b *testing.B) {
 		*DRWMutex
 	}
 	b.RunParallel(func(pb *testing.PB) {
-		var mu = PaddedMutex{NewDRWMutex(context.Background(), "", ds)}
+		var mu = PaddedMutex{NewDRWMutex(context.Background(), ds, "")}
 		for pb.Next() {
 			mu.Lock(id, source)
 			mu.Unlock()
@@ -266,7 +266,7 @@
 }
 func benchmarkMutex(b *testing.B, slack, work bool) {
-	mu := NewDRWMutex(context.Background(), "", ds)
+	mu := NewDRWMutex(context.Background(), ds, "")
 	if slack {
 		b.SetParallelism(10)
 	}
@@ -309,7 +309,7 @@ func BenchmarkMutexNoSpin(b *testing.B) {
 	// These goroutines yield during local work, so that switching from
 	// a blocked goroutine to other goroutines is profitable.
 	// As a matter of fact, this benchmark still triggers some spinning in the mutex.
-	m := NewDRWMutex(context.Background(), "", ds)
+	m := NewDRWMutex(context.Background(), ds, "")
 	var acc0, acc1 uint64
 	b.SetParallelism(4)
 	b.RunParallel(func(pb *testing.PB) {
@@ -341,7 +341,7 @@ func BenchmarkMutexSpin(b *testing.B) {
 	// profitable. To achieve this we create a goroutine per-proc.
 	// These goroutines access considerable amount of local data so that
 	// unnecessary rescheduling is penalized by cache misses.
-	m := NewDRWMutex(context.Background(), "", ds)
+	m := NewDRWMutex(context.Background(), ds, "")
 	var acc0, acc1 uint64
 	b.RunParallel(func(pb *testing.PB) {
 		var data [16 << 10]uint64
```