// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package dsync

import (
	"context"
	"math/rand"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/google/uuid"
)

const (
	testDrwMutexAcquireTimeout         = 250 * time.Millisecond
	testDrwMutexRefreshCallTimeout     = 250 * time.Millisecond
	testDrwMutexUnlockCallTimeout      = 250 * time.Millisecond
	testDrwMutexForceUnlockCallTimeout = 250 * time.Millisecond
	testDrwMutexRefreshInterval        = 100 * time.Millisecond
)

// TestMain starts the test lock servers, initializes the dsync locker
// clients, runs all tests, and tears the servers down again.
func TestMain(m *testing.M) {
	startLockServers()

	// Initialize locker clients for dsync.
	var clnts []NetLocker
	for i := 0; i < len(nodes); i++ {
		clnts = append(clnts, newClient(nodes[i].URL))
	}

	ds = &Dsync{
		GetLockers: func() ([]NetLocker, string) { return clnts, uuid.New().String() },
		Timeouts: Timeouts{
			Acquire:         testDrwMutexAcquireTimeout,
			RefreshCall:     testDrwMutexRefreshCallTimeout,
			UnlockCall:      testDrwMutexUnlockCallTimeout,
			ForceUnlockCall: testDrwMutexForceUnlockCallTimeout,
		},
	}

	code := m.Run()
	stopLockServers()
	os.Exit(code)
}
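
// TestSimpleLock acquires a single lock, holds it briefly, then releases it.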
func TestSimpleLock(t *testing.T) {
	dm := NewDRWMutex(ds, "test")

	dm.Lock(id, source)

	// fmt.Println("Lock acquired, waiting...")
	time.Sleep(testDrwMutexRefreshCallTimeout)

	dm.Unlock(context.Background())
}
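
// TestSimpleLockUnlockMultipleTimes repeatedly acquires and releases the same
// lock, holding it for a random 10-60ms interval each round.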
func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
	dm := NewDRWMutex(ds, "test")

	dm.Lock(id, source)
	time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
	dm.Unlock(context.Background())

	dm.Lock(id, source)
	time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
	dm.Unlock(context.Background())

	dm.Lock(id, source)
	time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
	dm.Unlock(context.Background())

	dm.Lock(id, source)
	time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
	dm.Unlock(context.Background())

	dm.Lock(id, source)
	time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
	dm.Unlock(context.Background())
}

// Test two locks for the same resource: the first succeeds immediately,
// the second blocks until the first is released.
func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
	dm1st := NewDRWMutex(ds, "aap")
	dm2nd := NewDRWMutex(ds, "aap")

	dm1st.Lock(id, source)

	// Release the first lock after 5x the acquire timeout
	go func() {
		time.Sleep(5 * testDrwMutexAcquireTimeout)
		// fmt.Println("Unlocking dm1")

		dm1st.Unlock(context.Background())
	}()

	dm2nd.Lock(id, source)

	// fmt.Printf("2nd lock obtained after 1st lock is released\n")
	time.Sleep(testDrwMutexRefreshCallTimeout * 2)

	dm2nd.Unlock(context.Background())
}

// Test three locks for the same resource: the locks are acquired one after
// another as each previous holder releases.
func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
	dm1st := NewDRWMutex(ds, "aap")
	dm2nd := NewDRWMutex(ds, "aap")
	dm3rd := NewDRWMutex(ds, "aap")

	dm1st.Lock(id, source)
	started := time.Now()
	var expect time.Duration

	// Release the first lock after 2x the acquire timeout
	go func() {
		time.Sleep(2 * testDrwMutexAcquireTimeout)
		// fmt.Println("Unlocking dm1")

		dm1st.Unlock(context.Background())
	}()
	expect += 2 * testDrwMutexAcquireTimeout

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()

		dm2nd.Lock(id, source)

		// Release the second lock after 2x the acquire timeout
		go func() {
			time.Sleep(2 * testDrwMutexAcquireTimeout)
			// fmt.Println("Unlocking dm2")

			dm2nd.Unlock(context.Background())
		}()

		dm3rd.Lock(id, source)

		// fmt.Printf("3rd lock obtained after 1st & 2nd locks are released\n")
		time.Sleep(testDrwMutexRefreshCallTimeout)

		dm3rd.Unlock(context.Background())
	}()
	expect += 2*testDrwMutexAcquireTimeout + testDrwMutexRefreshCallTimeout

	go func() {
		defer wg.Done()

		dm3rd.Lock(id, source)

		// Release the third lock after 2x the acquire timeout
		go func() {
			time.Sleep(2 * testDrwMutexAcquireTimeout)
			// fmt.Println("Unlocking dm3")

			dm3rd.Unlock(context.Background())
		}()

		dm2nd.Lock(id, source)

		// fmt.Printf("2nd lock obtained after 1st & 3rd locks are released\n")
		time.Sleep(testDrwMutexRefreshCallTimeout)

		dm2nd.Unlock(context.Background())
	}()
	expect += 2*testDrwMutexAcquireTimeout + testDrwMutexRefreshCallTimeout

	wg.Wait()
	// We expect at least `expect` to have elapsed: 3 x 2 x the acquire
	// timeout plus 2 x the refresh-call pause.
	elapsed := time.Since(started)
	if elapsed < expect {
		t.Errorf("expected at least %v to have passed, however only %v passed", expect, elapsed)
	}
	t.Logf("expected at least %v to have passed, %v passed", expect, elapsed)
}

// Test two locks for different resources, both succeed
func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
	dm1 := NewDRWMutex(ds, "aap")
	dm2 := NewDRWMutex(ds, "noot")

	dm1.Lock(id, source)
	dm2.Lock(id, source)
	dm1.Unlock(context.Background())
	dm2.Unlock(context.Background())
}

// Test refreshing a held lock - refresh should keep succeeding, so the lock
// context must not get canceled.
func TestSuccessfulLockRefresh(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	dm := NewDRWMutex(ds, "aap")
	dm.refreshInterval = testDrwMutexRefreshInterval

	ctx, cancel := context.WithCancel(context.Background())

	if !dm.GetLock(ctx, cancel, id, source, Options{Timeout: 5 * time.Minute}) {
		t.Fatal("GetLock() should be successful")
	}

	// Let the refresh routine run at least twice.
	timer := time.NewTimer(testDrwMutexRefreshInterval * 2)

	select {
	case <-ctx.Done():
		t.Fatal("Lock context canceled, which is not expected")
	case <-timer.C:
	}

	// Unlock should be a safe operation in all cases
	dm.Unlock(context.Background())
}

// Test canceling context while quorum servers report lock not found
func TestFailedRefreshLock(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	// Simulate refresh responses reporting that the lock is not found
	for i := range lockServers[:3] {
		lockServers[i].setRefreshReply(false)
		defer lockServers[i].setRefreshReply(true)
	}

	dm := NewDRWMutex(ds, "aap")
	dm.refreshInterval = 500 * time.Millisecond
	var wg sync.WaitGroup
	wg.Add(1)

	ctx, cl := context.WithCancel(context.Background())
	cancel := func() {
		cl()
		wg.Done()
	}

	if !dm.GetLock(ctx, cancel, id, source, Options{Timeout: 5 * time.Minute}) {
		t.Fatal("GetLock() should be successful")
	}

	// Wait until context is canceled
	wg.Wait()
	if ctx.Err() == nil {
		t.Fatal("lock context should have been canceled after failed refreshes")
	}

	// Unlock should be a safe operation in all cases
	dm.Unlock(context.Background())
}

// Test Unlock should not timeout
func TestUnlockShouldNotTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	dm := NewDRWMutex(ds, "aap")
	dm.refreshInterval = testDrwMutexUnlockCallTimeout
	if !dm.GetLock(context.Background(), nil, id, source, Options{Timeout: 5 * time.Minute}) {
		t.Fatal("GetLock() should be successful")
	}

	// Add delay to lock server responses to ensure that lock does not timeout
	for i := range lockServers {
		lockServers[i].setResponseDelay(5 * testDrwMutexUnlockCallTimeout)
		defer lockServers[i].setResponseDelay(0)
	}

	unlockReturned := make(chan struct{}, 1)
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
		defer cancel()
		dm.Unlock(ctx)
		unlockReturned <- struct{}{}
	}()

	timer := time.NewTimer(2 * testDrwMutexUnlockCallTimeout)
	defer timer.Stop()

	select {
	case <-unlockReturned:
		t.Fatal("Unlock timed out, which should not happen")
	case <-timer.C:
	}
}

// Borrowed from mutex_test.go
func HammerMutex(m *DRWMutex, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		m.Lock(id, source)
		m.Unlock(context.Background())
	}
	cdone <- true
}

// Borrowed from mutex_test.go
func TestMutex(t *testing.T) {
	loops := 200
	if testing.Short() {
		loops = 5
	}
	c := make(chan bool)
	m := NewDRWMutex(ds, "test")
	for i := 0; i < 10; i++ {
		go HammerMutex(m, loops, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}
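
// BenchmarkMutexUncontended measures lock/unlock with a separate DRWMutex per
// goroutine, i.e. without any contention.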
func BenchmarkMutexUncontended(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()

	type PaddedMutex struct {
		*DRWMutex
	}
	b.RunParallel(func(pb *testing.PB) {
		mu := PaddedMutex{NewDRWMutex(ds, "")}
		for pb.Next() {
			mu.Lock(id, source)
			mu.Unlock(context.Background())
		}
	})
}
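
// benchmarkMutex measures contended lock/unlock on a shared DRWMutex. slack
// raises the goroutine count above GOMAXPROCS; work adds local computation
// between critical sections.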
func benchmarkMutex(b *testing.B, slack, work bool) {
	b.ResetTimer()
	b.ReportAllocs()

	mu := NewDRWMutex(ds, "")
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			mu.Lock(id, source)
			mu.Unlock(context.Background())
			if work {
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo
	})
}
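
// BenchmarkMutex measures the contended case with no slack and no extra work.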
func BenchmarkMutex(b *testing.B) {
	benchmarkMutex(b, false, false)
}
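
// BenchmarkMutexSlack measures the contended case with extra goroutines and
// no extra work.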
func BenchmarkMutexSlack(b *testing.B) {
	benchmarkMutex(b, true, false)
}
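
// BenchmarkMutexWork measures the contended case with extra local work
// between critical sections.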
func BenchmarkMutexWork(b *testing.B) {
	benchmarkMutex(b, false, true)
}
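
// BenchmarkMutexWorkSlack measures the contended case with both extra
// goroutines and extra local work.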
func BenchmarkMutexWorkSlack(b *testing.B) {
	benchmarkMutex(b, true, true)
}

func BenchmarkMutexNoSpin(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()

	// This benchmark models a situation where spinning in the mutex should be
	// non-profitable and allows us to confirm that spinning does no harm.
	// To achieve this we create an excess of goroutines, most of which do local work.
	// These goroutines yield during local work, so that switching from
	// a blocked goroutine to other goroutines is profitable.
	// As a matter of fact, this benchmark still triggers some spinning in the mutex.
	m := NewDRWMutex(ds, "")
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			if i%4 == 0 {
				m.Lock(id, source)
				acc0 -= 100
				acc1 += 100
				m.Unlock(context.Background())
			} else {
				for i := 0; i < len(data); i += 4 {
					data[i]++
				}
				// Elaborate way to say runtime.Gosched
				// that does not put the goroutine onto global runq.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}

func BenchmarkMutexSpin(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()

	// This benchmark models a situation where spinning in the mutex should be
	// profitable. To achieve this we create a goroutine per processor.
	// These goroutines access a considerable amount of local data so that
	// unnecessary rescheduling is penalized by cache misses.
	m := NewDRWMutex(ds, "")
	var acc0, acc1 uint64
	b.RunParallel(func(pb *testing.PB) {
		var data [16 << 10]uint64
		for i := 0; pb.Next(); i++ {
			m.Lock(id, source)
			acc0 -= 100
			acc1 += 100
			m.Unlock(context.Background())
			for i := 0; i < len(data); i += 4 {
				data[i]++
			}
		}
	})
}