tests: Clean up dsync package (#14415)
Add non-constant timeouts to dsync package. Reduce test runtime by minutes. Hopefully not too aggressive.
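The gist of the change: the fixed package-level timeout constants become per-instance settings, exposed through a new Timeouts field on Dsync plus per-mutex refreshInterval/lockRetryInterval fields, so tests can dial them down. A minimal sketch of how a caller might configure the new timeouts; the internal/dsync import path, the helper name, and the lockers wiring are assumptions, while Timeouts, NewDRWMutex, and GetLockers are taken from the diff below:

    package example

    import (
        "time"

        "github.com/minio/minio/internal/dsync"
    )

    // newFastDsync builds a Dsync with shortened timeouts, e.g. for tests.
    // GetLockers is assumed to be a func field returning the lock-server
    // clients plus an owner id, matching the restClnts, _ := ds.GetLockers()
    // call sites in the diff.
    func newFastDsync(lockers []dsync.NetLocker, owner string) *dsync.Dsync {
        return &dsync.Dsync{
            GetLockers: func() ([]dsync.NetLocker, string) { return lockers, owner },
            Timeouts: dsync.Timeouts{
                Acquire:         250 * time.Millisecond, // default 1s
                RefreshCall:     time.Second,            // default 5s
                UnlockCall:      time.Second,            // default 30s
                ForceUnlockCall: time.Second,            // default 30s
            },
        }
    }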
@@ -43,32 +43,61 @@ func log(format string, data ...interface{}) {
 	}
 }
 
-// dRWMutexAcquireTimeout - tolerance limit to wait for lock acquisition before.
-const drwMutexAcquireTimeout = 1 * time.Second // 1 second.
+const (
+	// dRWMutexAcquireTimeout - default tolerance limit to wait for lock acquisition before giving up.
+	drwMutexAcquireTimeout = 1 * time.Second // 1 second.
 
-// dRWMutexRefreshTimeout - timeout for the refresh call
-const drwMutexRefreshCallTimeout = 5 * time.Second
+	// dRWMutexRefreshTimeout - default timeout for the refresh call
+	drwMutexRefreshCallTimeout = 5 * time.Second
 
-// dRWMutexUnlockTimeout - timeout for the unlock call
-const drwMutexUnlockCallTimeout = 30 * time.Second
+	// dRWMutexUnlockTimeout - default timeout for the unlock call
+	drwMutexUnlockCallTimeout = 30 * time.Second
 
-// dRWMutexForceUnlockTimeout - timeout for the unlock call
-const drwMutexForceUnlockCallTimeout = 30 * time.Second
+	// dRWMutexForceUnlockTimeout - default timeout for the force unlock call
+	drwMutexForceUnlockCallTimeout = 30 * time.Second
 
-// dRWMutexRefreshInterval - the interval between two refresh calls
-const drwMutexRefreshInterval = 10 * time.Second
+	// dRWMutexRefreshInterval - default interval between two refresh calls
+	drwMutexRefreshInterval = 10 * time.Second
+
+	// lockRetryInterval - default interval between retries of a failed lock acquisition
+	lockRetryInterval = 1 * time.Second
 
-const drwMutexInfinite = 1<<63 - 1
+	drwMutexInfinite = 1<<63 - 1
+)
+
+// Timeouts are timeouts for specific operations.
+type Timeouts struct {
+	// Acquire - tolerance limit to wait for lock acquisition before giving up.
+	Acquire time.Duration
+
+	// RefreshCall - timeout for the refresh call
+	RefreshCall time.Duration
+
+	// UnlockCall - timeout for the unlock call
+	UnlockCall time.Duration
+
+	// ForceUnlockCall - timeout for the force unlock call
+	ForceUnlockCall time.Duration
+}
+
+// DefaultTimeouts contains default timeouts.
+var DefaultTimeouts = Timeouts{
+	Acquire:         drwMutexAcquireTimeout,
+	RefreshCall:     drwMutexRefreshCallTimeout,
+	UnlockCall:      drwMutexUnlockCallTimeout,
+	ForceUnlockCall: drwMutexForceUnlockCallTimeout,
+}
 
 // A DRWMutex is a distributed mutual exclusion lock.
 type DRWMutex struct {
-	Names         []string
-	writeLocks    []string // Array of nodes that granted a write lock
-	readLocks     []string // Array of array of nodes that granted reader locks
-	rng           *rand.Rand
-	m             sync.Mutex // Mutex to prevent multiple simultaneous locks from this node
-	clnt          *Dsync
-	cancelRefresh context.CancelFunc
+	Names             []string
+	writeLocks        []string // Array of nodes that granted a write lock
+	readLocks         []string // Array of array of nodes that granted reader locks
+	rng               *rand.Rand
+	m                 sync.Mutex // Mutex to prevent multiple simultaneous locks from this node
+	clnt              *Dsync
+	cancelRefresh     context.CancelFunc
+	refreshInterval   time.Duration
+	lockRetryInterval time.Duration
 }
 
 // Granted - represents a structure of a granted lock.
@@ -90,11 +119,13 @@ func NewDRWMutex(clnt *Dsync, names ...string) *DRWMutex {
 	restClnts, _ := clnt.GetLockers()
 	sort.Strings(names)
 	return &DRWMutex{
-		writeLocks: make([]string, len(restClnts)),
-		readLocks:  make([]string, len(restClnts)),
-		Names:      names,
-		clnt:       clnt,
-		rng:        rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}),
+		writeLocks:        make([]string, len(restClnts)),
+		readLocks:         make([]string, len(restClnts)),
+		Names:             names,
+		clnt:              clnt,
+		rng:               rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}),
+		refreshInterval:   drwMutexRefreshInterval,
+		lockRetryInterval: lockRetryInterval,
 	}
 }
@@ -146,10 +177,6 @@ func (dm *DRWMutex) GetRLock(ctx context.Context, cancel context.CancelFunc, id,
 	return dm.lockBlocking(ctx, cancel, id, source, isReadLock, opts)
 }
 
-const (
-	lockRetryInterval = 1 * time.Second
-)
-
 // lockBlocking will try to acquire either a read or a write lock
 //
 // The function will loop using a built-in timing randomized back-off
@@ -209,7 +236,7 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), i
 				return locked
 			}
 
-			time.Sleep(time.Duration(dm.rng.Float64() * float64(lockRetryInterval)))
+			time.Sleep(time.Duration(dm.rng.Float64() * float64(dm.lockRetryInterval)))
 		}
 	}
 }
@@ -224,7 +251,7 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 	go func() {
 		defer cancel()
 
-		refreshTimer := time.NewTimer(drwMutexRefreshInterval)
+		refreshTimer := time.NewTimer(dm.refreshInterval)
 		defer refreshTimer.Stop()
 
 		for {
@@ -232,7 +259,7 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 		case <-ctx.Done():
 			return
 		case <-refreshTimer.C:
-			refreshTimer.Reset(drwMutexRefreshInterval)
+			refreshTimer.Reset(dm.refreshInterval)
 
 			noQuorum, err := refreshLock(ctx, dm.clnt, id, source, quorum)
 			if err == nil && noQuorum {
@@ -250,7 +277,7 @@ func (dm *DRWMutex) startContinousLockRefresh(lockLossCallback func(), id, sourc
 }
 
 func forceUnlock(ctx context.Context, ds *Dsync, id string) {
-	ctx, cancel := context.WithTimeout(ctx, drwMutexForceUnlockCallTimeout)
+	ctx, cancel := context.WithTimeout(ctx, ds.Timeouts.ForceUnlockCall)
 	defer cancel()
 
 	restClnts, _ := ds.GetLockers()
@@ -300,7 +327,7 @@ func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int)
 				return
 			}
 
-			ctx, cancel := context.WithTimeout(ctx, drwMutexRefreshCallTimeout)
+			ctx, cancel := context.WithTimeout(ctx, ds.Timeouts.RefreshCall)
 			defer cancel()
 
 			refreshed, err := c.Refresh(ctx, args)
@@ -379,7 +406,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is
 	}
 
 	// Combined timeout for the lock attempt.
-	ctx, cancel := context.WithTimeout(ctx, drwMutexAcquireTimeout)
+	ctx, cancel := context.WithTimeout(ctx, ds.Timeouts.Acquire)
 	defer cancel()
 	for index, c := range restClnts {
 		wg.Add(1)
@@ -573,7 +600,7 @@ func (dm *DRWMutex) Unlock() {
 
 	isReadLock := false
 	for !releaseAll(dm.clnt, tolerance, owner, &locks, isReadLock, restClnts, dm.Names...) {
-		time.Sleep(time.Duration(dm.rng.Float64() * float64(lockRetryInterval)))
+		time.Sleep(time.Duration(dm.rng.Float64() * float64(dm.lockRetryInterval)))
 	}
 }
@@ -614,7 +641,7 @@ func (dm *DRWMutex) RUnlock() {
 
 	isReadLock := true
 	for !releaseAll(dm.clnt, tolerance, owner, &locks, isReadLock, restClnts, dm.Names...) {
-		time.Sleep(time.Duration(dm.rng.Float64() * float64(lockRetryInterval)))
+		time.Sleep(time.Duration(dm.rng.Float64() * float64(dm.lockRetryInterval)))
 	}
 }
@@ -635,7 +662,7 @@ func sendRelease(ds *Dsync, c NetLocker, owner string, uid string, isReadLock bo
 		Resources: names,
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), drwMutexUnlockCallTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), ds.Timeouts.UnlockCall)
 	defer cancel()
 
 	if isReadLock {
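
Note that refreshInterval and lockRetryInterval stay unexported, so only code inside the dsync package (its tests in particular) can shrink them; that, together with the Timeouts overrides, is where the minutes of saved test runtime come from. A hypothetical in-package override, where newTestDsync is a stand-in for whatever harness the tests actually use and the field names and defaults come from the diff above:

    // In drwmutex_test.go (package dsync), hypothetically:
    dm := NewDRWMutex(newTestDsync(), "test-resource")
    dm.refreshInterval = 100 * time.Millisecond  // default drwMutexRefreshInterval (10s)
    dm.lockRetryInterval = 25 * time.Millisecond // default lockRetryInterval (1s)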