allow lock tolerance to match storage-class drive tolerance (#10270)
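In short: distributed lock acquisition now tolerates as many unreachable lockers as the standard storage class tolerates failed drives, so locking availability tracks erasure-coded write availability. A minimal sketch of that idea, with names and the exact quorum rule assumed for illustration rather than taken from dsync:

package main

import "fmt"

// lockSucceeds mirrors the intuition behind the change: with N lockers
// and a tolerance equal to the standard storage-class parity, a lock
// succeeds while at least N-tolerance lockers respond, just as a write
// succeeds while no more than `parity` drives are down.
func lockSucceeds(totalLockers, reachable, tolerance int) bool {
	return reachable >= totalLockers-tolerance
}

func main() {
	const lockers, parity = 8, 2
	fmt.Println(lockSucceeds(lockers, 6, parity)) // true: 2 lockers down is tolerated
	fmt.Println(lockSucceeds(lockers, 5, parity)) // false: beyond drive tolerance
}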
@@ -28,6 +28,7 @@ import (
 	"fmt"
 	"time"

+	"github.com/minio/minio/cmd/config/storageclass"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/dsync"
 	"github.com/minio/minio/pkg/lsync"
@@ -147,7 +148,18 @@ func (di *distLockInstance) GetLock(timeout *dynamicTimeout) (timedOutErr error)
 	lockSource := getSource(2)
 	start := UTCNow()

-	if !di.rwMutex.GetLock(di.ctx, di.opsID, lockSource, timeout.Timeout()) {
+	// Lockers always default to the standard storage class, because
+	// we always express storage tolerance in terms of the standard
+	// storage class, whether as a number of drives or as a multiple
+	// of the number of nodes. Defaulting lockers to this value
+	// simply means that locking is always consistent in behavior
+	// and effect with erasure-coded drive tolerance.
+	tolerance := globalStorageClass.GetParityForSC(storageclass.STANDARD)
+
+	if !di.rwMutex.GetLock(di.ctx, di.opsID, lockSource, dsync.Options{
+		Timeout:   timeout.Timeout(),
+		Tolerance: tolerance,
+	}) {
 		timeout.LogFailure()
 		return OperationTimedOut{}
 	}
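For context, the tolerance above comes from a parity lookup. A toy sketch of what a lookup like GetParityForSC plausibly does — the type, fields, and defaults here are assumptions for illustration, not MinIO's actual config code:

package main

import "fmt"

// storageClassConfig maps storage-class names to parity drive counts.
type storageClassConfig struct {
	standardParity int // parity drives for STANDARD objects
	rrsParity      int // parity drives for REDUCED_REDUNDANCY objects
}

// getParityForSC returns the parity for the requested class, treating
// anything unrecognized as STANDARD, mirroring the call in the diff.
func (c storageClassConfig) getParityForSC(sc string) int {
	if sc == "REDUCED_REDUNDANCY" {
		return c.rrsParity
	}
	return c.standardParity
}

func main() {
	cfg := storageClassConfig{standardParity: 4, rrsParity: 2}
	fmt.Println(cfg.getParityForSC("STANDARD")) // 4 → lock tolerance of 4
}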
@@ -164,7 +176,14 @@ func (di *distLockInstance) Unlock() {
 func (di *distLockInstance) GetRLock(timeout *dynamicTimeout) (timedOutErr error) {
 	lockSource := getSource(2)
 	start := UTCNow()
-	if !di.rwMutex.GetRLock(di.ctx, di.opsID, lockSource, timeout.Timeout()) {
+
+	// Lockers default to standard storage class always.
+	tolerance := globalStorageClass.GetParityForSC(storageclass.STANDARD)
+
+	if !di.rwMutex.GetRLock(di.ctx, di.opsID, lockSource, dsync.Options{
+		Timeout:   timeout.Timeout(),
+		Tolerance: tolerance,
+	}) {
 		timeout.LogFailure()
 		return OperationTimedOut{}
 	}
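The read-lock path is identical apart from the quorum rule dsync applies internally. A plausible distinction, assumed here for illustration: read locks settle for N-tolerance grants, while write locks get bumped past the halfway mark so two partitioned halves of a cluster can never both hold the write lock:

package main

import "fmt"

// quorum computes how many lockers must grant the lock. The
// split-brain guard for write locks is an assumed rule sketched for
// illustration, not a quote of dsync's implementation.
func quorum(n, tolerance int, readLock bool) int {
	q := n - tolerance
	if !readLock && q == tolerance {
		q++ // tolerance is exactly half: require a strict majority
	}
	return q
}

func main() {
	fmt.Println(quorum(8, 4, true))  // 4 grants suffice for a read lock
	fmt.Println(quorum(8, 4, false)) // 5 grants needed for a write lock
}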
@@ -229,6 +229,8 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) {
 		initAutoHeal(ctx, newObject)
 	}

+	timeout := newDynamicTimeout(3*time.Second, 3*time.Second)
+
 	// **** WARNING ****
 	// Migrating to encrypted backend should happen before initialization of any
 	// sub-systems, make sure that we do not move the above codeblock elsewhere.
@@ -244,7 +246,7 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) {
 	for range retry.NewTimer(retryCtx) {
 		// Let one of the servers acquire the lock; if it times out,
 		// this loop retries the acquisition.
-		if err = txnLk.GetLock(newDynamicTimeout(3*time.Second, 3*time.Second)); err != nil {
+		if err = txnLk.GetLock(timeout); err != nil {
 			logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")
 			continue
 		}
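The last two hunks are a small refactor rather than a behavior change on their own: the dynamicTimeout is hoisted out of the retry loop so one instance is reused across attempts. That matters if the timeout adapts to outcomes, which only works when the same instance observes every LogFailure. A toy adaptive timeout, with the adaptation rule invented for illustration (MinIO's dynamicTimeout is more elaborate):

package main

import (
	"fmt"
	"time"
)

// dynTimeout stretches its deadline after failures and shrinks it back
// toward the floor after successes.
type dynTimeout struct {
	current, minimum time.Duration
}

func newDynTimeout(timeout, minimum time.Duration) *dynTimeout {
	return &dynTimeout{current: timeout, minimum: minimum}
}

func (d *dynTimeout) Timeout() time.Duration { return d.current }

// LogFailure backs off by 50% so a busy cluster gets more slack.
func (d *dynTimeout) LogFailure() { d.current += d.current / 2 }

// LogSuccess tightens the deadline again, never below the minimum.
func (d *dynTimeout) LogSuccess() {
	if next := d.current / 2; next >= d.minimum {
		d.current = next
	}
}

func main() {
	t := newDynTimeout(3*time.Second, 3*time.Second)
	t.LogFailure()
	fmt.Println(t.Timeout()) // 4.5s; this learned state is lost if a
	// fresh dynamicTimeout is built on every retry, as before the change.
}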