use single dynamic timeout for most locked API/heal ops (#10275)
newDynamicTimeout should be allocated once. For the transient locks taken during config and IAM initialization, the timeout should be allocated once before the `for` loop rather than on every iteration. This PR doesn't fix any bug as such, but it lets the timeout adapt dynamically as intended.
parent bb5976d727
commit e57c742674
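The reasoning matters more than the diff itself: a dynamic timeout only adapts if the same instance survives across lock attempts, since it learns from the successes and failures logged against it; allocating a fresh one on every loop iteration throws that state away. Below is a minimal Go sketch of the idea. It is an invented approximation for illustration, not MinIO's actual `dynamicTimeout` type; the `adaptiveTimeout` name and its adjustment rules are assumptions of this example.

package main

import (
	"fmt"
	"sync"
	"time"
)

// adaptiveTimeout is a toy stand-in for a dynamic timeout. It starts at an
// initial value and adjusts as lock attempts succeed or time out.
type adaptiveTimeout struct {
	mu       sync.Mutex
	current  time.Duration
	minimum  time.Duration
	failures int
}

func newAdaptiveTimeout(initial, minimum time.Duration) *adaptiveTimeout {
	return &adaptiveTimeout{current: initial, minimum: minimum}
}

func (t *adaptiveTimeout) Timeout() time.Duration {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.current
}

// LogFailure widens the timeout by 25% after every third timeout, giving a
// contended lock more headroom on later attempts.
func (t *adaptiveTimeout) LogFailure() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.failures++
	if t.failures >= 3 {
		t.current += t.current / 4
		t.failures = 0
	}
}

// LogSuccess drifts the timeout back toward the observed duration, never
// below the configured minimum.
func (t *adaptiveTimeout) LogSuccess(took time.Duration) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.failures = 0
	next := (t.current + took) / 2
	if next < t.minimum {
		next = t.minimum
	}
	t.current = next
}

func main() {
	// Allocated once: the accumulated state is what makes the timeout dynamic.
	lockTimeout := newAdaptiveTimeout(3*time.Second, time.Second)
	for i := 0; i < 6; i++ {
		lockTimeout.LogFailure() // pretend every attempt timed out
	}
	// The timeout has grown past the initial 3s. Allocating a new
	// adaptiveTimeout inside the loop would have reset it each iteration.
	fmt.Println(lockTimeout.Timeout())
}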
@@ -1270,10 +1270,9 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
 	}
 
 	deadlinedCtx, cancel := context.WithTimeout(ctx, deadline)
-
 	defer cancel()
 
-	nsLock := objectAPI.NewNSLock(deadlinedCtx, minioMetaBucket, "obd-in-progress")
+	nsLock := objectAPI.NewNSLock(ctx, minioMetaBucket, "obd-in-progress")
 	if err := nsLock.GetLock(newDynamicTimeout(deadline, deadline)); err != nil { // returns a locked lock
 		errResp(err)
 		return
@@ -417,7 +417,7 @@ func (c *diskCache) Stat(ctx context.Context, bucket, object string) (oi ObjectI
 func (c *diskCache) statCachedMeta(ctx context.Context, cacheObjPath string) (meta *cacheMeta, partial bool, numHits int, err error) {
 
 	cLock := c.NewNSLockFn(ctx, cacheObjPath)
-	if err = cLock.GetRLock(globalObjectTimeout); err != nil {
+	if err = cLock.GetRLock(globalOperationTimeout); err != nil {
 		return
 	}
 
@@ -499,7 +499,7 @@ func (c *diskCache) statCache(ctx context.Context, cacheObjPath string) (meta *c
 func (c *diskCache) SaveMetadata(ctx context.Context, bucket, object string, meta map[string]string, actualSize int64, rs *HTTPRangeSpec, rsFileName string, incHitsOnly bool) error {
 	cachedPath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(ctx, cachedPath)
-	if err := cLock.GetLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer cLock.Unlock()
@@ -665,7 +665,7 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
 	}
 	cachePath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(ctx, cachePath)
-	if err := cLock.GetLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer cLock.Unlock()
@@ -871,7 +871,7 @@ func (c *diskCache) bitrotReadFromCache(ctx context.Context, filePath string, of
 func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, numHits int, err error) {
 	cacheObjPath := getCacheSHADir(c.dir, bucket, object)
 	cLock := c.NewNSLockFn(ctx, cacheObjPath)
-	if err := cLock.GetRLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetRLock(globalOperationTimeout); err != nil {
 		return nil, numHits, err
 	}
 
@@ -935,7 +935,7 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
 // Deletes the cached object
 func (c *diskCache) delete(ctx context.Context, cacheObjPath string) (err error) {
 	cLock := c.NewNSLockFn(ctx, cacheObjPath)
-	if err := cLock.GetLock(globalObjectTimeout); err != nil {
+	if err := cLock.GetLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer cLock.Unlock()
@@ -458,12 +458,12 @@ func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string
 	lock := z.NewNSLock(ctx, bucket, object)
 	switch lockType {
 	case writeLock:
-		if err = lock.GetLock(globalObjectTimeout); err != nil {
+		if err = lock.GetLock(globalOperationTimeout); err != nil {
 			return nil, err
 		}
 		nsUnlocker = lock.Unlock
 	case readLock:
-		if err = lock.GetRLock(globalObjectTimeout); err != nil {
+		if err = lock.GetRLock(globalOperationTimeout); err != nil {
 			return nil, err
 		}
 		nsUnlocker = lock.RUnlock
@@ -492,7 +492,7 @@ func (z *erasureZones) GetObjectNInfo(ctx context.Context, bucket, object string
 func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
 	// Lock the object before reading.
 	lk := z.NewNSLock(ctx, bucket, object)
-	if err := lk.GetRLock(globalObjectTimeout); err != nil {
+	if err := lk.GetRLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer lk.RUnlock()
@@ -516,7 +516,7 @@ func (z *erasureZones) GetObject(ctx context.Context, bucket, object string, sta
 func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	// Lock the object before reading.
 	lk := z.NewNSLock(ctx, bucket, object)
-	if err := lk.GetRLock(globalObjectTimeout); err != nil {
+	if err := lk.GetRLock(globalOperationTimeout); err != nil {
 		return ObjectInfo{}, err
 	}
 	defer lk.RUnlock()
@@ -544,7 +544,7 @@ func (z *erasureZones) GetObjectInfo(ctx context.Context, bucket, object string,
 func (z *erasureZones) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (ObjectInfo, error) {
 	// Lock the object.
 	lk := z.NewNSLock(ctx, bucket, object)
-	if err := lk.GetLock(globalObjectTimeout); err != nil {
+	if err := lk.GetLock(globalOperationTimeout); err != nil {
 		return ObjectInfo{}, err
 	}
 	defer lk.Unlock()
@@ -625,7 +625,7 @@ func (z *erasureZones) CopyObject(ctx context.Context, srcBucket, srcObject, dst
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 	if !cpSrcDstSame {
 		lk := z.NewNSLock(ctx, dstBucket, dstObject)
-		if err := lk.GetLock(globalObjectTimeout); err != nil {
+		if err := lk.GetLock(globalOperationTimeout); err != nil {
 			return objInfo, err
 		}
 		defer lk.Unlock()
@@ -1732,7 +1732,7 @@ func (z *erasureZones) ListBuckets(ctx context.Context) (buckets []BucketInfo, e
 func (z *erasureZones) ReloadFormat(ctx context.Context, dryRun bool) error {
 	// Acquire lock on format.json
 	formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile)
-	if err := formatLock.GetRLock(globalHealingTimeout); err != nil {
+	if err := formatLock.GetRLock(globalOperationTimeout); err != nil {
 		return err
 	}
 	defer formatLock.RUnlock()
@@ -1748,7 +1748,7 @@ func (z *erasureZones) ReloadFormat(ctx context.Context, dryRun bool) error {
 func (z *erasureZones) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
 	// Acquire lock on format.json
 	formatLock := z.NewNSLock(ctx, minioMetaBucket, formatConfigFile)
-	if err := formatLock.GetLock(globalHealingTimeout); err != nil {
+	if err := formatLock.GetLock(globalOperationTimeout); err != nil {
 		return madmin.HealResultItem{}, err
 	}
 	defer formatLock.Unlock()
@@ -1951,14 +1951,14 @@ func (z *erasureZones) HealObject(ctx context.Context, bucket, object, versionID
 	lk := z.NewNSLock(ctx, bucket, object)
 	if bucket == minioMetaBucket {
 		// For .minio.sys bucket heals we should hold write locks.
-		if err := lk.GetLock(globalHealingTimeout); err != nil {
+		if err := lk.GetLock(globalOperationTimeout); err != nil {
 			return madmin.HealResultItem{}, err
 		}
 		defer lk.Unlock()
 	} else {
 		// Lock the object before healing. Use read lock since healing
 		// will only regenerate parts & xl.meta of outdated disks.
-		if err := lk.GetRLock(globalHealingTimeout); err != nil {
+		if err := lk.GetRLock(globalOperationTimeout); err != nil {
 			return madmin.HealResultItem{}, err
 		}
 		defer lk.RUnlock()
@@ -707,7 +707,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 
 	// Hold write lock on the object.
 	destLock := fs.NewNSLock(ctx, bucket, object)
-	if err = destLock.GetLock(globalObjectTimeout); err != nil {
+	if err = destLock.GetLock(globalOperationTimeout); err != nil {
 		return oi, err
 	}
 	defer destLock.Unlock()
cmd/fs-v1.go
@@ -601,7 +601,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
 
 	if !cpSrcDstSame {
 		objectDWLock := fs.NewNSLock(ctx, dstBucket, dstObject)
-		if err := objectDWLock.GetLock(globalObjectTimeout); err != nil {
+		if err := objectDWLock.GetLock(globalOperationTimeout); err != nil {
 			return oi, err
 		}
 		defer objectDWLock.Unlock()
@@ -691,12 +691,12 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 	lock := fs.NewNSLock(ctx, bucket, object)
 	switch lockType {
 	case writeLock:
-		if err = lock.GetLock(globalObjectTimeout); err != nil {
+		if err = lock.GetLock(globalOperationTimeout); err != nil {
 			return nil, err
 		}
 		nsUnlocker = lock.Unlock
 	case readLock:
-		if err = lock.GetRLock(globalObjectTimeout); err != nil {
+		if err = lock.GetRLock(globalOperationTimeout); err != nil {
 			return nil, err
 		}
 		nsUnlocker = lock.RUnlock
@@ -782,7 +782,7 @@ func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offse
 
 	// Lock the object before reading.
 	lk := fs.NewNSLock(ctx, bucket, object)
-	if err := lk.GetRLock(globalObjectTimeout); err != nil {
+	if err := lk.GetRLock(globalOperationTimeout); err != nil {
 		logger.LogIf(ctx, err)
 		return err
 	}
@@ -1006,7 +1006,7 @@ func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (
 func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
 	// Lock the object before reading.
 	lk := fs.NewNSLock(ctx, bucket, object)
-	if err := lk.GetRLock(globalObjectTimeout); err != nil {
+	if err := lk.GetRLock(globalOperationTimeout); err != nil {
 		return oi, err
 	}
 	defer lk.RUnlock()
@@ -1044,7 +1044,7 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
 	oi, err := fs.getObjectInfoWithLock(ctx, bucket, object)
 	if err == errCorruptedFormat || err == io.EOF {
 		lk := fs.NewNSLock(ctx, bucket, object)
-		if err = lk.GetLock(globalObjectTimeout); err != nil {
+		if err = lk.GetLock(globalOperationTimeout); err != nil {
 			return oi, toObjectErr(err, bucket, object)
 		}
 
@@ -1095,7 +1095,7 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string
 
 	// Lock the object.
 	lk := fs.NewNSLock(ctx, bucket, object)
-	if err := lk.GetLock(globalObjectTimeout); err != nil {
+	if err := lk.GetLock(globalOperationTimeout); err != nil {
 		logger.LogIf(ctx, err)
 		return objInfo, err
 	}
@@ -208,9 +208,7 @@ var (
 	globalDomainNames []string      // Root domains for virtual host style requests
 	globalDomainIPs   set.StringSet // Root domain IP address(s) for a distributed MinIO deployment
 
-	globalObjectTimeout    = newDynamicTimeout( /*1*/ 10*time.Minute /*10*/, 600*time.Second) // timeout for Object API related ops
-	globalOperationTimeout = newDynamicTimeout(10*time.Minute /*30*/, 600*time.Second)        // default timeout for general ops
-	globalHealingTimeout   = newDynamicTimeout(30*time.Minute /*1*/, 30*time.Minute)          // timeout for healing related ops
+	globalOperationTimeout = newDynamicTimeout(10*time.Minute, 5*time.Minute) // default timeout for general ops
 
 	globalBucketObjectLockSys *BucketObjectLockSys
 	globalBucketQuotaSys      *BucketQuotaSys
@@ -449,10 +449,13 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer) {
 	rquorum := InsufficientReadQuorum{}
 	wquorum := InsufficientWriteQuorum{}
 
+	// allocate dynamic timeout once before the loop
+	iamLockTimeout := newDynamicTimeout(3*time.Second, 5*time.Second)
+
 	for range retry.NewTimerWithJitter(retryCtx, time.Second, 5*time.Second, retry.MaxJitter) {
 		// let one of the server acquire the lock, if not let them timeout.
 		// which shall be retried again by this loop.
-		if err := txnLk.GetLock(newDynamicTimeout(3*time.Second, 5*time.Second)); err != nil {
+		if err := txnLk.GetLock(iamLockTimeout); err != nil {
 			logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. trying to acquire lock")
 			continue
 		}
@@ -229,7 +229,8 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) {
 		initAutoHeal(ctx, newObject)
 	}
 
-	timeout := newDynamicTimeout(3*time.Second, 3*time.Second)
+	// allocate dynamic timeout once before the loop
+	configLockTimeout := newDynamicTimeout(3*time.Second, 5*time.Second)
 
 	// **** WARNING ****
 	// Migrating to encrypted backend should happen before initialization of any
@@ -246,7 +247,7 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) {
 	for range retry.NewTimer(retryCtx) {
 		// let one of the server acquire the lock, if not let them timeout.
 		// which shall be retried again by this loop.
-		if err = txnLk.GetLock(timeout); err != nil {
+		if err = txnLk.GetLock(configLockTimeout); err != nil {
 			logger.Info("Waiting for all MinIO sub-systems to be initialized.. trying to acquire lock")
 			continue
 		}
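The config and IAM hunks above both land on the same shape, sketched here for clarity. `tryLock` is a hypothetical stand-in for `txnLk.GetLock`, and `adaptiveTimeout` is the toy type from the sketch near the top of this page, not MinIO's real API:

// Allocate-once retry pattern from the init loops above (illustrative only).
func acquireWithRetry(tryLock func(timeout time.Duration) error) {
	// Allocate the dynamic timeout once, before the loop.
	lockTimeout := newAdaptiveTimeout(3*time.Second, time.Second)
	for {
		started := time.Now()
		if err := tryLock(lockTimeout.Timeout()); err != nil {
			lockTimeout.LogFailure() // feedback carries into the next attempt
			continue
		}
		lockTimeout.LogSuccess(time.Since(started))
		return
	}
}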