delayed locks until we have started reading the body (#10474)
This is to ensure that Go contexts work properly. After some interesting experiments I found that Go's net/http does not cancel the request context when the Body is non-empty and has not been read till EOF. The following gist explains this: https://gist.github.com/harshavardhana/c51dcfd055780eaeb71db54f9c589150. This can lead to a pile-up of goroutines on the server which are never canceled and die only at a much later point in time, which can simply overwhelm the server.

To avoid this, refactor the locking such that we take locks after we have started reading from the body, and only take locks when needed.

Also, remove contextReader as it's not useful: the context is not canceled until the body reaches EOF, so there is no point in wrapping the body with it and putting a `select {` on it, which only increases CPU overhead. We will still use the context to cancel the lockers etc.

Additionally, simplify the locker code to avoid timers; re-using them is a complicated ordeal, so avoid them in the hot path. Since locking is very common, this may avoid a lot of allocations.
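For reference, here is a minimal standalone Go sketch (not part of this commit) of the behavior the gist demonstrates: a handler that waits on the request context without draining the body will not see cancellation, because net/http only starts the background read that detects a departed client once the body has been read to EOF. The /demo path, port and curl invocation below are illustrative only.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	http.HandleFunc("/demo", func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-r.Context().Done():
			// Not reached while unread body remains: the server only starts the
			// background read that notices the disconnect after body EOF.
			fmt.Println("context canceled:", r.Context().Err())
		case <-time.After(30 * time.Second):
			fmt.Println("gave up waiting; the context was never canceled")
		}
		// Draining the body is what eventually re-enables disconnect detection.
		io.Copy(io.Discard, r.Body)
	})
	// Try: curl -X POST --data-binary @somefile http://localhost:8080/demo,
	// then kill curl before the 30s elapse and watch the handler keep running.
	http.ListenAndServe(":8080", nil)
}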
@@ -145,6 +145,32 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
 		return nil, err
 	}
 
+	var unlockOnDefer bool
+	var nsUnlocker = func() {}
+	defer func() {
+		if unlockOnDefer {
+			nsUnlocker()
+		}
+	}()
+
+	// Acquire lock
+	if lockType != noLock {
+		lock := er.NewNSLock(ctx, bucket, object)
+		switch lockType {
+		case writeLock:
+			if err = lock.GetLock(globalOperationTimeout); err != nil {
+				return nil, err
+			}
+			nsUnlocker = lock.Unlock
+		case readLock:
+			if err = lock.GetRLock(globalOperationTimeout); err != nil {
+				return nil, err
+			}
+			nsUnlocker = lock.RUnlock
+		}
+		unlockOnDefer = true
+	}
+
 	// Handler directory request by returning a reader that
 	// returns no bytes.
 	if HasSuffix(object, SlashSeparator) {
@@ -152,7 +178,8 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
 		if objInfo, err = er.getObjectInfoDir(ctx, bucket, object); err != nil {
 			return nil, toObjectErr(err, bucket, object)
 		}
-		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts)
+		unlockOnDefer = false
+		return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts, nsUnlocker)
 	}
 
 	fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts)
@@ -173,7 +200,8 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
 		}, toObjectErr(errMethodNotAllowed, bucket, object)
 	}
 
-	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts)
+	unlockOnDefer = false
+	fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts, nsUnlocker)
 	if nErr != nil {
 		return nil, nErr
 	}
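The GetObjectNInfo hunks above implement an unlock hand-off: a deferred closure releases the namespace lock on every early return while unlockOnDefer is true, and once the unlocker is passed into NewGetObjectReader / NewGetObjectReaderFromReader the flag is cleared, so the returned reader becomes responsible for releasing the lock when it is closed. Below is a minimal standalone sketch of the same pattern, using a plain sync.RWMutex and a hypothetical cleanupReader in place of MinIO's NSLock and GetObjectReader.

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// cleanupReader runs cleanup exactly once when the reader is closed.
type cleanupReader struct {
	io.Reader
	cleanup func()
	once    sync.Once
}

func (c *cleanupReader) Close() error {
	c.once.Do(c.cleanup)
	return nil
}

// openLocked takes the read lock and either releases it itself on an early
// return (via the armed defer) or hands the unlock over to the returned reader.
func openLocked(mu *sync.RWMutex) (io.ReadCloser, error) {
	mu.RLock()
	unlockOnDefer := true
	unlocker := mu.RUnlock
	defer func() {
		if unlockOnDefer {
			unlocker()
		}
	}()

	// ... validation and early returns above this point still unlock ...

	// Hand the unlock over to the reader and disarm the defer.
	unlockOnDefer = false
	return &cleanupReader{Reader: strings.NewReader("object data"), cleanup: unlocker}, nil
}

func main() {
	var mu sync.RWMutex
	rc, err := openLocked(&mu)
	if err != nil {
		panic(err)
	}
	defer rc.Close() // releases the read lock
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b))
}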
@@ -202,6 +230,13 @@ func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, s
 		return err
 	}
 
+	// Lock the object before reading.
+	lk := er.NewNSLock(ctx, bucket, object)
+	if err := lk.GetRLock(globalOperationTimeout); err != nil {
+		return err
+	}
+	defer lk.RUnlock()
+
 	// Start offset cannot be negative.
 	if startOffset < 0 {
 		logger.LogIf(ctx, errUnexpected, logger.Application)
@@ -386,6 +421,13 @@ func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object strin
 		return info, err
 	}
 
+	// Lock the object before reading.
+	lk := er.NewNSLock(ctx, bucket, object)
+	if err := lk.GetRLock(globalOperationTimeout); err != nil {
+		return ObjectInfo{}, err
+	}
+	defer lk.RUnlock()
+
 	if HasSuffix(object, SlashSeparator) {
 		info, err = er.getObjectInfoDir(ctx, bucket, object)
 		if err != nil {
@@ -738,6 +780,12 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 
+	lk := er.NewNSLock(ctx, bucket, object)
+	if err := lk.GetLock(globalOperationTimeout); err != nil {
+		return ObjectInfo{}, err
+	}
+	defer lk.Unlock()
+
 	// Rename the successfully written temporary object to final location.
 	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, fi.DataDir, bucket, object, writeQuorum, nil); err != nil {
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
@@ -970,6 +1018,13 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string
 		return objInfo, err
 	}
 
+	// Acquire a write lock before deleting the object.
+	lk := er.NewNSLock(ctx, bucket, object)
+	if err = lk.GetLock(globalOperationTimeout); err != nil {
+		return ObjectInfo{}, err
+	}
+	defer lk.Unlock()
+
 	storageDisks := er.getDisks()
 	writeQuorum := len(storageDisks)/2 + 1
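The locker simplification the commit message mentions (avoiding timers in the hot path because re-using them is a complicated ordeal) is not part of the hunks shown here. As a rough, hypothetical sketch of that idea only, and not MinIO's actual dsync code, a retry loop can check a deadline and sleep briefly instead of re-arming a time.Timer on every attempt; lockRetryInterval, tryAcquire and acquireWithTimeout below are illustrative names.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

const lockRetryInterval = 50 * time.Millisecond

// acquireWithTimeout retries tryAcquire until it succeeds, the timeout passes,
// or the context is canceled. It uses time.Sleep plus deadline checks rather
// than a reused time.Timer, avoiding the Reset/Stop bookkeeping in the hot path.
func acquireWithTimeout(ctx context.Context, tryAcquire func() bool, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if tryAcquire() {
			return nil
		}
		if err := ctx.Err(); err != nil {
			return err
		}
		if time.Now().After(deadline) {
			return errors.New("timed out acquiring lock")
		}
		time.Sleep(lockRetryInterval)
	}
}

func main() {
	var mu sync.Mutex
	if err := acquireWithTimeout(context.Background(), mu.TryLock, 2*time.Second); err != nil {
		fmt.Println("lock not acquired:", err)
		return
	}
	defer mu.Unlock()
	fmt.Println("lock acquired")
}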