fix: make sure multi-pool setup acquires locks properly (#13280)

This fixes a regression introduced in '14bb969782',
which had the potential to cause corruption when
concurrent overwrites attempted to update the same
content in the namespace.

That change made PutObject() and CopyObject() compete
properly for the same locks as NewMultipartUpload(), but in
doing so it stopped taking competing locks on the actual
object for GetObject() and DeleteObject(). Since those
operations no longer contend with writes, concurrent I/O on
a versioned bucket can lead to loss of versions.

This PR fixes that bug: in a multi-pool setup with
replication, the missing competing locks could corrupt
inlined data.

Instead, CompleteMultipartUpload holds the necessary locks
when finishing the transaction; scheduling the multipart
upload itself doesn't need to compete in this manner,
because the pool id location of an existing object is
already known.
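The bug class is easy to reproduce outside MinIO. Below is a minimal, self-contained Go sketch (a toy locker, not MinIO's NSLock implementation): when writers serialize on a side-channel key ("newMultipartObject.lck") while readers and deleters lock the object name, the two never exclude each other, which is exactly the corruption window described above.

```go
package main

import (
	"fmt"
	"sync"
)

// nsLocks is a toy namespace locker keyed by arbitrary strings,
// standing in for a per-(bucket, object) lock service.
type nsLocks struct {
	mu    sync.Mutex
	locks map[string]*sync.RWMutex
}

func (n *nsLocks) get(key string) *sync.RWMutex {
	n.mu.Lock()
	defer n.mu.Unlock()
	if n.locks == nil {
		n.locks = make(map[string]*sync.RWMutex)
	}
	if n.locks[key] == nil {
		n.locks[key] = &sync.RWMutex{}
	}
	return n.locks[key]
}

func main() {
	ns := &nsLocks{}

	// Buggy pattern: the writer locks a side-channel key while the
	// reader locks the object key, so both proceed concurrently.
	writer := ns.get("bucket/object/newMultipartObject.lck")
	reader := ns.get("bucket/object")

	writer.Lock()
	if reader.TryRLock() { // succeeds immediately: no real exclusion
		fmt.Println("reader overlaps an in-flight write: corruption window")
		reader.RUnlock()
	}
	writer.Unlock()

	// Fixed pattern: every operation keys the lock on the object itself.
	shared := ns.get("bucket/object")
	shared.Lock()
	if !shared.TryRLock() { // correctly blocked while the write is held
		fmt.Println("reader blocks until the writer finishes")
	}
	shared.Unlock()
}
```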
Author: Harshavardhana
Date: 2021-09-22 21:46:24 -07:00
Committed by: GitHub
Parent: f9b104f37b
Commit: 200caab82b
4 changed files with 47 additions and 34 deletions


@@ -339,6 +339,22 @@ func (z *erasureServerPools) getPoolIdxExisting(ctx context.Context, bucket, obj
 	return z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{})
 }
 
+func (z *erasureServerPools) getPoolIdxNoLock(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
+	idx, err = z.getPoolIdxExistingNoLock(ctx, bucket, object)
+	if err != nil && !isErrObjectNotFound(err) {
+		return idx, err
+	}
+
+	if isErrObjectNotFound(err) {
+		idx = z.getAvailablePoolIdx(ctx, bucket, object, size)
+		if idx < 0 {
+			return -1, toObjectErr(errDiskFull)
+		}
+	}
+
+	return idx, nil
+}
+
 // getPoolIdx returns the found previous object and its corresponding pool idx,
 // if none are found falls back to most available space pool.
 func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
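The new helper's contract is that its caller already holds the namespace lock on the object. Condensed from the PutObject hunks below (a sketch stitched together from this diff, not a verbatim excerpt):

```go
if !opts.NoLock {
	// Lock the object name itself, not a multipart side-channel key.
	ns := z.NewNSLock(bucket, object)
	lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return ObjectInfo{}, err
	}
	ctx = lkctx.Context()
	defer ns.Unlock(lkctx.Cancel)
	opts.NoLock = true // inner layers must not re-acquire the lock
}

// Pool resolution runs under the lock held above, so it uses the
// NoLock variant rather than locking a second time.
idx, err := z.getPoolIdxNoLock(ctx, bucket, object, data.Size())
if err != nil {
	return ObjectInfo{}, err
}
return z.serverPools[idx].PutObject(ctx, bucket, object, data, opts)
```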
@@ -791,7 +807,7 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
 		return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
 	}
 	if !opts.NoLock {
-		ns := z.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object, "newMultipartObject.lck"))
+		ns := z.NewNSLock(bucket, object)
 		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
 		if err != nil {
 			return ObjectInfo{}, err
@@ -801,7 +817,7 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
 		opts.NoLock = true
 	}
 
-	idx, err := z.getPoolIdx(ctx, bucket, object, data.Size())
+	idx, err := z.getPoolIdxNoLock(ctx, bucket, object, data.Size())
 	if err != nil {
 		return ObjectInfo{}, err
 	}
@@ -939,7 +955,7 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 
 	if !dstOpts.NoLock {
-		ns := z.NewNSLock(minioMetaMultipartBucket, pathJoin(dstBucket, dstObject, "newMultipartObject.lck"))
+		ns := z.NewNSLock(dstBucket, dstObject)
 		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
 		if err != nil {
 			return ObjectInfo{}, err
@@ -949,7 +965,7 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec
 		dstOpts.NoLock = true
 	}
 
-	poolIdx, err := z.getPoolIdx(ctx, dstBucket, dstObject, srcInfo.Size)
+	poolIdx, err := z.getPoolIdxNoLock(ctx, dstBucket, dstObject, srcInfo.Size)
 	if err != nil {
 		return objInfo, err
 	}
@@ -982,6 +998,7 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec
 		Versioned: dstOpts.Versioned,
 		VersionID: dstOpts.VersionID,
 		MTime:     dstOpts.MTime,
+		NoLock:    true,
 	}
 	return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
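The added NoLock field is what keeps this hand-off safe. An annotated restatement of the hunk above (the comments are editorial, not part of the diff):

```go
putOpts := ObjectOptions{
	Versioned: dstOpts.Versioned,
	VersionID: dstOpts.VersionID,
	MTime:     dstOpts.MTime,
	// CopyObject already holds the namespace lock on (dstBucket,
	// dstObject), so the pool-level PutObject is told to skip its own
	// lock acquisition instead of contending for the same name again.
	NoLock: true,
}
return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
```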
@@ -1162,14 +1179,6 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
 		return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
 	}
 
-	ns := z.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object, "newMultipartObject.lck"))
-	lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
-	if err != nil {
-		return "", err
-	}
-	ctx = lkctx.Context()
-	defer ns.Unlock(lkctx.Cancel)
-
 	for idx, pool := range z.serverPools {
 		result, err := pool.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList)
 		if err != nil {
@@ -1183,6 +1192,8 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
 		}
 	}
 
+	// any parallel writes on the object will block for this poolIdx
+	// to return since this holds a read lock on the namespace.
 	idx, err := z.getPoolIdx(ctx, bucket, object, -1)
 	if err != nil {
 		return "", err