Create logger package and rename errorIf to LogIf (#5678)

Remove the custom message from error logging.
Replace errors.Trace with logger.LogIf.
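As a rough before/after sketch of the pattern this commit applies throughout the diff below (the names logIf, statPartOld, and statPartNew are illustrative stand-ins, not MinIO internals): error paths stop wrapping errors with errors.Trace and stop formatting an errorIf message, and instead thread a request-scoped context.Context down to the call site so the error can be reported via logger.LogIf(ctx, err) and returned unwrapped.

```go
package main

import (
	"context"
	"errors"
	"log"
)

var errDiskNotFound = errors.New("disk not found")

// Old style: the call site wrapped the error (errors.Trace) or logged it
// with a hand-written message (errorIf) and returned the wrapped value.
func statPartOld(disk string) error {
	if disk == "" {
		// return errors.Trace(errDiskNotFound) // wrapped with a stack trace
		return errDiskNotFound
	}
	return nil
}

// New style: log through a context-aware helper and return the bare error.
// logIf stands in for logger.LogIf; in MinIO the context carries the request
// details that previously lived in the log message.
func logIf(ctx context.Context, err error) {
	if err != nil {
		log.Printf("%v", err)
	}
}

func statPartNew(ctx context.Context, disk string) error {
	if disk == "" {
		logIf(ctx, errDiskNotFound)
		return errDiskNotFound
	}
	return nil
}

func main() {
	ctx := context.Background()
	if err := statPartNew(ctx, ""); err != nil {
		log.Printf("stat failed: %v", err)
	}
}
```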
kannappanr
2018-04-05 15:04:40 -07:00
committed by GitHub
parent 91fd8ffeb7
commit f8a3fd0c2a
119 changed files with 2608 additions and 1860 deletions


@@ -26,6 +26,7 @@ import (
"sync"
"time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/mimedb"
@@ -40,7 +41,7 @@ func (xl xlObjects) getMultipartSHADir(bucket, object string) string {
}
// isUploadIDExists - verify if a given uploadID exists and is valid.
func (xl xlObjects) isUploadIDExists(bucket, object, uploadID string) bool {
func (xl xlObjects) isUploadIDExists(ctx context.Context, bucket, object, uploadID string) bool {
return xl.isObject(minioMetaMultipartBucket, xl.getUploadIDDir(bucket, object, uploadID))
}
@@ -65,7 +66,7 @@ func (xl xlObjects) removeObjectPart(bucket, object, uploadID, partName string)
}
// statPart - returns fileInfo structure for a successful stat on part file.
func (xl xlObjects) statPart(bucket, object, uploadID, partName string) (fileInfo FileInfo, err error) {
func (xl xlObjects) statPart(ctx context.Context, bucket, object, uploadID, partName string) (fileInfo FileInfo, err error) {
var ignoredErrs []error
partNamePath := path.Join(xl.getUploadIDDir(bucket, object, uploadID), partName)
for _, disk := range xl.getLoadBalancedDisks() {
@@ -83,16 +84,17 @@ func (xl xlObjects) statPart(bucket, object, uploadID, partName string) (fileInf
continue
}
// Error is not ignored, return right here.
return FileInfo{}, errors.Trace(err)
logger.LogIf(ctx, err)
return FileInfo{}, err
}
// If all errors were ignored, reduce to maximal occurrence
// based on the read quorum.
readQuorum := len(xl.getDisks()) / 2
return FileInfo{}, reduceReadQuorumErrs(ignoredErrs, nil, readQuorum)
return FileInfo{}, reduceReadQuorumErrs(ctx, ignoredErrs, nil, readQuorum)
}
// commitXLMetadata - commit `xl.json` from source prefix to destination prefix in the given slice of disks.
func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) {
func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) {
var wg = &sync.WaitGroup{}
var mErrs = make([]error, len(disks))
@@ -102,7 +104,8 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// Rename `xl.json` to all disks in parallel.
for index, disk := range disks {
if disk == nil {
mErrs[index] = errors.Trace(errDiskNotFound)
logger.LogIf(ctx, errDiskNotFound)
mErrs[index] = errDiskNotFound
continue
}
wg.Add(1)
@@ -115,7 +118,8 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// Renames `xl.json` from source prefix to destination prefix.
rErr := disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile)
if rErr != nil {
mErrs[index] = errors.Trace(rErr)
logger.LogIf(ctx, rErr)
mErrs[index] = rErr
return
}
mErrs[index] = nil
@@ -124,10 +128,10 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// Wait for all the routines.
wg.Wait()
err := reduceWriteQuorumErrs(mErrs, objectOpIgnoredErrs, quorum)
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
if errors.Cause(err) == errXLWriteQuorum {
// Delete all `xl.json` successfully renamed.
deleteAllXLMetadata(disks, dstBucket, dstPrefix, mErrs)
deleteAllXLMetadata(ctx, disks, dstBucket, dstPrefix, mErrs)
}
return evalDisks(disks, mErrs), err
}
@@ -140,7 +144,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// towards simplification of multipart APIs.
// The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
if err := checkListMultipartArgs(bucket, object, keyMarker, uploadIDMarker, delimiter, xl); err != nil {
if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, xl); err != nil {
return result, err
}
@@ -158,7 +162,8 @@ func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, ke
if err == errFileNotFound {
return result, nil
}
return result, errors.Trace(err)
logger.LogIf(ctx, err)
return result, err
}
for i := range uploadIDs {
uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], slashSeparator)
@@ -184,7 +189,7 @@ func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, ke
// '.minio.sys/multipart/bucket/object/uploads.json' on all the
// disks. `uploads.json` carries metadata regarding on-going multipart
// operation(s) on the object.
func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[string]string) (string, error) {
func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, object string, meta map[string]string) (string, error) {
dataBlocks, parityBlocks := getRedundancyCount(meta[amzStorageClass], len(xl.getDisks()))
@@ -213,17 +218,17 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
tempUploadIDPath := uploadID
// Write updated `xl.json` to all disks.
disks, err := writeSameXLMetadata(xl.getDisks(), minioMetaTmpBucket, tempUploadIDPath, xlMeta, writeQuorum)
disks, err := writeSameXLMetadata(ctx, xl.getDisks(), minioMetaTmpBucket, tempUploadIDPath, xlMeta, writeQuorum)
if err != nil {
return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
}
// delete the tmp path later in case we fail to rename (ignore
// returned errors) - this will be a no-op in case of a rename
// success.
defer xl.deleteObject(minioMetaTmpBucket, tempUploadIDPath)
defer xl.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath)
// Attempt to rename temp upload object to actual upload path object
_, rErr := renameObject(disks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum)
_, rErr := renameObject(ctx, disks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum)
if rErr != nil {
return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
}
@@ -238,14 +243,14 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
//
// Implements S3 compatible initiate multipart API.
func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
if err := checkNewMultipartArgs(bucket, object, xl); err != nil {
if err := checkNewMultipartArgs(ctx, bucket, object, xl); err != nil {
return "", err
}
// No metadata is set, allocate a new one.
if meta == nil {
meta = make(map[string]string)
}
return xl.newMultipartUpload(bucket, object, meta)
return xl.newMultipartUpload(ctx, bucket, object, meta)
}
// CopyObjectPart - reads incoming stream and internally erasure codes
@@ -262,20 +267,20 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
}
defer objectSRLock.RUnlock()
if err := checkNewMultipartArgs(srcBucket, srcObject, xl); err != nil {
if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, xl); err != nil {
return pi, err
}
go func() {
if gerr := xl.getObject(srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
if gerr = srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
}
return
}
// Close writer explicitly signalling we wrote all data.
if gerr := srcInfo.Writer.Close(); gerr != nil {
errorIf(gerr, "Unable to read %s of the object `%s/%s`.", srcBucket, srcObject)
logger.LogIf(ctx, gerr)
return
}
}()
@@ -295,13 +300,14 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, xl); err != nil {
if err := checkPutObjectPartArgs(ctx, bucket, object, xl); err != nil {
return pi, err
}
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return pi, toObjectErr(errors.Trace(errInvalidArgument))
logger.LogIf(ctx, errInvalidArgument)
return pi, toObjectErr(errInvalidArgument)
}
var partsMetadata []xlMetaV1
@@ -315,13 +321,13 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
}
// Validates if upload ID exists.
if !xl.isUploadIDExists(bucket, object, uploadID) {
if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
preUploadIDLock.RUnlock()
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
return pi, InvalidUploadID{UploadID: uploadID}
}
// Read metadata associated with the object from all disks.
partsMetadata, errs = readAllXLMetadata(xl.getDisks(), minioMetaMultipartBucket,
partsMetadata, errs = readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket,
uploadIDPath)
// get Quorum for this object
@@ -330,7 +336,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
return pi, toObjectErr(err, bucket, object)
}
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum)
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if errors.Cause(reducedErr) == errXLWriteQuorum {
preUploadIDLock.RUnlock()
return pi, toObjectErr(reducedErr, bucket, object)
@@ -341,7 +347,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs)
// Pick one from the first valid metadata.
xlMeta, err := pickValidXLMeta(partsMetadata, modTime)
xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime)
if err != nil {
return pi, err
}
@@ -356,15 +362,15 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
tmpPartPath := path.Join(tmpPart, partSuffix)
// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
defer xl.deleteObject(minioMetaTmpBucket, tmpPart)
defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpPart)
if data.Size() > 0 {
if pErr := xl.prepareFile(minioMetaTmpBucket, tmpPartPath, data.Size(), onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks, writeQuorum); err != nil {
if pErr := xl.prepareFile(ctx, minioMetaTmpBucket, tmpPartPath, data.Size(), onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks, writeQuorum); err != nil {
return pi, toObjectErr(pErr, bucket, object)
}
}
storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
storage, err := NewErasureStorage(ctx, onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
@@ -373,7 +379,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
buffer := xl.bp.Get()
defer xl.bp.Put(buffer)
file, err := storage.CreateFile(data, minioMetaTmpBucket, tmpPartPath, buffer, DefaultBitrotAlgorithm, writeQuorum)
file, err := storage.CreateFile(ctx, data, minioMetaTmpBucket, tmpPartPath, buffer, DefaultBitrotAlgorithm, writeQuorum)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
@@ -381,7 +387,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if file.Size < data.Size() {
return pi, errors.Trace(IncompleteBody{})
logger.LogIf(ctx, IncompleteBody{})
return pi, IncompleteBody{}
}
// post-upload check (write) lock
@@ -392,20 +399,20 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
defer postUploadIDLock.Unlock()
// Validate again if upload ID still exists.
if !xl.isUploadIDExists(bucket, object, uploadID) {
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
return pi, InvalidUploadID{UploadID: uploadID}
}
// Rename temporary part file to its final location.
partPath := path.Join(uploadIDPath, partSuffix)
onlineDisks, err = renamePart(onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, writeQuorum)
onlineDisks, err = renamePart(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, writeQuorum)
if err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
// Read metadata again because it might be updated with parallel upload of another part.
partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum)
partsMetadata, errs = readAllXLMetadata(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if errors.Cause(reducedErr) == errXLWriteQuorum {
return pi, toObjectErr(reducedErr, bucket, object)
}
@@ -414,7 +421,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)
// Pick one from the first valid metadata.
xlMeta, err = pickValidXLMeta(partsMetadata, modTime)
xlMeta, err = pickValidXLMeta(ctx, partsMetadata, modTime)
if err != nil {
return pi, err
}
@@ -440,15 +447,15 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
tempXLMetaPath := newUUID
// Writes a unique `xl.json` each disk carrying new checksum related information.
if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum); err != nil {
if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum); err != nil {
return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
}
if _, err = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
if _, err = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
fi, err := xl.statPart(bucket, object, uploadID, partSuffix)
fi, err := xl.statPart(ctx, bucket, object, uploadID, partSuffix)
if err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
}
@@ -470,7 +477,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// ListPartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, xl); err != nil {
if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
return result, err
}
// Hold lock so that there is no competing
@@ -482,13 +489,13 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI
}
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return result, errors.Trace(InvalidUploadID{UploadID: uploadID})
if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
return result, InvalidUploadID{UploadID: uploadID}
}
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
xlParts, xlMeta, err := xl.readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
xlParts, xlMeta, err := xl.readXLMetaParts(ctx, minioMetaMultipartBucket, uploadIDPath)
if err != nil {
return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
@@ -520,7 +527,7 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI
count := maxParts
for _, part := range parts {
var fi FileInfo
fi, err = xl.statPart(bucket, object, uploadID, part.Name)
fi, err = xl.statPart(ctx, bucket, object, uploadID, part.Name)
if err != nil {
return result, toObjectErr(err, minioMetaBucket, path.Join(uploadID, part.Name))
}
@@ -553,7 +560,7 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI
//
// Implements S3 compatible Complete multipart API.
func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
if err := checkCompleteMultipartArgs(bucket, object, xl); err != nil {
if err := checkCompleteMultipartArgs(ctx, bucket, object, xl); err != nil {
return oi, err
}
// Hold write lock on the object.
@@ -577,24 +584,24 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
}
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
return oi, InvalidUploadID{UploadID: uploadID}
}
// Check if an object is present as one of the parent dir.
// -- FIXME. (needs a new kind of lock).
if xl.parentDirIsObject(bucket, path.Dir(object)) {
return oi, toObjectErr(errors.Trace(errFileAccessDenied), bucket, object)
if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return oi, toObjectErr(errFileAccessDenied, bucket, object)
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5, err := getCompleteMultipartMD5(parts)
s3MD5, err := getCompleteMultipartMD5(ctx, parts)
if err != nil {
return oi, err
}
// Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
// get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(xl, partsMetadata, errs)
@@ -602,7 +609,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
return oi, toObjectErr(err, bucket, object)
}
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum)
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if errors.Cause(reducedErr) == errXLWriteQuorum {
return oi, toObjectErr(reducedErr, bucket, object)
}
@@ -613,7 +620,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
var objectSize int64
// Pick one from the first valid metadata.
xlMeta, err := pickValidXLMeta(partsMetadata, modTime)
xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime)
if err != nil {
return oi, err
}
@@ -635,21 +642,28 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber)
// All parts should have same part number.
if partIdx == -1 {
return oi, errors.Trace(InvalidPart{})
logger.LogIf(ctx, InvalidPart{})
return oi, InvalidPart{}
}
// All parts should have same ETag as previously generated.
if currentXLMeta.Parts[partIdx].ETag != part.ETag {
return oi, errors.Trace(InvalidPart{})
logger.LogIf(ctx, InvalidPart{})
return oi, InvalidPart{}
}
// All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) {
return oi, errors.Trace(PartTooSmall{
logger.LogIf(ctx, PartTooSmall{
PartNumber: part.PartNumber,
PartSize: currentXLMeta.Parts[partIdx].Size,
PartETag: part.ETag,
})
return oi, PartTooSmall{
PartNumber: part.PartNumber,
PartSize: currentXLMeta.Parts[partIdx].Size,
PartETag: part.ETag,
}
}
// Last part could have been uploaded as 0bytes, do not need
@@ -689,12 +703,12 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
}
// Write unique `xl.json` for each disk.
if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum); err != nil {
if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum); err != nil {
return oi, toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
}
var rErr error
onlineDisks, rErr = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum)
onlineDisks, rErr = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum)
if rErr != nil {
return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
}
@@ -704,12 +718,12 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
newUniqueID := mustGetUUID()
// Delete success renamed object.
defer xl.deleteObject(minioMetaTmpBucket, newUniqueID)
defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID)
// NOTE: Do not use online disks slice here.
// The reason is that existing object should be purged
// regardless of `xl.json` status and rolled back in case of errors.
_, err = renameObject(xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, writeQuorum)
_, err = renameObject(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, writeQuorum)
if err != nil {
return oi, toObjectErr(err, bucket, object)
}
@@ -731,12 +745,13 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// Deny if WORM is enabled
if globalWORMEnabled {
if xl.isObject(bucket, object) {
return ObjectInfo{}, errors.Trace(ObjectAlreadyExists{Bucket: bucket, Object: object})
logger.LogIf(ctx, ObjectAlreadyExists{Bucket: bucket, Object: object})
return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
}
}
// Rename the multipart object to final location.
if _, err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, writeQuorum); err != nil {
if _, err = renameObject(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, writeQuorum); err != nil {
return oi, toObjectErr(err, bucket, object)
}
@@ -745,21 +760,22 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
}
// Wrapper which removes all the uploaded parts.
func (xl xlObjects) cleanupUploadedParts(uploadIDPath string, writeQuorum int) error {
func (xl xlObjects) cleanupUploadedParts(ctx context.Context, uploadIDPath string, writeQuorum int) error {
var errs = make([]error, len(xl.getDisks()))
var wg = &sync.WaitGroup{}
// Cleanup uploadID for all disks.
for index, disk := range xl.getDisks() {
if disk == nil {
errs[index] = errors.Trace(errDiskNotFound)
logger.LogIf(ctx, errDiskNotFound)
errs[index] = errDiskNotFound
continue
}
wg.Add(1)
// Cleanup each uploadID in a routine.
go func(index int, disk StorageAPI) {
defer wg.Done()
err := cleanupDir(disk, minioMetaMultipartBucket, uploadIDPath)
err := cleanupDir(ctx, disk, minioMetaMultipartBucket, uploadIDPath)
if err != nil {
errs[index] = err
}
@@ -769,7 +785,7 @@ func (xl xlObjects) cleanupUploadedParts(uploadIDPath string, writeQuorum int) e
// Wait for all the cleanups to finish.
wg.Wait()
return reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, writeQuorum)
return reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
}
// AbortMultipartUpload - aborts an ongoing multipart operation
@@ -784,7 +800,7 @@ func (xl xlObjects) cleanupUploadedParts(uploadIDPath string, writeQuorum int) e
// that this is an atomic idempotent operation. Subsequent calls have
// no effect and further requests to the same uploadID would not be honored.
func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
if err := checkAbortMultipartArgs(bucket, object, xl); err != nil {
if err := checkAbortMultipartArgs(ctx, bucket, object, xl); err != nil {
return err
}
// Construct uploadIDPath.
@@ -797,12 +813,12 @@ func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, up
}
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return errors.Trace(InvalidUploadID{UploadID: uploadID})
if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
return InvalidUploadID{UploadID: uploadID}
}
// Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
// get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(xl, partsMetadata, errs)
@@ -811,7 +827,7 @@ func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, up
}
// Cleanup all uploaded parts.
if err = xl.cleanupUploadedParts(uploadIDPath, writeQuorum); err != nil {
if err = xl.cleanupUploadedParts(ctx, uploadIDPath, writeQuorum); err != nil {
return toObjectErr(err, bucket, object)
}
@@ -820,7 +836,7 @@ func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, up
}
// Clean-up the old multipart uploads. Should be run in a Go routine.
func (xl xlObjects) cleanupStaleMultipartUploads(cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
func (xl xlObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
ticker := time.NewTicker(cleanupInterval)
for {
@@ -839,13 +855,13 @@ func (xl xlObjects) cleanupStaleMultipartUploads(cleanupInterval, expiry time.Du
if disk == nil {
continue
}
xl.cleanupStaleMultipartUploadsOnDisk(disk, expiry)
xl.cleanupStaleMultipartUploadsOnDisk(ctx, disk, expiry)
}
}
}
// Remove the old multipart uploads on the given disk.
func (xl xlObjects) cleanupStaleMultipartUploadsOnDisk(disk StorageAPI, expiry time.Duration) {
func (xl xlObjects) cleanupStaleMultipartUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) {
now := time.Now()
shaDirs, err := disk.ListDir(minioMetaMultipartBucket, "")
if err != nil {
@@ -868,7 +884,7 @@ func (xl xlObjects) cleanupStaleMultipartUploadsOnDisk(disk StorageAPI, expiry t
// when it removes files. We ignore the error message from xl.cleanupUploadedParts() as we can't
// return it to any client. Hence we set quorum to 0.
quorum := 0
xl.cleanupUploadedParts(uploadIDPath, quorum)
xl.cleanupUploadedParts(ctx, uploadIDPath, quorum)
}
}
}