XL: Cleanup, comments and all the updated functions. (#1830)
@@ -27,32 +27,199 @@ import (
    "time"

    "github.com/minio/minio/pkg/mimedb"
    "github.com/skyrings/skyring-common/tools/uuid"
)

-// ListMultipartUploads - list multipart uploads.
+// listMultipartUploads - lists all multipart uploads.
func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
    result := ListMultipartsInfo{
        IsTruncated: true,
        MaxUploads:  maxUploads,
        KeyMarker:   keyMarker,
        Prefix:      prefix,
        Delimiter:   delimiter,
    }

    recursive := true
    if delimiter == slashSeparator {
        recursive = false
    }

    // Not using path.Join() as it strips off the trailing '/'.
    multipartPrefixPath := pathJoin(mpartMetaPrefix, bucket, prefix)
    if prefix == "" {
        // Should have a trailing "/" if prefix is "".
        // For ex. multipartPrefixPath should be "multipart/bucket/" if prefix is "".
        multipartPrefixPath += slashSeparator
    }
    multipartMarkerPath := ""
    if keyMarker != "" {
        multipartMarkerPath = pathJoin(mpartMetaPrefix, bucket, keyMarker)
    }
    var uploads []uploadMetadata
    var err error
    var eof bool
    // List all upload ids for the keyMarker, starting from
    // uploadIDMarker first.
    if uploadIDMarker != "" {
        nsMutex.RLock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, keyMarker))
        disk := xl.getLoadBalancedQuorumDisks()[0]
        uploads, _, err = listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads, disk)
        nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, keyMarker))
        if err != nil {
            return ListMultipartsInfo{}, err
        }
        maxUploads = maxUploads - len(uploads)
    }
    // Validate if we need to list further depending on maxUploads.
    if maxUploads > 0 {
        walker := xl.lookupTreeWalk(listParams{minioMetaBucket, recursive, multipartMarkerPath, multipartPrefixPath})
        if walker == nil {
            walker = xl.startTreeWalk(minioMetaBucket, multipartPrefixPath, multipartMarkerPath, recursive, xl.isMultipartUpload)
        }
        // Collect uploads until maxUploads is reduced to 0.
        for maxUploads > 0 {
            walkResult, ok := <-walker.ch
            if !ok {
                // Closed channel.
                eof = true
                break
            }
            // For any walk error return right away.
            if walkResult.err != nil {
                // File not found or Disk not found is a valid case.
                if walkResult.err == errFileNotFound || walkResult.err == errDiskNotFound {
                    continue
                }
                return ListMultipartsInfo{}, walkResult.err
            }
            entry := strings.TrimPrefix(walkResult.entry, retainSlash(pathJoin(mpartMetaPrefix, bucket)))
            // For an entry that looks like a directory, store it and
            // continue the loop; no need to fetch uploads.
            if strings.HasSuffix(walkResult.entry, slashSeparator) {
                uploads = append(uploads, uploadMetadata{
                    Object: entry,
                })
                maxUploads--
                if maxUploads == 0 {
                    if walkResult.end {
                        eof = true
                        break
                    }
                }
                continue
            }
            var newUploads []uploadMetadata
            var end bool
            uploadIDMarker = ""
            // For the new object entry we get all its pending uploadIDs.
            nsMutex.RLock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry))
            disk := xl.getLoadBalancedQuorumDisks()[0]
            newUploads, end, err = listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads, disk)
            nsMutex.RUnlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, entry))
            if err != nil {
                if err == errFileNotFound || err == errDiskNotFound {
                    continue
                }
                return ListMultipartsInfo{}, err
            }
            uploads = append(uploads, newUploads...)
            maxUploads -= len(newUploads)
            if walkResult.end && end {
                eof = true
                break
            }
        }
    }
    // For all received uploads fill in the multiparts result.
    for _, upload := range uploads {
        var objectName string
        var uploadID string
        if strings.HasSuffix(upload.Object, slashSeparator) {
            // All directory entries are common prefixes.
            uploadID = "" // For common prefixes, upload ids are empty.
            objectName = upload.Object
            result.CommonPrefixes = append(result.CommonPrefixes, objectName)
        } else {
            uploadID = upload.UploadID
            objectName = upload.Object
            result.Uploads = append(result.Uploads, upload)
        }
        result.NextKeyMarker = objectName
        result.NextUploadIDMarker = uploadID
    }
    result.IsTruncated = !eof
    // Result is not truncated, reset the markers.
    if !result.IsTruncated {
        result.NextKeyMarker = ""
        result.NextUploadIDMarker = ""
    }
    return result, nil
}

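The entry computation above is the heart of the listing: tree-walk results come back as paths under '.minio/multipart/bucket/', TrimPrefix recovers the object name, and a trailing slash marks a directory that becomes a common prefix. Below is a minimal, self-contained sketch of that classification; pathJoin and retainSlash are re-implemented here as assumptions about their behavior, not the actual minio helpers.

package main

import (
    "fmt"
    "strings"
)

// Hypothetical stand-ins for minio's internal helpers and constants.
const (
    slashSeparator  = "/"
    mpartMetaPrefix = "multipart"
)

func pathJoin(elems ...string) string { return strings.Join(elems, slashSeparator) }

// retainSlash ensures a trailing "/" so TrimPrefix strips the whole prefix.
func retainSlash(s string) string { return strings.TrimSuffix(s, slashSeparator) + slashSeparator }

func main() {
    bucket := "photos"
    for _, walked := range []string{
        "multipart/photos/vacation/",              // directory entry -> common prefix
        "multipart/photos/beach.jpg/uploads.json", // object entry
    } {
        entry := strings.TrimPrefix(walked, retainSlash(pathJoin(mpartMetaPrefix, bucket)))
        if strings.HasSuffix(walked, slashSeparator) {
            fmt.Println("common prefix:", entry)
        } else {
            fmt.Println("object entry :", entry)
        }
    }
}
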
// ListMultipartUploads - lists all the pending multipart uploads on a
// bucket. Additionally takes 'prefix, keyMarker, uploadIDMarker and a
// delimiter', which allows us to list uploads matching a particular
// prefix, or lexically starting from 'keyMarker', or delimiting the
// output to get a directory-like listing.
//
// Implements S3 compatible ListMultipartUploads API. The resulting
// ListMultipartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
    result := ListMultipartsInfo{}

    // Verify if bucket is valid.
    if !IsValidBucketName(bucket) {
        return ListMultipartsInfo{}, BucketNameInvalid{Bucket: bucket}
    }
    if !xl.isBucketExist(bucket) {
        return ListMultipartsInfo{}, BucketNotFound{Bucket: bucket}
    }
    if !IsValidObjectPrefix(prefix) {
        return ListMultipartsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: prefix}
    }
    // Verify if delimiter is anything other than '/', which we do not support.
    if delimiter != "" && delimiter != slashSeparator {
        return ListMultipartsInfo{}, UnsupportedDelimiter{
            Delimiter: delimiter,
        }
    }
    // Verify if marker has the prefix.
    if keyMarker != "" && !strings.HasPrefix(keyMarker, prefix) {
        return ListMultipartsInfo{}, InvalidMarkerPrefixCombination{
            Marker: keyMarker,
            Prefix: prefix,
        }
    }
    if uploadIDMarker != "" {
        if strings.HasSuffix(keyMarker, slashSeparator) {
            return result, InvalidUploadIDKeyCombination{
                UploadIDMarker: uploadIDMarker,
                KeyMarker:      keyMarker,
            }
        }
        id, err := uuid.Parse(uploadIDMarker)
        if err != nil {
            return result, err
        }
        if id.IsZero() {
            return result, MalformedUploadID{
                UploadID: uploadIDMarker,
            }
        }
    }
    return xl.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}

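Since the reply carries NextKeyMarker and NextUploadIDMarker, a caller pages through large listings by feeding those back as keyMarker and uploadIDMarker. Here is a sketch of that loop; the interface and stand-in structs below are illustrative assumptions, not the actual minio definitions.

package sketch

import "fmt"

// Minimal stand-ins so the sketch compiles on its own.
type uploadMetadata struct{ Object, UploadID string }

type ListMultipartsInfo struct {
    Uploads            []uploadMetadata
    NextKeyMarker      string
    NextUploadIDMarker string
    IsTruncated        bool
}

type lister interface {
    ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error)
}

// listAllUploads pages through every pending upload under prefix.
func listAllUploads(api lister, bucket, prefix string) error {
    keyMarker, uploadIDMarker := "", ""
    for {
        res, err := api.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, "", 1000)
        if err != nil {
            return err
        }
        for _, u := range res.Uploads {
            fmt.Println(u.Object, u.UploadID)
        }
        if !res.IsTruncated {
            return nil // listing complete
        }
        // Resume exactly where the previous page stopped.
        keyMarker, uploadIDMarker = res.NextKeyMarker, res.NextUploadIDMarker
    }
}
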
-// newMultipartUpload - initialize a new multipart.
+// newMultipartUpload - wrapper for initializing a new multipart
+// request; returns back a unique upload id.
+//
+// Internally this function creates 'uploads.json' associated for the
+// incoming object at '.minio/multipart/bucket/object/uploads.json' on
+// all the disks. `uploads.json` carries metadata regarding the ongoing
+// multipart operation on the object.
func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[string]string) (uploadID string, err error) {
-    // Verify if bucket name is valid.
-    if !IsValidBucketName(bucket) {
-        return "", BucketNameInvalid{Bucket: bucket}
-    }
-    // Verify whether the bucket exists.
-    if !xl.isBucketExist(bucket) {
-        return "", BucketNotFound{Bucket: bucket}
-    }
-    // Verify if object name is valid.
-    if !IsValidObjectName(object) {
-        return "", ObjectNameInvalid{Bucket: bucket, Object: object}
-    }
-    // No metadata is set, allocate a new one.
-    if meta == nil {
-        meta = make(map[string]string)
-    }
-
    xlMeta := newXLMetaV1(xl.dataBlocks, xl.parityBlocks)
    // If not set, default to "application/octet-stream".
    if meta["content-type"] == "" {
@@ -92,14 +259,13 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
    return "", toObjectErr(rErr, minioMetaBucket, uploadIDPath)
}

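The comment above pins down where upload state lives: one 'uploads.json' per object, listing every ongoing upload ID for it. A sketch of what such a file could deserialize into follows; the struct and JSON field names are assumptions for illustration, not the actual minio definitions.

package sketch

import "time"

// uploadInfo records one pending multipart upload for an object.
type uploadInfo struct {
    UploadID  string    `json:"uploadId"`  // UUID handed back by NewMultipartUpload
    Initiated time.Time `json:"initiated"` // when the upload was initiated
}

// uploadsV1 is the shape a '.minio/multipart/bucket/object/uploads.json'
// file could take: one entry per ongoing upload on that object.
type uploadsV1 struct {
    Version string       `json:"version"`
    Format  string       `json:"format"`
    Uploads []uploadInfo `json:"uploadIds"`
}
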
-// NewMultipartUpload - initialize a new multipart upload, returns a unique id.
+// NewMultipartUpload - initialize a new multipart upload, returns a
+// unique id. The unique id returned here is of UUID form; for each
+// subsequent request the UUID is unique.
+//
+// Implements S3 compatible initiate multipart API.
func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]string) (string, error) {
-    return xl.newMultipartUpload(bucket, object, meta)
-}
-
-// putObjectPart - put object part.
-func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
-    // Verify if bucket is valid.
+    // Verify if bucket name is valid.
    if !IsValidBucketName(bucket) {
        return "", BucketNameInvalid{Bucket: bucket}
    }
@@ -107,9 +273,22 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,
    if !xl.isBucketExist(bucket) {
        return "", BucketNotFound{Bucket: bucket}
    }
    // Verify if object name is valid.
    if !IsValidObjectName(object) {
        return "", ObjectNameInvalid{Bucket: bucket, Object: object}
    }
+    // No metadata is set, allocate a new one.
+    if meta == nil {
+        meta = make(map[string]string)
+    }
+    return xl.newMultipartUpload(bucket, object, meta)
+}
+
+// putObjectPart - reads incoming data until EOF for the part file on
+// an ongoing multipart transaction. Internally incoming data is
+// erasure coded and written across all disks.
+func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
+    // Hold the lock and start the operation.
+    uploadIDPath := pathJoin(mpartMetaPrefix, bucket, object, uploadID)
+    nsMutex.Lock(minioMetaBucket, uploadIDPath)
+    defer nsMutex.Unlock(minioMetaBucket, uploadIDPath)
@@ -130,51 +309,39 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,
    // Pick one from the first valid metadata.
    xlMeta := pickValidXLMeta(partsMetadata)

-    // Initialize a new erasure with online disks and new distribution.
-    erasure := newErasure(onlineDisks, xlMeta.Erasure.Distribution)
-
-    // Initialize sha512 hash.
-    erasure.InitHash("sha512")
-
    partSuffix := fmt.Sprintf("object%d", partID)
    tmpPartPath := path.Join(tmpMetaPrefix, uploadID, partSuffix)

    // Initialize md5 writer.
    md5Writer := md5.New()

-    // Allocate blocksized buffer for reading.
-    buf := make([]byte, blockSizeV1)
+    // Construct a tee reader for md5sum.
+    teeReader := io.TeeReader(data, md5Writer)

-    // Read until io.EOF, fill the allocated buf.
-    for {
-        var n int
-        n, err = io.ReadFull(data, buf)
-        if err == io.EOF {
-            break
-        }
-        if err != nil && err != io.ErrUnexpectedEOF {
-            return "", toObjectErr(err, bucket, object)
-        }
-        // Update md5 writer.
-        md5Writer.Write(buf[:n])
-        var m int64
-        m, err = erasure.AppendFile(minioMetaBucket, tmpPartPath, buf[:n])
-        if err != nil {
-            return "", toObjectErr(err, minioMetaBucket, tmpPartPath)
-        }
-        if m != int64(len(buf[:n])) {
-            return "", toObjectErr(errUnexpected, bucket, object)
-        }
-    }
+    // Collect all the previous erasure infos across the disks.
+    var eInfos []erasureInfo
+    for index := range onlineDisks {
+        eInfos = append(eInfos, partsMetadata[index].Erasure)
+    }
+
+    // Erasure code data and write across all disks.
+    newEInfos, err := erasureCreateFile(onlineDisks, minioMetaBucket, tmpPartPath, partSuffix, teeReader, eInfos)
+    if err != nil {
+        return "", toObjectErr(err, minioMetaBucket, tmpPartPath)
+    }

    // Calculate new md5sum.
    newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
    if md5Hex != "" {
        if newMD5Hex != md5Hex {
            // MD5 mismatch, delete the temporary object.
            xl.deleteObject(minioMetaBucket, tmpPartPath)
            // Returns md5 mismatch.
            return "", BadDigest{md5Hex, newMD5Hex}
        }
    }

    // Validate if upload ID exists again.
    if !xl.isUploadIDExists(bucket, object, uploadID) {
        return "", InvalidUploadID{UploadID: uploadID}
    }
@@ -191,28 +358,17 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,
    // Add the current part.
    xlMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)

-    // Get calculated hash checksums from erasure to save in `xl.json`.
-    hashChecksums := erasure.GetHashes()
-
-    checkSums := make([]checkSumInfo, len(xl.storageDisks))
-    for index := range xl.storageDisks {
-        blockIndex := xlMeta.Erasure.Distribution[index] - 1
-        checkSums[blockIndex] = checkSumInfo{
-            Name:      partSuffix,
-            Algorithm: "sha512",
-            Hash:      hashChecksums[blockIndex],
-        }
-    }
    // Update `xl.json` content on each disk.
    for index := range partsMetadata {
-        blockIndex := xlMeta.Erasure.Distribution[index] - 1
        partsMetadata[index].Parts = xlMeta.Parts
-        partsMetadata[index].Erasure.Checksum = append(partsMetadata[index].Erasure.Checksum, checkSums[blockIndex])
+        partsMetadata[index].Erasure = newEInfos[index]
    }

-    // Write all the checksum metadata.
    tempUploadIDPath := path.Join(tmpMetaPrefix, uploadID)

-    // Write unique `xl.json` each disk.
+    // Writes a unique `xl.json` on each disk carrying new checksum
+    // related information.
    if err = xl.writeUniqueXLMetadata(minioMetaBucket, tempUploadIDPath, partsMetadata); err != nil {
        return "", toObjectErr(err, minioMetaBucket, tempUploadIDPath)
    }
@@ -225,32 +381,29 @@ func (xl xlObjects) putObjectPart(bucket string, object string, uploadID string,
    return newMD5Hex, nil
}

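The switch from the manual read loop to io.TeeReader is worth isolating: every byte erasureCreateFile pulls from the stream is mirrored into the md5 hasher, so the digest is ready the moment the stream is drained. Below is a minimal runnable sketch of the pattern, with a temporary file standing in for the erasure writer.

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
    "io"
    "os"
    "strings"
)

func main() {
    data := strings.NewReader("example part payload")

    // Construct a tee reader for md5sum: whatever the consumer reads
    // from teeReader is simultaneously written to md5Writer.
    md5Writer := md5.New()
    teeReader := io.TeeReader(data, md5Writer)

    // Stand-in consumer; in putObjectPart this role is played by
    // erasureCreateFile, which erasure codes the stream across disks.
    sink, err := os.CreateTemp("", "part-")
    if err != nil {
        panic(err)
    }
    defer os.Remove(sink.Name())

    if _, err := io.Copy(sink, teeReader); err != nil {
        panic(err)
    }

    // The digest is complete once the consumer has drained the reader.
    fmt.Println("md5:", hex.EncodeToString(md5Writer.Sum(nil)))
}
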
-// PutObjectPart - writes the multipart upload chunks.
+// PutObjectPart - reads the incoming stream and internally erasure
+// codes it. This call is similar to a single put operation but it is
+// part of the multipart transaction.
+//
+// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string) (string, error) {
-    return xl.putObjectPart(bucket, object, uploadID, partID, size, data, md5Hex)
-}
-
-// ListObjectParts - list object parts.
-func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
    // Verify if bucket is valid.
    if !IsValidBucketName(bucket) {
-        return ListPartsInfo{}, BucketNameInvalid{Bucket: bucket}
+        return "", BucketNameInvalid{Bucket: bucket}
    }
    // Verify whether the bucket exists.
    if !xl.isBucketExist(bucket) {
-        return ListPartsInfo{}, BucketNotFound{Bucket: bucket}
+        return "", BucketNotFound{Bucket: bucket}
    }
    if !IsValidObjectName(object) {
-        return ListPartsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: object}
+        return "", ObjectNameInvalid{Bucket: bucket, Object: object}
    }
-    // Hold lock so that there is no competing abort-multipart-upload or complete-multipart-upload.
-    nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
-    defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
-
-    if !xl.isUploadIDExists(bucket, object, uploadID) {
-        return ListPartsInfo{}, InvalidUploadID{UploadID: uploadID}
-    }
+    return xl.putObjectPart(bucket, object, uploadID, partID, size, data, md5Hex)
}

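The removed checksum block above relied on the erasure distribution being a 1-based permutation from disk slot to block number, hence the recurring 'Distribution[index] - 1'. Here is a small self-contained sketch of that index arithmetic; checkSumInfo is copied from the diff for its shape only, and all values are invented.

package main

import "fmt"

// checkSumInfo mirrors the struct used in the diff above.
type checkSumInfo struct {
    Name      string
    Algorithm string
    Hash      string
}

func main() {
    // Distribution is a 1-based permutation: disk index -> block number.
    distribution := []int{2, 1, 4, 3}
    hashChecksums := []string{"h0", "h1", "h2", "h3"} // fake per-block sha512 hex

    checkSums := make([]checkSumInfo, len(distribution))
    for index := range distribution {
        // Subtract 1 to turn the 1-based block number into a 0-based
        // index into the checksum slice.
        blockIndex := distribution[index] - 1
        checkSums[blockIndex] = checkSumInfo{
            Name:      "object1",
            Algorithm: "sha512",
            Hash:      hashChecksums[blockIndex],
        }
    }
    fmt.Println(checkSums)
}
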
+// listObjectParts - wrapper reading `xl.json` for a given object and
+// uploadID. Lists all the parts captured inside `xl.json` content.
+func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
+    result := ListPartsInfo{}
+
+    uploadIDPath := path.Join(mpartMetaPrefix, bucket, object, uploadID)
@@ -312,11 +465,42 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM
    return result, nil
}

-// ListObjectParts - list object parts.
+// ListObjectParts - lists all previously uploaded parts for a given
+// object and uploadID. Takes an additional input of part-number-marker
+// to indicate where the listing should begin from.
+//
+// Implements S3 compatible ListObjectParts API. The resulting
+// ListPartsInfo structure is unmarshalled directly into XML and
+// replied back to the client.
func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
-    return xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
+    // Verify if bucket is valid.
+    if !IsValidBucketName(bucket) {
+        return ListPartsInfo{}, BucketNameInvalid{Bucket: bucket}
+    }
+    // Verify whether the bucket exists.
+    if !xl.isBucketExist(bucket) {
+        return ListPartsInfo{}, BucketNotFound{Bucket: bucket}
+    }
+    if !IsValidObjectName(object) {
+        return ListPartsInfo{}, ObjectNameInvalid{Bucket: bucket, Object: object}
+    }
+    // Hold lock so that there is no competing abort-multipart-upload or complete-multipart-upload.
+    nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
+    defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object, uploadID))
+
+    if !xl.isUploadIDExists(bucket, object, uploadID) {
+        return ListPartsInfo{}, InvalidUploadID{UploadID: uploadID}
+    }
+    result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
+    return result, err
}

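As with uploads, parts are listed in pages driven by part-number-marker. The sketch below drains the listing; the ListPartsInfo fields it uses (Parts, NextPartNumberMarker, IsTruncated) follow the S3 model and are assumed shapes, not the actual minio structs.

package sketch

import "fmt"

// Assumed shapes, for illustration only.
type partInfo struct {
    PartNumber int
    ETag       string
    Size       int64
}

type ListPartsInfo struct {
    Parts                []partInfo
    NextPartNumberMarker int
    IsTruncated          bool
}

type partLister interface {
    ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error)
}

// listAllParts drains the part listing page by page.
func listAllParts(api partLister, bucket, object, uploadID string) error {
    marker := 0
    for {
        res, err := api.ListObjectParts(bucket, object, uploadID, marker, 1000)
        if err != nil {
            return err
        }
        for _, p := range res.Parts {
            fmt.Println(p.PartNumber, p.ETag, p.Size)
        }
        if !res.IsTruncated {
            return nil
        }
        marker = res.NextPartNumberMarker // continue after the last part seen
    }
}
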
// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (string, error) {
    // Verify if bucket is valid.
    if !IsValidBucketName(bucket) {
@@ -367,7 +551,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
    // Allocate parts similar to incoming slice.
    xlMeta.Parts = make([]objectPartInfo, len(parts))

-    // Loop through all parts, validate them and then commit to disk.
+    // Validate each part and then commit to disk.
    for i, part := range parts {
        partIdx := currentXLMeta.ObjectPartIndex(part.PartNumber)
        if partIdx == -1 {
@@ -414,6 +598,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
        partsMetadata[index].Meta = xlMeta.Meta
        partsMetadata[index].Parts = xlMeta.Parts
    }

+    // Write unique `xl.json` for each disk.
    if err = xl.writeUniqueXLMetadata(minioMetaBucket, tempUploadIDPath, partsMetadata); err != nil {
        return "", toObjectErr(err, minioMetaBucket, tempUploadIDPath)
@@ -461,20 +646,25 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload

    // Validate if there are other incomplete upload-id's present for
    // the object; if yes, do not attempt to delete 'uploads.json'.
-    uploadsJSON, err := readUploadsJSON(bucket, object, xl.storageDisks...)
-    if err == nil {
-        uploadIDIdx := uploadsJSON.Index(uploadID)
-        if uploadIDIdx != -1 {
-            uploadsJSON.Uploads = append(uploadsJSON.Uploads[:uploadIDIdx], uploadsJSON.Uploads[uploadIDIdx+1:]...)
-        }
-        if len(uploadsJSON.Uploads) > 0 {
-            if err = updateUploadsJSON(bucket, object, uploadsJSON, xl.storageDisks...); err != nil {
-                return "", err
-            }
-            return s3MD5, nil
-        }
-    }
+    disk := xl.getLoadBalancedQuorumDisks()[0]
+    uploadsJSON, err := readUploadsJSON(bucket, object, disk)
+    if err != nil {
+        return "", toObjectErr(err, minioMetaBucket, object)
+    }
+
+    // If we have successfully read `uploads.json`, then we proceed to
+    // purge or update `uploads.json`.
+    uploadIDIdx := uploadsJSON.Index(uploadID)
+    if uploadIDIdx != -1 {
+        uploadsJSON.Uploads = append(uploadsJSON.Uploads[:uploadIDIdx], uploadsJSON.Uploads[uploadIDIdx+1:]...)
+    }
+    if len(uploadsJSON.Uploads) > 0 {
+        if err = updateUploadsJSON(bucket, object, uploadsJSON, xl.storageDisks...); err != nil {
+            return "", toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
+        }
+        // Return success.
+        return s3MD5, nil
+    }
+    // No more pending uploads for the object, proceed to delete the
+    // object entry completely from '.minio/multipart'.
    err = xl.deleteObject(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
    if err != nil {
        return "", toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
@@ -484,8 +674,59 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
    return s3MD5, nil
}

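The returned s3MD5 follows the S3 multipart ETag convention described in the doc comment: an md5 over the concatenation of the raw per-part md5 digests, suffixed with the part count. A runnable sketch follows; the helper name and exact suffix format are illustrative assumptions rather than the committed implementation.

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
)

// completeMultipartMD5 concatenates the decoded per-part md5 digests,
// hashes the result, and appends "-<number of parts>".
func completeMultipartMD5(partMD5Hexes ...string) (string, error) {
    var finalMD5Bytes []byte
    for _, md5Hex := range partMD5Hexes {
        md5Bytes, err := hex.DecodeString(md5Hex)
        if err != nil {
            return "", err
        }
        finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
    }
    sum := md5.Sum(finalMD5Bytes)
    return hex.EncodeToString(sum[:]) + "-" + fmt.Sprint(len(partMD5Hexes)), nil
}

func main() {
    part1 := md5.Sum([]byte("first part payload"))
    part2 := md5.Sum([]byte("second part payload"))
    etag, err := completeMultipartMD5(hex.EncodeToString(part1[:]), hex.EncodeToString(part2[:]))
    if err != nil {
        panic(err)
    }
    fmt.Println("s3MD5:", etag) // ends in "-2" for a two-part upload
}
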
-// abortMultipartUpload - aborts a multipart upload.
-func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) error {
+// abortMultipartUpload - wrapper for purging an ongoing multipart
+// transaction; deletes the uploadID entry from `uploads.json` and purges
+// the directory at '.minio/multipart/bucket/object/uploadID' holding
+// all the upload parts.
+func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) (err error) {
+    // Cleanup all uploaded parts.
+    if err = cleanupUploadedParts(bucket, object, uploadID, xl.storageDisks...); err != nil {
+        return toObjectErr(err, bucket, object)
+    }
+
+    nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object))
+    defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object))
+    // Validate if there are other incomplete upload-id's present for
+    // the object; if yes, do not attempt to delete 'uploads.json'.
+    disk := xl.getLoadBalancedQuorumDisks()[0]
+    uploadsJSON, err := readUploadsJSON(bucket, object, disk)
+    if err != nil {
+        return toObjectErr(err, bucket, object)
+    }
+    uploadIDIdx := uploadsJSON.Index(uploadID)
+    if uploadIDIdx != -1 {
+        uploadsJSON.Uploads = append(uploadsJSON.Uploads[:uploadIDIdx], uploadsJSON.Uploads[uploadIDIdx+1:]...)
+    }
+    if len(uploadsJSON.Uploads) > 0 {
+        // There are pending uploads for the same object; preserve
+        // them and update 'uploads.json' in-place.
+        err = updateUploadsJSON(bucket, object, uploadsJSON, xl.storageDisks...)
+        if err != nil {
+            return toObjectErr(err, bucket, object)
+        }
+        return nil
+    }
+    // No more pending uploads for the object; we purge the entire
+    // entry at '.minio/multipart/bucket/object'.
+    if err = xl.deleteObject(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object)); err != nil {
+        return toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
+    }
+
+    // Successfully purged.
+    return nil
+}

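Both the complete and abort paths prune one entry out of uploadsJSON.Uploads with the append(s[:i], s[i+1:]...) idiom. A tiny runnable sketch of that removal follows, with uploadInfo as a stand-in type.

package main

import "fmt"

// uploadInfo is a stand-in for the entries kept in `uploads.json`.
type uploadInfo struct{ UploadID string }

func main() {
    uploads := []uploadInfo{{"a"}, {"b"}, {"c"}}

    // Find the index of the uploadID to purge; -1 means not present.
    // This mirrors what uploadsJSON.Index(uploadID) does above.
    idx := -1
    for i, u := range uploads {
        if u.UploadID == "b" {
            idx = i
            break
        }
    }

    // append(s[:i], s[i+1:]...) removes element i in place by
    // shifting the tail left by one.
    if idx != -1 {
        uploads = append(uploads[:idx], uploads[idx+1:]...)
    }
    fmt.Println(uploads) // [{a} {c}]
}
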
+// AbortMultipartUpload - aborts an ongoing multipart operation
+// signified by the input uploadID. This is an atomic operation and
+// doesn't require clients to initiate multiple such requests.
+//
+// All parts are purged from all disks and the reference to the uploadID
+// is removed from the system; rollback is not possible on this
+// operation.
+//
+// Implements S3 compatible Abort multipart API; the slight difference is
+// that this is an atomic idempotent operation. Subsequent calls have
+// no effect and further requests to the same uploadID would not be honored.
func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
    // Verify if bucket is valid.
    if !IsValidBucketName(bucket) {
        return BucketNameInvalid{Bucket: bucket}
@@ -504,37 +745,6 @@ func (xl xlObjects) abortMultipartUpload(bucket, object, uploadID string) error
    if !xl.isUploadIDExists(bucket, object, uploadID) {
        return InvalidUploadID{UploadID: uploadID}
    }
-
-    // Cleanup all uploaded parts.
-    if err := cleanupUploadedParts(bucket, object, uploadID, xl.storageDisks...); err != nil {
-        return toObjectErr(err, bucket, object)
-    }
-
-    nsMutex.Lock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object))
-    defer nsMutex.Unlock(minioMetaBucket, pathJoin(mpartMetaPrefix, bucket, object))
-    // Validate if there are other incomplete upload-id's present for
-    // the object; if yes, do not attempt to delete 'uploads.json'.
-    uploadsJSON, err := readUploadsJSON(bucket, object, xl.storageDisks...)
-    if err == nil {
-        uploadIDIdx := uploadsJSON.Index(uploadID)
-        if uploadIDIdx != -1 {
-            uploadsJSON.Uploads = append(uploadsJSON.Uploads[:uploadIDIdx], uploadsJSON.Uploads[uploadIDIdx+1:]...)
-        }
-        if len(uploadsJSON.Uploads) > 0 {
-            err = updateUploadsJSON(bucket, object, uploadsJSON, xl.storageDisks...)
-            if err != nil {
-                return toObjectErr(err, bucket, object)
-            }
-            return nil
-        }
-    }
-    if err = xl.deleteObject(minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object)); err != nil {
-        return toObjectErr(err, minioMetaBucket, path.Join(mpartMetaPrefix, bucket, object))
-    }
-    return nil
-}
-
-// AbortMultipartUpload - aborts a multipart upload.
-func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
-    return xl.abortMultipartUpload(bucket, object, uploadID)
+    err := xl.abortMultipartUpload(bucket, object, uploadID)
+    return err
}