2016-05-20 23:48:47 -04:00
|
|
|
/*
|
2020-06-12 23:04:01 -04:00
|
|
|
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
|
2016-05-20 23:48:47 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-05-20 23:48:47 -04:00
|
|
|
|
|
|
|
import (
|
2018-03-14 15:01:47 -04:00
|
|
|
"context"
|
2016-05-20 23:48:47 -04:00
|
|
|
"fmt"
|
2019-01-17 07:58:18 -05:00
|
|
|
"io"
|
2016-05-20 23:48:47 -04:00
|
|
|
"path"
|
2018-03-15 16:55:23 -04:00
|
|
|
"sort"
|
2018-09-27 23:36:17 -04:00
|
|
|
"strconv"
|
2016-05-24 16:35:43 -04:00
|
|
|
"strings"
|
2020-08-22 00:39:54 -04:00
|
|
|
"time"
|
2016-05-24 16:35:43 -04:00
|
|
|
|
2020-07-14 12:38:05 -04:00
|
|
|
"github.com/minio/minio-go/v7/pkg/set"
|
2019-10-07 01:50:24 -04:00
|
|
|
xhttp "github.com/minio/minio/cmd/http"
|
2018-04-05 18:04:40 -04:00
|
|
|
"github.com/minio/minio/cmd/logger"
|
2016-05-24 16:35:43 -04:00
|
|
|
"github.com/minio/minio/pkg/mimedb"
|
2019-10-14 12:44:51 -04:00
|
|
|
"github.com/minio/minio/pkg/sync/errgroup"
|
2016-05-20 23:48:47 -04:00
|
|
|
)
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) getUploadIDDir(bucket, object, uploadID string) string {
|
|
|
|
return pathJoin(er.getMultipartSHADir(bucket, object), uploadID)
|
2017-01-26 15:51:12 -05:00
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) getMultipartSHADir(bucket, object string) string {
|
2018-03-15 16:55:23 -04:00
|
|
|
return getSHA256Hash([]byte(pathJoin(bucket, object)))
|
2017-01-26 15:51:12 -05:00
|
|
|
}
|
|
|
|
|
2019-04-23 17:54:28 -04:00
|
|
|
// checkUploadIDExists - verify if a given uploadID exists and is valid.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error {
|
2020-07-02 19:17:27 -04:00
|
|
|
_, _, _, err := er.getObjectFileInfo(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID), ObjectOptions{})
|
2019-04-23 17:54:28 -04:00
|
|
|
return err
|
2017-01-26 15:51:12 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Removes part given by partName belonging to a mulitpart upload from minioMetaBucket
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) {
|
|
|
|
uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
|
|
|
|
curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber))
|
|
|
|
storageDisks := er.getDisks()
|
2019-10-14 12:44:51 -04:00
|
|
|
|
|
|
|
g := errgroup.WithNErrs(len(storageDisks))
|
|
|
|
for index, disk := range storageDisks {
|
2017-01-26 15:51:12 -05:00
|
|
|
if disk == nil {
|
|
|
|
continue
|
|
|
|
}
|
2019-10-14 12:44:51 -04:00
|
|
|
index := index
|
|
|
|
g.Go(func() error {
|
2017-01-26 15:51:12 -05:00
|
|
|
// Ignoring failure to remove parts that weren't present in CompleteMultipartUpload
|
2020-06-12 23:04:01 -04:00
|
|
|
// requests. xl.meta is the authoritative source of truth on which parts constitute
|
2017-01-26 15:51:12 -05:00
|
|
|
// the object. The presence of parts that don't belong in the object doesn't affect correctness.
|
2020-09-04 12:45:06 -04:00
|
|
|
_ = storageDisks[index].DeleteFile(context.TODO(), minioMetaMultipartBucket, curpartPath)
|
2019-10-14 12:44:51 -04:00
|
|
|
return nil
|
|
|
|
}, index)
|
2017-01-26 15:51:12 -05:00
|
|
|
}
|
2019-10-14 12:44:51 -04:00
|
|
|
g.Wait()
|
2017-01-26 15:51:12 -05:00
|
|
|
}
|
|
|
|
|
2020-08-22 00:39:54 -04:00
|
|
|
// Clean-up the old multipart uploads. Should be run in a Go routine.
|
2020-09-08 18:55:40 -04:00
|
|
|
func (er erasureObjects) cleanupStaleUploads(ctx context.Context, cleanupInterval, expiry time.Duration) {
|
2020-08-22 00:39:54 -04:00
|
|
|
ticker := time.NewTicker(cleanupInterval)
|
|
|
|
defer ticker.Stop()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return
|
|
|
|
case <-ticker.C:
|
|
|
|
var disk StorageAPI
|
2020-08-26 22:29:35 -04:00
|
|
|
// run multiple cleanup's local to this server.
|
|
|
|
for _, d := range er.getLoadBalancedLocalDisks() {
|
2020-08-22 00:39:54 -04:00
|
|
|
if d != nil {
|
|
|
|
disk = d
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if disk == nil {
|
|
|
|
continue
|
|
|
|
}
|
2020-09-08 18:55:40 -04:00
|
|
|
er.cleanupStaleUploadsOnDisk(ctx, disk, expiry)
|
2020-08-22 00:39:54 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the old multipart uploads on the given disk.
|
2020-09-08 18:55:40 -04:00
|
|
|
func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) {
|
2020-08-22 00:39:54 -04:00
|
|
|
now := time.Now()
|
2020-09-04 12:45:06 -04:00
|
|
|
shaDirs, err := disk.ListDir(ctx, minioMetaMultipartBucket, "", -1)
|
2020-08-22 00:39:54 -04:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for _, shaDir := range shaDirs {
|
2020-09-04 12:45:06 -04:00
|
|
|
uploadIDDirs, err := disk.ListDir(ctx, minioMetaMultipartBucket, shaDir, -1)
|
2020-08-22 00:39:54 -04:00
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, uploadIDDir := range uploadIDDirs {
|
|
|
|
uploadIDPath := pathJoin(shaDir, uploadIDDir)
|
2020-09-04 12:45:06 -04:00
|
|
|
fi, err := disk.ReadVersion(ctx, minioMetaMultipartBucket, uploadIDPath, "")
|
2020-08-22 00:39:54 -04:00
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if now.Sub(fi.ModTime) > expiry {
|
|
|
|
er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, fi.Erasure.DataBlocks+1)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-09-08 18:55:40 -04:00
|
|
|
tmpDirs, err := disk.ListDir(ctx, minioMetaTmpBucket, "", -1)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for _, tmpDir := range tmpDirs {
|
|
|
|
fi, err := disk.ReadVersion(ctx, minioMetaTmpBucket, tmpDir, "")
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if now.Sub(fi.ModTime) > expiry {
|
|
|
|
er.deleteObject(ctx, minioMetaTmpBucket, tmpDir, fi.Erasure.DataBlocks+1)
|
|
|
|
}
|
|
|
|
}
|
2020-08-22 00:39:54 -04:00
|
|
|
}
|
|
|
|
|
2017-11-30 18:58:46 -05:00
|
|
|
// ListMultipartUploads - lists all the pending multipart
|
|
|
|
// uploads for a particular object in a bucket.
|
|
|
|
//
|
|
|
|
// Implements minimal S3 compatible ListMultipartUploads API. We do
|
|
|
|
// not support prefix based listing, this is a deliberate attempt
|
|
|
|
// towards simplification of multipart APIs.
|
|
|
|
// The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
|
2020-07-03 22:27:13 -04:00
|
|
|
func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
|
2017-11-30 18:58:46 -05:00
|
|
|
result.MaxUploads = maxUploads
|
|
|
|
result.KeyMarker = keyMarker
|
|
|
|
result.Prefix = object
|
|
|
|
result.Delimiter = delimiter
|
|
|
|
|
2020-07-03 22:27:13 -04:00
|
|
|
var uploadIDs []string
|
2020-09-28 22:39:32 -04:00
|
|
|
var disk StorageAPI
|
|
|
|
for _, disk = range er.getLoadBalancedDisks() {
|
2020-09-04 12:45:06 -04:00
|
|
|
uploadIDs, err = disk.ListDir(ctx, minioMetaMultipartBucket, er.getMultipartSHADir(bucket, object), -1)
|
2016-06-01 19:43:31 -04:00
|
|
|
if err != nil {
|
2020-07-03 22:27:13 -04:00
|
|
|
if err == errDiskNotFound {
|
|
|
|
continue
|
|
|
|
}
|
2018-03-15 16:55:23 -04:00
|
|
|
if err == errFileNotFound {
|
|
|
|
return result, nil
|
|
|
|
}
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(ctx, err)
|
2020-07-03 22:27:13 -04:00
|
|
|
return result, toObjectErr(err, bucket, object)
|
2016-06-01 19:43:31 -04:00
|
|
|
}
|
2020-07-03 22:27:13 -04:00
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := range uploadIDs {
|
|
|
|
uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], SlashSeparator)
|
|
|
|
}
|
|
|
|
|
|
|
|
// S3 spec says uploadIDs should be sorted based on initiated time, we need
|
|
|
|
// to read the metadata entry.
|
|
|
|
var uploads []MultipartInfo
|
|
|
|
|
|
|
|
populatedUploadIds := set.NewStringSet()
|
|
|
|
|
2020-09-28 22:39:32 -04:00
|
|
|
for _, uploadID := range uploadIDs {
|
|
|
|
if populatedUploadIds.Contains(uploadID) {
|
2020-07-03 22:27:13 -04:00
|
|
|
continue
|
2016-06-01 19:43:31 -04:00
|
|
|
}
|
2020-09-28 22:39:32 -04:00
|
|
|
fi, err := disk.ReadVersion(ctx, minioMetaMultipartBucket, pathJoin(er.getUploadIDDir(bucket, object, uploadID)), "")
|
|
|
|
if err != nil {
|
|
|
|
return result, toObjectErr(err, bucket, object)
|
2017-11-30 18:58:46 -05:00
|
|
|
}
|
2020-09-28 22:39:32 -04:00
|
|
|
populatedUploadIds.Add(uploadID)
|
|
|
|
uploads = append(uploads, MultipartInfo{
|
|
|
|
Object: object,
|
|
|
|
UploadID: uploadID,
|
|
|
|
Initiated: fi.ModTime,
|
|
|
|
})
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
2016-12-02 02:15:17 -05:00
|
|
|
|
2020-07-03 22:27:13 -04:00
|
|
|
sort.Slice(uploads, func(i int, j int) bool {
|
|
|
|
return uploads[i].Initiated.Before(uploads[j].Initiated)
|
|
|
|
})
|
|
|
|
|
|
|
|
uploadIndex := 0
|
|
|
|
if uploadIDMarker != "" {
|
|
|
|
for uploadIndex < len(uploads) {
|
|
|
|
if uploads[uploadIndex].UploadID != uploadIDMarker {
|
|
|
|
uploadIndex++
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if uploads[uploadIndex].UploadID == uploadIDMarker {
|
|
|
|
uploadIndex++
|
|
|
|
break
|
|
|
|
}
|
|
|
|
uploadIndex++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for uploadIndex < len(uploads) {
|
|
|
|
result.Uploads = append(result.Uploads, uploads[uploadIndex])
|
|
|
|
result.NextUploadIDMarker = uploads[uploadIndex].UploadID
|
|
|
|
uploadIndex++
|
|
|
|
if len(result.Uploads) == maxUploads {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
result.IsTruncated = uploadIndex < len(uploads)
|
|
|
|
|
|
|
|
if !result.IsTruncated {
|
|
|
|
result.NextKeyMarker = ""
|
|
|
|
result.NextUploadIDMarker = ""
|
|
|
|
}
|
|
|
|
|
2017-11-30 18:58:46 -05:00
|
|
|
return result, nil
|
2016-06-01 19:43:31 -04:00
|
|
|
}
|
2016-05-20 23:48:47 -04:00
|
|
|
|
2016-06-01 19:43:31 -04:00
|
|
|
// newMultipartUpload - wrapper for initializing a new multipart
// request; returns a unique upload id.
//
// Internally this function writes the initial `xl.meta` for the upload
// to a temporary location first, then commits it with a quorum rename
// into '.minio.sys/multipart/<sha-dir>/<uploadID>' so that a partially
// written upload entry is never visible.
func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {

	onlineDisks := er.getDisks()
	// Parity from the requested storage class; default to half the
	// disks (maximum redundancy) when the class defines none.
	parityBlocks := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
	if parityBlocks == 0 {
		parityBlocks = len(onlineDisks) / 2
	}
	dataBlocks := len(onlineDisks) - parityBlocks

	fi := newFileInfo(object, dataBlocks, parityBlocks)

	// we now know the number of blocks this object needs for data and parity.
	// establish the writeQuorum using this data
	writeQuorum := dataBlocks
	if dataBlocks == parityBlocks {
		writeQuorum = dataBlocks + 1
	}

	// Derive content-type from the object extension when the client
	// did not provide one.
	if opts.UserDefined["content-type"] == "" {
		contentType := mimedb.TypeByExtension(path.Ext(object))
		opts.UserDefined["content-type"] = contentType
	}

	// Calculate the version to be saved.
	if opts.Versioned {
		fi.VersionID = opts.VersionID
		if fi.VersionID == "" {
			fi.VersionID = mustGetUUID()
		}
	}

	fi.DataDir = mustGetUUID()
	fi.ModTime = UTCNow()
	// Copy so later mutation of opts.UserDefined cannot alias fi.Metadata.
	fi.Metadata = cloneMSS(opts.UserDefined)

	uploadID := mustGetUUID()
	uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
	tempUploadIDPath := uploadID

	// Delete the tmp path later in case we fail to commit (ignore
	// returned errors) - this will be a no-op in case of a commit
	// success.
	defer er.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum)

	// Same FileInfo for every disk; per-disk differences (erasure
	// index) are handled by writeUniqueFileInfo.
	var partsMetadata = make([]FileInfo, len(onlineDisks))
	for i := range onlineDisks {
		partsMetadata[i] = fi
	}

	var err error
	// Write updated `xl.meta` to all disks.
	onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum)
	if err != nil {
		return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
	}

	// Attempt to rename temp upload object to actual upload path object
	_, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, true, writeQuorum, nil)
	if err != nil {
		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Return success.
	return uploadID, nil
}
|
|
|
|
|
2016-06-01 19:43:31 -04:00
|
|
|
// NewMultipartUpload - initialize a new multipart upload, returns a
|
|
|
|
// unique id. The unique id returned here is of UUID form, for each
|
|
|
|
// subsequent request each UUID is unique.
|
|
|
|
//
|
|
|
|
// Implements S3 compatible initiate multipart API.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
|
2016-06-01 19:43:31 -04:00
|
|
|
// No metadata is set, allocate a new one.
|
2019-02-09 00:31:06 -05:00
|
|
|
if opts.UserDefined == nil {
|
|
|
|
opts.UserDefined = make(map[string]string)
|
2016-06-01 19:43:31 -04:00
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
return er.newMultipartUpload(ctx, bucket, object, opts)
|
2016-06-01 19:43:31 -04:00
|
|
|
}
|
|
|
|
|
2017-01-31 12:38:34 -05:00
|
|
|
// CopyObjectPart - reads incoming stream and internally erasure codes
|
|
|
|
// them. This call is similar to put object part operation but the source
|
|
|
|
// data is read from an existing object.
|
|
|
|
//
|
|
|
|
// Implements S3 compatible Upload Part Copy API.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
|
|
|
|
partInfo, err := er.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
|
2017-10-22 01:30:34 -04:00
|
|
|
if err != nil {
|
|
|
|
return pi, toObjectErr(err, dstBucket, dstObject)
|
|
|
|
}
|
|
|
|
|
2017-01-31 12:38:34 -05:00
|
|
|
// Success.
|
|
|
|
return partInfo, nil
|
|
|
|
}
|
|
|
|
|
2016-07-05 04:04:50 -04:00
|
|
|
// PutObjectPart - reads incoming stream and internally erasure codes
// them. This call is similar to single put operation but it is part
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
//
// Locking: the part data is erasure-coded into a temp location while
// holding only a read lock on the upload ID, so parts can be uploaded
// concurrently; the lock is then upgraded to a write lock for the
// commit (rename + `xl.meta` update), which serializes metadata updates.
func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
	uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
	if err = uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
		return PartInfo{}, err
	}
	// readLocked tracks whether the deferred unlock still needs to
	// release the read lock; it is cleared before the lock upgrade below.
	readLocked := true
	defer func() {
		if readLocked {
			uploadIDLock.RUnlock()
		}
	}()

	data := r.Reader
	// Validate input data size and it can never be less than zero.
	// (-1 itself is valid: it denotes an unknown/streaming size.)
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return pi, toObjectErr(errInvalidArgument)
	}

	var partsMetadata []FileInfo
	var errs []error
	uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)

	// Validates if upload ID exists.
	if err = er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return pi, toObjectErr(err, bucket, object, uploadID)
	}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs = readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket,
		uploadIDPath, "")

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return pi, toObjectErr(reducedErr, bucket, object)
	}

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(er.getDisks(), partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return pi, err
	}

	// Order disks to match this object's erasure shard distribution.
	onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)

	// Need a unique name for the part being written in minioMetaBucket to
	// accommodate concurrent PutObjectPart requests

	partSuffix := fmt.Sprintf("part.%d", partID)
	tmpPart := mustGetUUID()
	tmpPartPath := pathJoin(tmpPart, partSuffix)

	// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
	defer er.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum)

	erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	// Fetch buffer for I/O, returns from the pool if not allocates a new one and returns.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
	case size == -1 || size >= fi.Erasure.BlockSize:
		// Unknown size or at least one full block: use the pooled buffer.
		buffer = er.bp.Get()
		defer er.bp.Put(buffer)
	case size < fi.Erasure.BlockSize:
		// No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
	}

	if len(buffer) > int(fi.Erasure.BlockSize) {
		buffer = buffer[:fi.Erasure.BlockSize]
	}
	writers := make([]io.Writer, len(onlineDisks))
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
	}

	n, err := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
	closeBitrotWriters(writers)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		return pi, IncompleteBody{Bucket: bucket, Object: object}
	}

	// Disks whose writer was never created are treated as offline
	// for the commit below.
	for i := range writers {
		if writers[i] == nil {
			onlineDisks[i] = nil
		}
	}

	// Unlock here before acquiring write locks all concurrent
	// PutObjectParts would serialize here updating `xl.meta`
	uploadIDLock.RUnlock()
	readLocked = false
	if err = uploadIDLock.GetLock(globalOperationTimeout); err != nil {
		return PartInfo{}, err
	}
	defer uploadIDLock.Unlock()

	// Validates if upload ID exists.
	// (Re-checked: the upload may have been aborted/completed while the
	// lock was released.)
	if err = er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return pi, toObjectErr(err, bucket, object, uploadID)
	}

	// Rename temporary part file to its final location.
	partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix)
	onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, false, writeQuorum, nil)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	// Read metadata again because it might be updated with parallel upload of another part.
	partsMetadata, errs = readAllFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "")
	reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return pi, toObjectErr(reducedErr, bucket, object)
	}

	// Get current highest version based on re-read partsMetadata.
	onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return pi, err
	}

	// Once part is successfully committed, proceed with updating erasure metadata.
	fi.ModTime = UTCNow()

	md5hex := r.MD5CurrentHexString()

	// Add the current part.
	fi.AddObjectPart(partID, md5hex, n, data.ActualSize())

	// Propagate the updated part list and per-part bitrot checksum
	// into each online disk's metadata.
	for i, disk := range onlineDisks {
		if disk == OfflineDisk {
			continue
		}
		partsMetadata[i].Size = fi.Size
		partsMetadata[i].ModTime = fi.ModTime
		partsMetadata[i].Parts = fi.Parts
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
			PartNumber: partID,
			Algorithm:  DefaultBitrotAlgorithm,
			Hash:       bitrotWriterSum(writers[i]),
		})
	}

	// Writes update `xl.meta` format for each disk.
	if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Return success.
	return PartInfo{
		PartNumber:   partID,
		ETag:         md5hex,
		LastModified: fi.ModTime,
		Size:         fi.Size,
		ActualSize:   data.ActualSize(),
	}, nil
}
|
|
|
|
|
2020-05-28 15:36:20 -04:00
|
|
|
// GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used
// by callers to verify object states
// - encrypted
// - compressed
//
// Holds a read lock on the upload ID for the duration of the read.
func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
	result := MultipartInfo{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}

	uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
	if err := uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
		return MultipartInfo{}, err
	}
	defer uploadIDLock.RUnlock()

	if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return result, toObjectErr(err, bucket, object, uploadID)
	}

	uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)

	storageDisks := er.getDisks()

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, opts.VersionID)

	// get Quorum for this object
	readQuorum, _, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
	if err != nil {
		return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum)
	if reducedErr == errErasureReadQuorum {
		return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
	}

	_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum)
	if err != nil {
		return result, err
	}

	// Copy so the caller cannot mutate the cached upload metadata.
	result.UserDefined = cloneMSS(fi.Metadata)
	return result, nil
}
|
|
|
|
|
2018-03-15 16:55:23 -04:00
|
|
|
// ListObjectParts - lists all previously uploaded parts for a given
|
|
|
|
// object and uploadID. Takes additional input of part-number-marker
|
|
|
|
// to indicate where the listing should begin from.
|
|
|
|
//
|
|
|
|
// Implements S3 compatible ListObjectParts API. The resulting
|
2018-06-28 19:02:02 -04:00
|
|
|
// ListPartsInfo structure is marshaled directly into XML and
|
2018-03-15 16:55:23 -04:00
|
|
|
// replied back to the client.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
|
2020-09-14 18:57:13 -04:00
|
|
|
uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
|
|
|
|
if err := uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
|
|
|
|
return ListPartsInfo{}, err
|
|
|
|
}
|
|
|
|
defer uploadIDLock.RUnlock()
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
2019-04-23 17:54:28 -04:00
|
|
|
return result, toObjectErr(err, bucket, object, uploadID)
|
2018-03-15 16:55:23 -04:00
|
|
|
}
|
2016-05-20 23:48:47 -04:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
|
2016-05-31 23:23:31 -04:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
storageDisks := er.getDisks()
|
2019-02-01 11:58:41 -05:00
|
|
|
|
|
|
|
// Read metadata associated with the object from all disks.
|
2020-06-12 23:04:01 -04:00
|
|
|
partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "")
|
2019-02-01 11:58:41 -05:00
|
|
|
|
|
|
|
// get Quorum for this object
|
2020-06-12 23:04:01 -04:00
|
|
|
_, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
|
2016-05-20 23:48:47 -04:00
|
|
|
if err != nil {
|
2018-03-15 16:55:23 -04:00
|
|
|
return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
2016-05-25 00:24:20 -04:00
|
|
|
|
2019-02-01 11:58:41 -05:00
|
|
|
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
|
2020-06-12 23:04:01 -04:00
|
|
|
if reducedErr == errErasureWriteQuorum {
|
2019-09-16 17:27:34 -04:00
|
|
|
return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
|
2019-02-01 11:58:41 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
|
|
|
|
|
|
|
|
// Pick one from the first valid metadata.
|
2020-06-12 23:04:01 -04:00
|
|
|
fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
|
2019-02-01 11:58:41 -05:00
|
|
|
if err != nil {
|
|
|
|
return result, err
|
|
|
|
}
|
|
|
|
|
2016-05-25 00:24:20 -04:00
|
|
|
// Populate the result stub.
|
|
|
|
result.Bucket = bucket
|
|
|
|
result.Object = object
|
|
|
|
result.UploadID = uploadID
|
|
|
|
result.MaxParts = maxParts
|
2018-01-31 16:17:24 -05:00
|
|
|
result.PartNumberMarker = partNumberMarker
|
2020-09-10 14:37:22 -04:00
|
|
|
result.UserDefined = cloneMSS(fi.Metadata)
|
2016-05-25 00:24:20 -04:00
|
|
|
|
|
|
|
// For empty number of parts or maxParts as zero, return right here.
|
2020-06-12 23:04:01 -04:00
|
|
|
if len(fi.Parts) == 0 || maxParts == 0 {
|
2016-05-25 00:24:20 -04:00
|
|
|
return result, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Limit output to maxPartsList.
|
|
|
|
if maxParts > maxPartsList {
|
|
|
|
maxParts = maxPartsList
|
|
|
|
}
|
|
|
|
|
2016-05-20 23:48:47 -04:00
|
|
|
// Only parts with higher part numbers will be listed.
|
2020-06-12 23:04:01 -04:00
|
|
|
partIdx := objectPartIndex(fi.Parts, partNumberMarker)
|
|
|
|
parts := fi.Parts
|
2016-05-25 00:24:20 -04:00
|
|
|
if partIdx != -1 {
|
2020-06-12 23:04:01 -04:00
|
|
|
parts = fi.Parts[partIdx+1:]
|
2016-05-25 00:24:20 -04:00
|
|
|
}
|
2016-05-20 23:48:47 -04:00
|
|
|
count := maxParts
|
2016-05-25 00:24:20 -04:00
|
|
|
for _, part := range parts {
|
2017-01-31 12:38:34 -05:00
|
|
|
result.Parts = append(result.Parts, PartInfo{
|
2016-05-25 00:24:20 -04:00
|
|
|
PartNumber: part.Number,
|
2016-05-20 23:48:47 -04:00
|
|
|
ETag: part.ETag,
|
2020-06-12 23:04:01 -04:00
|
|
|
LastModified: fi.ModTime,
|
2016-05-28 03:18:58 -04:00
|
|
|
Size: part.Size,
|
2016-05-20 23:48:47 -04:00
|
|
|
})
|
|
|
|
count--
|
|
|
|
if count == 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// If listed entries are more than maxParts, we set IsTruncated as true.
|
|
|
|
if len(parts) > len(result.Parts) {
|
|
|
|
result.IsTruncated = true
|
|
|
|
// Make sure to fill next part number marker if IsTruncated is
|
|
|
|
// true for subsequent listing.
|
|
|
|
nextPartNumberMarker := result.Parts[len(result.Parts)-1].PartNumber
|
|
|
|
result.NextPartNumberMarker = nextPartNumberMarker
|
|
|
|
}
|
|
|
|
return result, nil
|
|
|
|
}
|
|
|
|
|
2016-06-01 19:43:31 -04:00
|
|
|
// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
	// Hold read-locks to verify uploaded parts, also disallows
	// parallel part uploads as well.
	uploadIDLock := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
	if err = uploadIDLock.GetRLock(globalOperationTimeout); err != nil {
		return oi, err
	}
	defer uploadIDLock.RUnlock()

	if err = er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		return oi, toObjectErr(err, bucket, object, uploadID)
	}

	// Check if an object is present as one of the parent dir.
	// -- FIXME. (needs a new kind of lock).
	if er.parentDirIsObject(ctx, bucket, path.Dir(object)) {
		return oi, toObjectErr(errFileParentIsFile, bucket, object)
	}

	// Notify path watchers after the transaction, regardless of outcome.
	defer ObjectPathUpdated(path.Join(bucket, object))

	// Calculate s3 compatible md5sum for complete multipart.
	s3MD5 := getCompleteMultipartMD5(parts)

	uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)

	storageDisks := er.getDisks()

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "")

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	// Fail early if the read errors already preclude a successful write.
	reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errErasureWriteQuorum {
		return oi, toObjectErr(reducedErr, bucket, object)
	}

	onlineDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// Calculate full object size.
	var objectSize int64

	// Calculate consolidated actual size.
	var objectActualSize int64

	// Pick one from the first valid metadata.
	fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return oi, err
	}

	// Order online disks in accordance with distribution order.
	onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)

	// Order parts metadata in accordance with distribution order.
	partsMetadata = shufflePartsMetadata(partsMetadata, fi.Erasure.Distribution)

	// Save current erasure metadata for validation.
	var currentFI = fi

	// Allocate parts similar to incoming slice.
	fi.Parts = make([]ObjectPartInfo, len(parts))

	// Validate each part and then commit to disk.
	for i, part := range parts {
		// Locate the client-supplied part number among uploaded parts.
		partIdx := objectPartIndex(currentFI.Parts, part.PartNumber)
		// All parts should have same part number.
		if partIdx == -1 {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
			return oi, invp
		}

		// ensure that part ETag is canonicalized to strip off extraneous quotes
		part.ETag = canonicalizeETag(part.ETag)
		// Client-supplied ETag must match the stored ETag of the part.
		if currentFI.Parts[partIdx].ETag != part.ETag {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				ExpETag:    currentFI.Parts[partIdx].ETag,
				GotETag:    part.ETag,
			}
			return oi, invp
		}

		// All parts except the last part has to be atleast 5MB.
		if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) {
			return oi, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   currentFI.Parts[partIdx].ActualSize,
				PartETag:   part.ETag,
			}
		}

		// Save for total object size.
		objectSize += currentFI.Parts[partIdx].Size

		// Save the consolidated actual size.
		objectActualSize += currentFI.Parts[partIdx].ActualSize

		// Add incoming parts.
		fi.Parts[i] = ObjectPartInfo{
			Number:     part.PartNumber,
			Size:       currentFI.Parts[partIdx].Size,
			ActualSize: currentFI.Parts[partIdx].ActualSize,
		}
	}

	// Save the final object size and modtime.
	fi.Size = objectSize
	fi.ModTime = opts.MTime
	if opts.MTime.IsZero() {
		fi.ModTime = UTCNow()
	}

	// Save successfully calculated md5sum.
	fi.Metadata["etag"] = s3MD5
	if opts.UserDefined["etag"] != "" { // preserve ETag if set
		fi.Metadata["etag"] = opts.UserDefined["etag"]
	}

	// Save the consolidated actual size.
	fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)

	// Update all erasure metadata, make sure to not modify fields like
	// checksum which are different on each disks.
	for index := range partsMetadata {
		partsMetadata[index].Size = fi.Size
		partsMetadata[index].ModTime = fi.ModTime
		partsMetadata[index].Metadata = fi.Metadata
		partsMetadata[index].Parts = fi.Parts
	}

	// Write final `xl.meta` at uploadID location
	if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
		return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Remove parts that weren't present in CompleteMultipartUpload request.
	for _, curpart := range currentFI.Parts {
		if objectPartIndex(fi.Parts, curpart.Number) == -1 {
			// Delete the missing part files. e.g,
			// Request 1: NewMultipart
			// Request 2: PutObjectPart 1
			// Request 3: PutObjectPart 2
			// Request 4: CompleteMultipartUpload --part 2
			// N.B. 1st part is not present. This part should be removed from the storage.
			er.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number)
		}
	}

	// Hold namespace to complete the transaction
	lk := er.NewNSLock(ctx, bucket, object)
	if err = lk.GetLock(globalOperationTimeout); err != nil {
		return oi, err
	}
	defer lk.Unlock()

	// Rename the multipart object to final location.
	if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath,
		fi.DataDir, bucket, object, writeQuorum, nil); err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	// Check if there is any offline disk and add it to the MRF list
	for i, disk := range onlineDisks {
		if disk == nil || storageDisks[i] == nil {
			er.addPartial(bucket, object, fi.VersionID)
			break
		}
	}

	// Refresh fi from the first available disk's metadata post-rename.
	for i := 0; i < len(onlineDisks); i++ {
		if onlineDisks[i] == nil {
			continue
		}
		// Object info is the same in all disks, so we can pick
		// the first meta from online disk
		fi = partsMetadata[i]
		break
	}

	// Success, return object info.
	return fi.ToObjectInfo(bucket, object), nil
}
|
|
|
|
|
2016-06-01 19:43:31 -04:00
|
|
|
// AbortMultipartUpload - aborts an ongoing multipart operation
|
|
|
|
// signified by the input uploadID. This is an atomic operation
|
|
|
|
// doesn't require clients to initiate multiple such requests.
|
|
|
|
//
|
|
|
|
// All parts are purged from all disks and reference to the uploadID
|
|
|
|
// would be removed from the system, rollback is not possible on this
|
|
|
|
// operation.
|
2020-09-14 18:57:13 -04:00
|
|
|
func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
|
|
|
|
lk := er.NewNSLock(ctx, bucket, pathJoin(object, uploadID))
|
|
|
|
if err := lk.GetLock(globalOperationTimeout); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer lk.Unlock()
|
|
|
|
|
2019-04-23 17:54:28 -04:00
|
|
|
// Validates if upload ID exists.
|
2020-06-12 23:04:01 -04:00
|
|
|
if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
|
2019-04-23 17:54:28 -04:00
|
|
|
return toObjectErr(err, bucket, object, uploadID)
|
2016-05-28 16:23:08 -04:00
|
|
|
}
|
2018-03-15 16:55:23 -04:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
|
2019-11-19 20:42:27 -05:00
|
|
|
|
2018-03-15 16:55:23 -04:00
|
|
|
// Read metadata associated with the object from all disks.
|
2020-06-12 23:04:01 -04:00
|
|
|
partsMetadata, errs := readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket, uploadIDPath, "")
|
2018-03-15 16:55:23 -04:00
|
|
|
|
|
|
|
// get Quorum for this object
|
2020-06-12 23:04:01 -04:00
|
|
|
_, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
|
2018-03-15 16:55:23 -04:00
|
|
|
if err != nil {
|
2019-11-09 12:27:23 -05:00
|
|
|
return toObjectErr(err, bucket, object, uploadID)
|
2018-03-15 16:55:23 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Cleanup all uploaded parts.
|
2020-06-12 23:04:01 -04:00
|
|
|
if err = er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
|
2019-11-09 12:27:23 -05:00
|
|
|
return toObjectErr(err, bucket, object, uploadID)
|
2018-03-15 16:55:23 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Successfully purged.
|
|
|
|
return nil
|
|
|
|
}
|