/*
 * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/hex"
	"fmt"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/mimedb"
)

func (xl xlObjects) getUploadIDDir(bucket, object, uploadID string) string {
	return pathJoin(xl.getMultipartSHADir(bucket, object), uploadID)
}

func (xl xlObjects) getMultipartSHADir(bucket, object string) string {
	return getSHA256Hash([]byte(pathJoin(bucket, object)))
}
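
// Illustrative sketch (not part of the original code path) of how the two
// helpers above derive the on-disk multipart layout, assuming getSHA256Hash
// hex-encodes a SHA-256 digest. For bucket "photos" and object
// "2018/hello.jpg", the upload lives under:
//
//	sha := sha256.Sum256([]byte(pathJoin("photos", "2018/hello.jpg")))
//	dir := hex.EncodeToString(sha[:])
//	// <minioMetaMultipartBucket>/<dir>/<uploadID>/xl.json, part.1, part.2, ...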

// isUploadIDExists - verify if a given uploadID exists and is valid.
func (xl xlObjects) isUploadIDExists(ctx context.Context, bucket, object, uploadID string) bool {
	return xl.isObject(minioMetaMultipartBucket, xl.getUploadIDDir(bucket, object, uploadID))
}

// Removes the part given by partName belonging to a multipart upload from minioMetaBucket.
func (xl xlObjects) removeObjectPart(bucket, object, uploadID, partName string) {
	curpartPath := path.Join(bucket, object, uploadID, partName)
	wg := sync.WaitGroup{}
	for i, disk := range xl.getDisks() {
		if disk == nil {
			continue
		}
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			// Ignoring failure to remove parts that weren't present in CompleteMultipartUpload
			// requests. xl.json is the authoritative source of truth on which parts constitute
			// the object. The presence of parts that don't belong in the object doesn't affect correctness.
			_ = disk.DeleteFile(minioMetaMultipartBucket, curpartPath)
		}(i, disk)
	}
	wg.Wait()
}

// statPart - returns fileInfo structure for a successful stat on part file.
func (xl xlObjects) statPart(ctx context.Context, bucket, object, uploadID, partName string) (fileInfo FileInfo, err error) {
	var ignoredErrs []error
	partNamePath := path.Join(xl.getUploadIDDir(bucket, object, uploadID), partName)
	for _, disk := range xl.getLoadBalancedDisks() {
		if disk == nil {
			ignoredErrs = append(ignoredErrs, errDiskNotFound)
			continue
		}
		fileInfo, err = disk.StatFile(minioMetaMultipartBucket, partNamePath)
		if err == nil {
			return fileInfo, nil
		}
		// If the disk was removed or went offline for any reason, continue to the next disk.
		if IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
			ignoredErrs = append(ignoredErrs, err)
			continue
		}
		// Error is not ignored, return right here.
		logger.LogIf(ctx, err)
		return FileInfo{}, err
	}
	// If all errors were ignored, reduce to maximal occurrence
	// based on the read quorum.
	readQuorum := len(xl.getDisks()) / 2
	return FileInfo{}, reduceReadQuorumErrs(ctx, ignoredErrs, nil, readQuorum)
}
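
// Worked example for the quorum reduction above (disk count assumed for
// illustration): with 8 disks, readQuorum is 8/2 = 4, so reduceReadQuorumErrs
// picks the most frequent ignored error when it occurred on at least 4 disks,
// and reports a read-quorum failure otherwise.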

// commitXLMetadata - commit `xl.json` from source prefix to destination prefix in the given slice of disks.
func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) {
	var wg = &sync.WaitGroup{}
	var mErrs = make([]error, len(disks))

	srcJSONFile := path.Join(srcPrefix, xlMetaJSONFile)
	dstJSONFile := path.Join(dstPrefix, xlMetaJSONFile)

	// Rename `xl.json` to all disks in parallel.
	for index, disk := range disks {
		if disk == nil {
			mErrs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Rename `xl.json` in a routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			// Delete any dangling directories.
			defer disk.DeleteFile(srcBucket, srcPrefix)

			// Renames `xl.json` from source prefix to destination prefix.
			rErr := disk.RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile)
			if rErr != nil {
				logger.LogIf(ctx, rErr)
				mErrs[index] = rErr
				return
			}
			mErrs[index] = nil
		}(index, disk)
	}
	// Wait for all the routines.
	wg.Wait()

	err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
	if err == errXLWriteQuorum {
		// Delete all `xl.json` successfully renamed.
		deleteAllXLMetadata(ctx, disks, dstBucket, dstPrefix, mErrs)
	}
	return evalDisks(disks, mErrs), err
}
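
// Illustrative sketch (hypothetical names, error handling elided) of the
// write-then-commit pattern this helper supports: metadata is first staged
// under the tmp bucket, then atomically renamed into place so readers never
// observe a partially written `xl.json`:
//
//	tmpPath := mustGetUUID()
//	disks, err := writeUniqueXLMetadata(ctx, disks, minioMetaTmpBucket, tmpPath, partsMetadata, quorum)
//	if err == nil {
//		disks, err = commitXLMetadata(ctx, disks, minioMetaTmpBucket, tmpPath, minioMetaMultipartBucket, dstPath, quorum)
//	}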

// ListMultipartUploads - lists all the pending multipart
// uploads for a particular object in a bucket.
//
// Implements minimal S3 compatible ListMultipartUploads API. We do
// not support prefix-based listing; this is a deliberate attempt
// towards simplification of multipart APIs.
// The resulting ListMultipartsInfo structure is marshaled directly into XML.
func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
	if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, xl); err != nil {
		return result, err
	}

	result.MaxUploads = maxUploads
	result.KeyMarker = keyMarker
	result.Prefix = object
	result.Delimiter = delimiter

	for _, disk := range xl.getLoadBalancedDisks() {
		if disk == nil {
			continue
		}
		uploadIDs, err := disk.ListDir(minioMetaMultipartBucket, xl.getMultipartSHADir(bucket, object), -1)
		if err != nil {
			if err == errFileNotFound {
				return result, nil
			}
			logger.LogIf(ctx, err)
			return result, err
		}
		for i := range uploadIDs {
			uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], slashSeparator)
		}
		sort.Strings(uploadIDs)
		for _, uploadID := range uploadIDs {
			if len(result.Uploads) == maxUploads {
				break
			}
			result.Uploads = append(result.Uploads, MultipartInfo{Object: object, UploadID: uploadID})
		}
		break
	}

	return result, nil
}
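
// Illustrative result shape (upload IDs and maxUploads value assumed) for an
// object with two pending uploads:
//
//	ListMultipartsInfo{
//		Prefix:     "object",
//		MaxUploads: 1000,
//		Uploads: []MultipartInfo{
//			{Object: "object", UploadID: "3c8a6c29-..."},
//			{Object: "object", UploadID: "a1b2c3d4-..."},
//		},
//	}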

// newMultipartUpload - wrapper for initializing a new multipart
// request; returns a unique upload id.
//
// Internally this function creates a new `xl.json` for the incoming
// object at '.minio.sys/multipart/SHA256(bucket/object)/uploadID/' on
// all the disks. `xl.json` carries metadata regarding the ongoing
// multipart operation on the object.
func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, object string, meta map[string]string) (string, error) {

	dataBlocks, parityBlocks := getRedundancyCount(meta[amzStorageClass], len(xl.getDisks()))

	xlMeta := newXLMetaV1(object, dataBlocks, parityBlocks)

	// We now know the number of blocks this object needs for data and parity.
	// Establish the writeQuorum using this data.
	writeQuorum := dataBlocks + 1
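
	// Worked example (disk count and storage class assumed for illustration):
	// on a 16-disk setup where the storage class yields 8 data and 8 parity
	// blocks, writeQuorum = 8 + 1 = 9, so every metadata write below must be
	// acknowledged by at least 9 disks to succeed.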

	// If not set, default to "application/octet-stream".
	if meta["content-type"] == "" {
		contentType := "application/octet-stream"
		if objectExt := path.Ext(object); objectExt != "" {
			content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]
			if ok {
				contentType = content.ContentType
			}
		}
		meta["content-type"] = contentType
	}
	xlMeta.Stat.ModTime = UTCNow()
	xlMeta.Meta = meta

	uploadID := mustGetUUID()
	uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
	tempUploadIDPath := uploadID

	// Write updated `xl.json` to all disks.
	disks, err := writeSameXLMetadata(ctx, xl.getDisks(), minioMetaTmpBucket, tempUploadIDPath, xlMeta, writeQuorum)
	if err != nil {
		return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
	}
	// Delete the tmp path later in case we fail to rename (ignore
	// returned errors) - this will be a no-op in case of a rename
	// success.
	defer xl.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum, false)

	// Attempt to rename the temp upload object to the actual upload path object.
	_, rErr := renameObject(ctx, disks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum)
	if rErr != nil {
		return "", toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
	}

	// Return success.
	return uploadID, nil
}

// NewMultipartUpload - initialize a new multipart upload, returns a
// unique id. The unique id returned here is of UUID form; each
// subsequent request gets a distinct UUID.
//
// Implements S3 compatible initiate multipart API.
func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, opts ObjectOptions) (string, error) {
	if err := checkNewMultipartArgs(ctx, bucket, object, xl); err != nil {
		return "", err
	}
	// No metadata is set, allocate a new one.
	if meta == nil {
		meta = make(map[string]string)
	}
	return xl.newMultipartUpload(ctx, bucket, object, meta)
}

// CopyObjectPart - reads the incoming stream and internally erasure
// codes it. This call is similar to the put object part operation but
// the source data is read from an existing object.
//
// Implements S3 compatible Upload Part Copy API.
func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
	// Hold read locks on source object only if we are
	// going to read data from source object.
	objectSRLock := xl.nsMutex.NewNSLock(srcBucket, srcObject)
	if err := objectSRLock.GetRLock(globalObjectTimeout); err != nil {
		return pi, err
	}
	defer objectSRLock.RUnlock()

	if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, xl); err != nil {
		return pi, err
	}

	partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader, dstOpts)
	if err != nil {
		return pi, toObjectErr(err, dstBucket, dstObject)
	}

	// Success.
	return partInfo, nil
}

// PutObjectPart - reads the incoming stream and internally erasure
// codes it. This call is similar to a single put operation but it is
// part of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (pi PartInfo, e error) {
	if err := checkPutObjectPartArgs(ctx, bucket, object, xl); err != nil {
		return pi, err
	}

	// Validate input data size; it may be -1 (unknown length) but can never be less than that.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument)
		return pi, toObjectErr(errInvalidArgument)
	}

	var partsMetadata []xlMetaV1
	var errs []error
	uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)

	// pre-check upload id lock.
	preUploadIDLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
	if err := preUploadIDLock.GetRLock(globalOperationTimeout); err != nil {
		return pi, err
	}

	// Validates if upload ID exists.
	if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
		preUploadIDLock.RUnlock()
		return pi, InvalidUploadID{UploadID: uploadID}
	}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs = readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket,
		uploadIDPath)

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs)
	if err != nil {
		preUploadIDLock.RUnlock()
		return pi, toObjectErr(err, bucket, object)
	}

	reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errXLWriteQuorum {
		preUploadIDLock.RUnlock()
		return pi, toObjectErr(reducedErr, bucket, object)
	}
	preUploadIDLock.RUnlock()

	// List all online disks.
	onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs)

	// Pick one from the first valid metadata.
	xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return pi, err
	}

	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

	// Need a unique name for the part being written in minioMetaBucket to
	// accommodate concurrent PutObjectPart requests.

	partSuffix := fmt.Sprintf("part.%d", partID)
	tmpPart := mustGetUUID()
	tmpPartPath := path.Join(tmpPart, partSuffix)

	// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
	defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum, false)
	if data.Size() > 0 || data.Size() == -1 {
		if pErr := xl.prepareFile(ctx, minioMetaTmpBucket, tmpPartPath, data.Size(), onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks, writeQuorum); pErr != nil {
			return pi, toObjectErr(pErr, bucket, object)
		}
	}

	erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	// Fetch buffer for I/O: reuse one from the pool when possible, otherwise allocate a new one.
	var buffer []byte
	switch size := data.Size(); {
	case size == 0:
		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF.
	case size == -1 || size > blockSizeV1:
		buffer = xl.bp.Get()
		defer xl.bp.Put(buffer)
	case size < blockSizeV1:
		// No need to allocate a full blockSizeV1 buffer if the incoming data is smaller.
		buffer = make([]byte, size, 2*size)
	}
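
	// Worked example of the buffer sizing above (blockSizeV1 assumed to be
	// 10 MiB for illustration): a 2 MiB part gets a fresh 2 MiB buffer, a
	// 64 MiB part borrows a pooled block-sized buffer, and a streaming part
	// of unknown length (size == -1) also uses the pool.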

	if len(buffer) > int(xlMeta.Erasure.BlockSize) {
		buffer = buffer[:xlMeta.Erasure.BlockSize]
	}
	writers := make([]*bitrotWriter, len(onlineDisks))
	for i, disk := range onlineDisks {
		if disk == nil {
			continue
		}
		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, DefaultBitrotAlgorithm)
	}
	n, err := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1)
	if err != nil {
		return pi, toObjectErr(err, bucket, object)
	}

	// Should return IncompleteBody{} error when reader has fewer bytes
	// than specified in request header.
	if n < data.Size() {
		logger.LogIf(ctx, IncompleteBody{})
		return pi, IncompleteBody{}
	}

	for i := range writers {
		if writers[i] == nil {
			onlineDisks[i] = nil
		}
	}

	// post-upload check (write) lock
	postUploadIDLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
	if err = postUploadIDLock.GetLock(globalOperationTimeout); err != nil {
		return pi, err
	}
	defer postUploadIDLock.Unlock()

	// Validate again if upload ID still exists.
	if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
		return pi, InvalidUploadID{UploadID: uploadID}
	}

	// Rename temporary part file to its final location.
	partPath := path.Join(uploadIDPath, partSuffix)
	onlineDisks, err = renamePart(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, writeQuorum)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	// Read metadata again because it might have been updated by a parallel upload of another part.
	partsMetadata, errs = readAllXLMetadata(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath)
	reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errXLWriteQuorum {
		return pi, toObjectErr(reducedErr, bucket, object)
	}

	// Get current highest version based on re-read partsMetadata.
	onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)

	// Pick one from the first valid metadata.
	xlMeta, err = pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return pi, err
	}

	// Once the part is successfully committed, proceed with updating XL metadata.
	xlMeta.Stat.ModTime = UTCNow()

	md5hex := hex.EncodeToString(data.MD5Current())

	// Add the current part.
	xlMeta.AddObjectPart(partID, partSuffix, md5hex, n, data.ActualSize())

	for i, disk := range onlineDisks {
		if disk == OfflineDisk {
			continue
		}
		partsMetadata[i].Stat = xlMeta.Stat
		partsMetadata[i].Parts = xlMeta.Parts
		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partSuffix, DefaultBitrotAlgorithm, writers[i].Sum()})
	}

	// Write all the checksum metadata.
	newUUID := mustGetUUID()
	tempXLMetaPath := newUUID

	// Write a unique `xl.json` to each disk carrying the new checksum-related information.
	if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum); err != nil {
		return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
	}

	if _, err = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	fi, err := xl.statPart(ctx, bucket, object, uploadID, partSuffix)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
	}

	// Return success.
	return PartInfo{
		PartNumber:   partID,
		LastModified: fi.ModTime,
		ETag:         md5hex,
		Size:         fi.Size,
		ActualSize:   data.ActualSize(),
	}, nil
}
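
// Illustrative end-to-end flow (bucket, object, readers, and options assumed;
// error handling elided) tying together the multipart entry points in this
// file:
//
//	uploadID, _ := xl.NewMultipartUpload(ctx, "bucket", "object", nil, opts)
//	p1, _ := xl.PutObjectPart(ctx, "bucket", "object", uploadID, 1, r1, opts)
//	p2, _ := xl.PutObjectPart(ctx, "bucket", "object", uploadID, 2, r2, opts)
//	oi, _ := xl.CompleteMultipartUpload(ctx, "bucket", "object", uploadID, []CompletePart{
//		{PartNumber: 1, ETag: p1.ETag},
//		{PartNumber: 2, ETag: p2.ETag},
//	})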

// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
	if err := checkListPartsArgs(ctx, bucket, object, xl); err != nil {
		return result, err
	}
	// Hold lock so that there is no competing
	// abort-multipart-upload or complete-multipart-upload.
	uploadIDLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket,
		xl.getUploadIDDir(bucket, object, uploadID))
	if err := uploadIDLock.GetLock(globalListingTimeout); err != nil {
		return result, err
	}
	defer uploadIDLock.Unlock()

	if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
		return result, InvalidUploadID{UploadID: uploadID}
	}

	uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)

	xlParts, xlMeta, err := xl.readXLMetaParts(ctx, minioMetaMultipartBucket, uploadIDPath)
	if err != nil {
		return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
	}

	// Populate the result stub.
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts
	result.PartNumberMarker = partNumberMarker
	result.UserDefined = xlMeta

	// If there are no parts or maxParts is zero, return right here.
	if len(xlParts) == 0 || maxParts == 0 {
		return result, nil
	}

	// Limit output to maxPartsList.
	if maxParts > maxPartsList {
		maxParts = maxPartsList
	}

	// Only parts with higher part numbers will be listed.
	partIdx := objectPartIndex(xlParts, partNumberMarker)
	parts := xlParts
	if partIdx != -1 {
		parts = xlParts[partIdx+1:]
	}
	count := maxParts
	for _, part := range parts {
		var fi FileInfo
		fi, err = xl.statPart(ctx, bucket, object, uploadID, part.Name)
		if err != nil {
			return result, toObjectErr(err, minioMetaBucket, path.Join(uploadID, part.Name))
		}
		result.Parts = append(result.Parts, PartInfo{
			PartNumber:   part.Number,
			ETag:         part.ETag,
			LastModified: fi.ModTime,
			Size:         part.Size,
		})
		count--
		if count == 0 {
			break
		}
	}
	// If listed entries are more than maxParts, we set IsTruncated as true.
	if len(parts) > len(result.Parts) {
		result.IsTruncated = true
		// Make sure to fill the next part number marker if IsTruncated is
		// true for subsequent listing.
		nextPartNumberMarker := result.Parts[len(result.Parts)-1].PartNumber
		result.NextPartNumberMarker = nextPartNumberMarker
	}
	return result, nil
}
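
// Illustrative paging sketch (assumed values; error handling elided): with 5
// uploaded parts and maxParts = 2, a client walks the listing by feeding
// NextPartNumberMarker back in until IsTruncated is false:
//
//	marker := 0
//	for {
//		res, _ := xl.ListObjectParts(ctx, "bucket", "object", uploadID, marker, 2)
//		// consume res.Parts ...
//		if !res.IsTruncated {
//			break
//		}
//		marker = res.NextPartNumberMarker
//	}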

// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated over the concatenation of the
// individual md5sums of all the parts (the S3 multipart ETag).
//
// Implements S3 compatible Complete multipart API.
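//
// Worked example of the resulting ETag (digests assumed for illustration):
// for two parts whose binary MD5 digests are d1 and d2, the multipart ETag
// is hex(md5(d1 || d2)) followed by "-2", e.g.
//
//	d1, _ := hex.DecodeString(part1ETag)
//	d2, _ := hex.DecodeString(part2ETag)
//	sum := md5.Sum(append(d1, d2...))
//	etag := hex.EncodeToString(sum[:]) + "-2"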
func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
	if err := checkCompleteMultipartArgs(ctx, bucket, object, xl); err != nil {
		return oi, err
	}
	// Hold write lock on the object.
	destLock := xl.nsMutex.NewNSLock(bucket, object)
	if err := destLock.GetLock(globalObjectTimeout); err != nil {
		return oi, err
	}
	defer destLock.Unlock()

	uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)

	// Hold lock so that
	//
	// 1) no one aborts this multipart upload
	//
	// 2) no one does a parallel complete-multipart-upload on this
	// multipart upload
	uploadIDLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
	if err := uploadIDLock.GetLock(globalOperationTimeout); err != nil {
		return oi, err
	}
	defer uploadIDLock.Unlock()

	if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
		return oi, InvalidUploadID{UploadID: uploadID}
	}

	// Check if an object is present as one of the parent dirs.
	// -- FIXME. (needs a new kind of lock).
	if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) {
		return oi, toObjectErr(errFileAccessDenied, bucket, object)
	}

	// Calculate s3 compatible md5sum for complete multipart.
	s3MD5, err := getCompleteMultipartMD5(ctx, parts)
	if err != nil {
		return oi, err
	}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs)
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
	if reducedErr == errXLWriteQuorum {
		return oi, toObjectErr(reducedErr, bucket, object)
	}

	onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs)

	// Calculate full object size.
	var objectSize int64

	// Calculate consolidated actual size.
	var objectActualSize int64

	// Pick one from the first valid metadata.
	xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum)
	if err != nil {
		return oi, err
	}

	// Order online disks in accordance with distribution order.
	onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)

	// Order parts metadata in accordance with distribution order.
	partsMetadata = shufflePartsMetadata(partsMetadata, xlMeta.Erasure.Distribution)

	// Save current xl meta for validation.
	var currentXLMeta = xlMeta

	// Allocate parts similar to incoming slice.
	xlMeta.Parts = make([]objectPartInfo, len(parts))

	// Validate each part and then commit to disk.
	for i, part := range parts {
		partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber)
		// All parts should have the same part number.
		if partIdx == -1 {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
			logger.LogIf(ctx, invp)
			return oi, invp
		}

		// All parts should have the same ETag as previously generated.
		if currentXLMeta.Parts[partIdx].ETag != part.ETag {
			invp := InvalidPart{
				PartNumber: part.PartNumber,
				ExpETag:    currentXLMeta.Parts[partIdx].ETag,
				GotETag:    part.ETag,
			}
			logger.LogIf(ctx, invp)
			return oi, invp
		}

		// All parts except the last part have to be at least 5MiB.
		if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].ActualSize) {
			logger.LogIf(ctx, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   currentXLMeta.Parts[partIdx].ActualSize,
				PartETag:   part.ETag,
			})
			return oi, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   currentXLMeta.Parts[partIdx].ActualSize,
				PartETag:   part.ETag,
			}
		}

		// The last part could have been uploaded as 0 bytes; there is no need
		// to save it in the final `xl.json`.
		if (i == len(parts)-1) && currentXLMeta.Parts[partIdx].Size == 0 {
			xlMeta.Parts = xlMeta.Parts[:i] // Skip the part.
			continue
		}

		// Save for total object size.
		objectSize += currentXLMeta.Parts[partIdx].Size

		// Save the consolidated actual size.
		objectActualSize += currentXLMeta.Parts[partIdx].ActualSize

		// Add incoming parts.
		xlMeta.Parts[i] = objectPartInfo{
			Number:     part.PartNumber,
			ETag:       part.ETag,
			Size:       currentXLMeta.Parts[partIdx].Size,
			Name:       fmt.Sprintf("part.%d", part.PartNumber),
			ActualSize: currentXLMeta.Parts[partIdx].ActualSize,
		}
	}

	// Save the final object size and modtime.
	xlMeta.Stat.Size = objectSize
	xlMeta.Stat.ModTime = UTCNow()

	// Save successfully calculated md5sum.
	xlMeta.Meta["etag"] = s3MD5

	// Save the consolidated actual size.
	xlMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)

	tempUploadIDPath := uploadID

	// Update all xl metadata; make sure not to modify fields like
	// checksum which are different on each disk.
	for index := range partsMetadata {
		partsMetadata[index].Stat = xlMeta.Stat
		partsMetadata[index].Meta = xlMeta.Meta
		partsMetadata[index].Parts = xlMeta.Parts
	}

	// Write unique `xl.json` for each disk.
	if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum); err != nil {
		return oi, toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
	}

	var rErr error
	onlineDisks, rErr = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum)
	if rErr != nil {
		return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
	}

	if xl.isObject(bucket, object) {
		// Rename, if an object already exists, to a temporary location.
		newUniqueID := mustGetUUID()

		// Delete the successfully renamed object.
		defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false)

		// NOTE: Do not use the online disks slice here.
		// The reason is that the existing object should be purged
		// regardless of `xl.json` status and rolled back in case of errors.
		_, err = renameObject(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, writeQuorum)
		if err != nil {
			return oi, toObjectErr(err, bucket, object)
		}
	}

	// Remove parts that weren't present in the CompleteMultipartUpload request.
	for _, curpart := range currentXLMeta.Parts {
		if objectPartIndex(xlMeta.Parts, curpart.Number) == -1 {
			// Delete the missing part files. e.g,
			// Request 1: NewMultipart
			// Request 2: PutObjectPart 1
			// Request 3: PutObjectPart 2
			// Request 4: CompleteMultipartUpload --part 2
			// N.B. the 1st part is not present. This part should be removed from the storage.
			xl.removeObjectPart(bucket, object, uploadID, curpart.Name)
		}
	}

	// Deny if WORM is enabled.
	if globalWORMEnabled {
		if xl.isObject(bucket, object) {
			return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
		}
	}

	// Rename the multipart object to its final location.
	if _, err = renameObject(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, writeQuorum); err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	// Success, return object info.
	return xlMeta.ToObjectInfo(bucket, object), nil
}

// AbortMultipartUpload - aborts an ongoing multipart operation
// signified by the input uploadID. This is an atomic operation and
// doesn't require clients to initiate multiple such requests.
//
// All parts are purged from all disks and the reference to the uploadID
// is removed from the system; rollback is not possible on this
// operation.
//
// Implements S3 compatible Abort multipart API. A slight difference is
// that this is an atomic idempotent operation: subsequent calls have
// no effect and further requests to the same uploadID would not be honored.
func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	if err := checkAbortMultipartArgs(ctx, bucket, object, xl); err != nil {
		return err
	}
	// Construct uploadIDPath.
	uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID)
	// Hold lock so that there is no competing
	// complete-multipart-upload or put-object-part.
	uploadIDLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
	if err := uploadIDLock.GetLock(globalOperationTimeout); err != nil {
		return err
	}
	defer uploadIDLock.Unlock()

	if !xl.isUploadIDExists(ctx, bucket, object, uploadID) {
		return InvalidUploadID{UploadID: uploadID}
	}

	// Read metadata associated with the object from all disks.
	partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)

	// get Quorum for this object
	_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs)
	if err != nil {
		return toObjectErr(err, bucket, object)
	}

	// Cleanup all uploaded parts.
	if err = xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum, false); err != nil {
		return toObjectErr(err, bucket, object)
	}

	// Successfully purged.
	return nil
}

// Clean up old multipart uploads. Should be run in a goroutine.
func (xl xlObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
	ticker := time.NewTicker(cleanupInterval)
	defer ticker.Stop()

	for {
		select {
		case <-doneCh:
			return
		case <-ticker.C:
			var disk StorageAPI
			for _, d := range xl.getLoadBalancedDisks() {
				if d != nil {
					disk = d
					break
				}
			}
			if disk == nil {
				continue
			}
			xl.cleanupStaleMultipartUploadsOnDisk(ctx, disk, expiry)
		}
	}
}
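
// Illustrative usage sketch (interval and expiry values assumed; the actual
// wiring lives elsewhere in the package): start the cleaner once at startup
// and stop it by closing doneCh.
//
//	doneCh := make(chan struct{})
//	go xl.cleanupStaleMultipartUploads(ctx, 20*time.Minute, 24*time.Hour, doneCh)
//	// ... on shutdown:
//	close(doneCh)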

// Remove the old multipart uploads on the given disk.
func (xl xlObjects) cleanupStaleMultipartUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) {
	now := time.Now()
	shaDirs, err := disk.ListDir(minioMetaMultipartBucket, "", -1)
	if err != nil {
		return
	}
	for _, shaDir := range shaDirs {
		uploadIDDirs, err := disk.ListDir(minioMetaMultipartBucket, shaDir, -1)
		if err != nil {
			continue
		}
		for _, uploadIDDir := range uploadIDDirs {
			uploadIDPath := pathJoin(shaDir, uploadIDDir)
			fi, err := disk.StatFile(minioMetaMultipartBucket, pathJoin(uploadIDPath, xlMetaJSONFile))
			if err != nil {
				continue
			}
			if now.Sub(fi.ModTime) > expiry {
				xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, len(xl.getDisks())/2+1, false)
			}
		}
	}
}