/*
 * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	pathutil "path"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/errors"
	mioutil "github.com/minio/minio/pkg/ioutil"

	"github.com/minio/minio/pkg/hash"
)

// Returns EXPORT/.minio.sys/multipart/SHA256/UPLOADID
func (fs *FSObjects) getUploadIDDir(bucket, object, uploadID string) string {
	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))), uploadID)
}

// Returns EXPORT/.minio.sys/multipart/SHA256
func (fs *FSObjects) getMultipartSHADir(bucket, object string) string {
	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))))
}

// Returns partNumber.etag
func (fs *FSObjects) encodePartFile(partNumber int, etag string) string {
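	// Zero-pad the part number to five digits so that a plain lexical sort of
	// the directory entries also orders the parts numerically.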
	return fmt.Sprintf("%.5d.%s", partNumber, etag)
}

// Returns partNumber and etag
func (fs *FSObjects) decodePartFile(name string) (partNumber int, etag string, err error) {
	result := strings.Split(name, ".")
	if len(result) != 2 {
		return 0, "", errUnexpected
	}
	partNumber, err = strconv.Atoi(result[0])
	if err != nil {
		return 0, "", errUnexpected
	}
	return partNumber, result[1], nil
}

// Appends parts to an appendFile sequentially.
func (fs *FSObjects) backgroundAppend(ctx context.Context, bucket, object, uploadID string) {
	fs.appendFileMapMu.Lock()
	logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
	file := fs.appendFileMap[uploadID]
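	// Lazily create the append file entry for this uploadID; parts are
	// concatenated into a temporary file under the .minio.sys/tmp bucket.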
	if file == nil {
		file = &fsAppendFile{
			filePath: pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, fmt.Sprintf("%s.%s", uploadID, mustGetUUID())),
		}
		fs.appendFileMap[uploadID] = file
	}
	fs.appendFileMapMu.Unlock()

	file.Lock()
	defer file.Unlock()

	// Since we append sequentially nextPartNumber will always be len(file.parts)+1
	nextPartNumber := len(file.parts) + 1
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
		logger.LogIf(ctx, err)
		return
	}
	sort.Strings(entries)

	for _, entry := range entries {
		if entry == fs.metaJSONFile {
			continue
		}
		partNumber, etag, err := fs.decodePartFile(entry)
		if err != nil {
			logger.GetReqInfo(ctx).AppendTags("entry", entry)
			logger.LogIf(ctx, err)
			return
		}
		if partNumber < nextPartNumber {
			// Part already appended.
			continue
		}
		if partNumber > nextPartNumber {
			// Required part number is not yet uploaded.
			return
		}

		partPath := pathJoin(uploadIDDir, entry)
		err = mioutil.AppendFile(file.filePath, partPath)
		if err != nil {
			reqInfo := logger.GetReqInfo(ctx).AppendTags("partPath", partPath)
			reqInfo.AppendTags("filepath", file.filePath)
			logger.LogIf(ctx, err)
			return
		}

		file.parts = append(file.parts, PartInfo{PartNumber: nextPartNumber, ETag: etag})
		nextPartNumber++
	}
}

// ListMultipartUploads - lists all the uploadIDs for the specified object.
// We do not support prefix based listing.
func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
	if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
		return result, toObjectErr(err)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return result, toObjectErr(err, bucket)
	}

	result.MaxUploads = maxUploads
	result.KeyMarker = keyMarker
	result.Prefix = object
	result.Delimiter = delimiter
	result.NextKeyMarker = object
	result.UploadIDMarker = uploadIDMarker

	uploadIDs, err := readDir(fs.getMultipartSHADir(bucket, object))
	if err != nil {
		if err == errFileNotFound {
			result.IsTruncated = false
			return result, nil
		}
		logger.LogIf(ctx, err)
		return result, toObjectErr(err)
	}

	// S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json
	// is the creation time of the uploadID, hence we will use that.
	var uploads []MultipartInfo
	for _, uploadID := range uploadIDs {
		metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile)
		fi, err := fsStatFile(ctx, metaFilePath)
		if err != nil {
			return result, toObjectErr(err, bucket, object)
		}
		uploads = append(uploads, MultipartInfo{
			Object:    object,
			UploadID:  strings.TrimSuffix(uploadID, slashSeparator),
			Initiated: fi.ModTime(),
		})
	}
	sort.Slice(uploads, func(i int, j int) bool {
		return uploads[i].Initiated.Before(uploads[j].Initiated)
	})

	uploadIndex := 0
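	// If an uploadIDMarker was supplied, skip past the uploads up to and
	// including the marker before collecting results.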
	if uploadIDMarker != "" {
		for uploadIndex < len(uploads) {
			if uploads[uploadIndex].UploadID != uploadIDMarker {
				uploadIndex++
				continue
			}
			if uploads[uploadIndex].UploadID == uploadIDMarker {
				uploadIndex++
				break
			}
			uploadIndex++
		}
	}
	for uploadIndex < len(uploads) {
		result.Uploads = append(result.Uploads, uploads[uploadIndex])
		result.NextUploadIDMarker = uploads[uploadIndex].UploadID
		uploadIndex++
		if len(result.Uploads) == maxUploads {
			break
		}
	}

	result.IsTruncated = uploadIndex < len(uploads)

	if !result.IsTruncated {
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}

	return result, nil
}

// NewMultipartUpload - initialize a new multipart upload, returns a
// unique id. The unique id returned here is of UUID form; each
// subsequent request returns a new unique UUID.
//
// Implements S3 compatible initiate multipart API.
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
	if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
		return "", toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return "", toObjectErr(err, bucket)
	}

	uploadID := mustGetUUID()
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

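	// Create the per-upload directory under .minio.sys/multipart/<SHA256>/<uploadID>.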
	err := mkdirAll(uploadIDDir, 0755)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	// Initialize fs.json values.
	fsMeta := newFSMetaV1()
	fsMeta.Meta = meta

	fsMetaBytes, err := json.Marshal(fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	return uploadID, nil
}

// CopyObjectPart - similar to PutObjectPart but reads data from an existing
// object. Internally incoming data is written to '.minio.sys/tmp' location
// and safely renamed to '.minio.sys/multipart' for each part.
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {

	if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil {
		return pi, toObjectErr(err)
	}

	// Initialize pipe: stream the source object into srcInfo.Writer while
	// PutObjectPart below consumes the other end via srcInfo.Reader.
	go func() {
		if gerr := fs.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
			if gerr = srcInfo.Writer.Close(); gerr != nil {
				logger.LogIf(ctx, gerr)
				return
			}
			return
		}
		// Close writer explicitly signaling we wrote all data.
		if gerr := srcInfo.Writer.Close(); gerr != nil {
			logger.LogIf(ctx, gerr)
			return
		}
	}()

	partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader)
	if err != nil {
		return pi, toObjectErr(err, dstBucket, dstObject)
	}

	return partInfo, nil
}

// PutObjectPart - reads incoming data until EOF for the part file on
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
	if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	// Validate input data size; it can never be less than zero.
	if data.Size() < 0 {
		logger.LogIf(ctx, errInvalidArgument)
		return pi, toObjectErr(errInvalidArgument)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	// Just check if the uploadID exists to avoid copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
			return pi, InvalidUploadID{UploadID: uploadID}
		}
		return pi, toObjectErr(err, bucket, object)
	}

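	// Size the copy buffer: use the default read size, capped at the declared
	// part size when the size is known.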
	bufSize := int64(readSizeV1)
	if size := data.Size(); size > 0 && bufSize > size {
		bufSize = size
	}
	buf := make([]byte, bufSize)

	tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID))
	bytesWritten, err := fsCreateFile(ctx, tmpPartPath, data, buf, data.Size())
	if err != nil {
		fsRemoveFile(ctx, tmpPartPath)
		return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
	}

	// Should return IncompleteBody{} error when reader has fewer
	// bytes than specified in request header.
	if bytesWritten < data.Size() {
		fsRemoveFile(ctx, tmpPartPath)
		return pi, IncompleteBody{}
	}

	// Delete temporary part in case of failure. If
	// PutObjectPart succeeds then there would be nothing to
	// delete in which case we just ignore the error.
	defer fsRemoveFile(ctx, tmpPartPath)

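	// Use the MD5 of the received data as the part's ETag; fall back to a
	// generated ETag when no MD5 was computed for the incoming stream.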
	etag := hex.EncodeToString(data.MD5Current())
	if etag == "" {
		etag = GenETag()
	}
	partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag))

	if err = fsRenameFile(ctx, tmpPartPath, partPath); err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	go fs.backgroundAppend(ctx, bucket, object, uploadID)

	fi, err := fsStatFile(ctx, partPath)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}
	return PartInfo{
		PartNumber:   partID,
		LastModified: fi.ModTime(),
		ETag:         etag,
		Size:         fi.Size(),
	}, nil
}

// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
	if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
		return result, toObjectErr(err)
	}
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts
	result.PartNumberMarker = partNumberMarker

	// Check if bucket exists
	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return result, toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
			return result, InvalidUploadID{UploadID: uploadID}
		}
		return result, toObjectErr(err, bucket, object)
	}

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.LogIf(ctx, err)
		return result, toObjectErr(err, bucket)
	}

	partsMap := make(map[int]string)
	for _, entry := range entries {
		if entry == fs.metaJSONFile {
			continue
		}
		partNumber, etag1, derr := fs.decodePartFile(entry)
		if derr != nil {
			logger.LogIf(ctx, derr)
			return result, toObjectErr(derr)
		}
		etag2, ok := partsMap[partNumber]
		if !ok {
			partsMap[partNumber] = etag1
			continue
		}
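		// The same part number was uploaded more than once; keep the etag of the
		// part file that was written most recently.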
		stat1, serr := fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag1)))
		if serr != nil {
			return result, toObjectErr(serr)
		}
		stat2, serr := fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(partNumber, etag2)))
		if serr != nil {
			return result, toObjectErr(serr)
		}
		if stat1.ModTime().After(stat2.ModTime()) {
			partsMap[partNumber] = etag1
		}
	}

	var parts []PartInfo
	for partNumber, etag := range partsMap {
		parts = append(parts, PartInfo{PartNumber: partNumber, ETag: etag})
	}
	sort.Slice(parts, func(i int, j int) bool {
		return parts[i].PartNumber < parts[j].PartNumber
	})
	i := 0
	if partNumberMarker != 0 {
		// If the marker was set, skip the entries till the marker.
		for _, part := range parts {
			i++
			if part.PartNumber == partNumberMarker {
				break
			}
		}
	}

	partsCount := 0
	for partsCount < maxParts && i < len(parts) {
		result.Parts = append(result.Parts, parts[i])
		i++
		partsCount++
	}
	if i < len(parts) {
		result.IsTruncated = true
		if partsCount != 0 {
			result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber
		}
	}
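	// Fill in the modification time and size of each returned part by statting
	// the corresponding part file.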
	for i, part := range result.Parts {
		var stat os.FileInfo
		stat, err = fsStatFile(ctx, pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag)))
		if err != nil {
			return result, toObjectErr(err)
		}
		result.Parts[i].LastModified = stat.ModTime()
		result.Parts[i].Size = stat.Size()
	}

	fsMetaBytes, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return result, err
	}

	result.UserDefined = parseFSMetaMap(fsMetaBytes)
	return result, nil
}

// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
	if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
		return oi, toObjectErr(err)
	}

	// Check if an object is present as one of the parent dirs.
	if fs.parentDirIsObject(ctx, bucket, pathutil.Dir(object)) {
		return oi, toObjectErr(errFileAccessDenied, bucket, object)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return oi, toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	// Just check if the uploadID exists to avoid copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
			return oi, InvalidUploadID{UploadID: uploadID}
		}
		return oi, toObjectErr(err, bucket, object)
	}

	// Calculate s3 compatible md5sum for complete multipart.
	s3MD5, err := getCompleteMultipartMD5(ctx, parts)
	if err != nil {
		return oi, err
	}

	partSize := int64(-1) // Used later to ensure that all part sizes are the same.

	fsMeta := fsMetaV1{}

	// Allocate parts similar to incoming slice.
	fsMeta.Parts = make([]objectPartInfo, len(parts))

	// Validate all parts and then commit to disk.
	for i, part := range parts {
		partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag))
		var fi os.FileInfo
		fi, err = fsStatFile(ctx, partPath)
		if err != nil {
			if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
				return oi, InvalidPart{}
			}
			return oi, err
		}
		if partSize == -1 {
			partSize = fi.Size()
		}

		fsMeta.Parts[i] = objectPartInfo{
			Number: part.PartNumber,
			ETag:   part.ETag,
			Size:   fi.Size(),
		}

		if i == len(parts)-1 {
			break
		}

		// All parts except the last part have to be at least 5MB.
		if !isMinAllowedPartSize(fi.Size()) {
			err = PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   fi.Size(),
				PartETag:   part.ETag,
			}
			logger.LogIf(ctx, err)
			return oi, err
		}

		// TODO: Make necessary changes in future as explained in the below comment.
		// All parts except the last part have to be of the same size. We are introducing this
		// check to see if any clients break. If clients do not break then we can optimize
		// multipart PutObjectPart by writing the part at the right offset using pwrite()
		// so that we don't need to do background append at all. i.e by the time we get
		// CompleteMultipartUpload we already have the full file available which can be
		// renamed to the main name-space.
		if partSize != fi.Size() {
			logger.LogIf(ctx, PartsSizeUnequal{})
			return oi, PartsSizeUnequal{}
		}
	}

	appendFallback := true // In case background-append did not append the required parts.
	appendFilePath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, fmt.Sprintf("%s.%s", uploadID, mustGetUUID()))

	// Most of the time appendFile would already be fully appended by now. We call fs.backgroundAppend()
	// to take care of the following corner case:
	// 1. The last PutObjectPart triggers go-routine fs.backgroundAppend, this go-routine has not started yet.
	// 2. Now CompleteMultipartUpload gets called which sees that lastPart is not appended and starts appending
	//    from the beginning.
	fs.backgroundAppend(ctx, bucket, object, uploadID)

	fs.appendFileMapMu.Lock()
	file := fs.appendFileMap[uploadID]
	delete(fs.appendFileMap, uploadID)
	fs.appendFileMapMu.Unlock()

	if file != nil {
		file.Lock()
		defer file.Unlock()
		// Verify that appendFile has all the parts.
		if len(file.parts) == len(parts) {
			for i := range parts {
				if parts[i].ETag != file.parts[i].ETag {
					break
				}
				if parts[i].PartNumber != file.parts[i].PartNumber {
					break
				}
				if i == len(parts)-1 {
					appendFilePath = file.filePath
					appendFallback = false
				}
			}
		}
	}

	if appendFallback {
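		// The background append is incomplete or out of order; discard it and
		// rebuild the object by appending every part in the order supplied by
		// the client.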
		fsRemoveFile(ctx, file.filePath)
		for _, part := range parts {
			partPath := pathJoin(uploadIDDir, fs.encodePartFile(part.PartNumber, part.ETag))
			err = mioutil.AppendFile(appendFilePath, partPath)
			if err != nil {
				logger.LogIf(ctx, err)
				return oi, toObjectErr(err)
			}
		}
	}

	// Hold write lock on the object.
	destLock := fs.nsMutex.NewNSLock(bucket, object)
	if err = destLock.GetLock(globalObjectTimeout); err != nil {
		return oi, err
	}
	defer destLock.Unlock()
	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
	metaFile, err := fs.rwPool.Create(fsMetaPath)
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	defer metaFile.Close()

	// Read saved fs metadata for ongoing multipart.
	fsMetaBuf, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	err = json.Unmarshal(fsMetaBuf, &fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	// Save additional metadata.
	if len(fsMeta.Meta) == 0 {
		fsMeta.Meta = make(map[string]string)
	}
	fsMeta.Meta["etag"] = s3MD5
	if _, err = fsMeta.WriteTo(metaFile); err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}

	// Deny if WORM is enabled
	if globalWORMEnabled {
		if _, err = fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)); err == nil {
			return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
		}
	}

	err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	fsRemoveAll(ctx, uploadIDDir)
	fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	return fsMeta.ToObjectInfo(bucket, object, fi), nil
}

// AbortMultipartUpload - aborts an ongoing multipart operation
// signified by the input uploadID. This is an atomic operation and
// doesn't require clients to initiate multiple such requests.
//
// All parts are purged from all disks and reference to the uploadID
// would be removed from the system, rollback is not possible on this
// operation.
//
// Implements S3 compatible Abort multipart API, slight difference is
// that this is an atomic idempotent operation. Subsequent calls have
// no effect and further requests to the same uploadID would not be
// honored.
func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	if err := checkAbortMultipartArgs(ctx, bucket, object, fs); err != nil {
		return err
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return toObjectErr(err, bucket)
	}

	fs.appendFileMapMu.Lock()
	delete(fs.appendFileMap, uploadID)
	fs.appendFileMapMu.Unlock()

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	// Just check if the uploadID exists to avoid copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
			return InvalidUploadID{UploadID: uploadID}
		}
		return toObjectErr(err, bucket, object)
	}
	// Ignore the error returned as Windows fails to remove directory if a file in it
	// is Open()ed by the backgroundAppend()
	fsRemoveAll(ctx, uploadIDDir)

	return nil
}

// Removes multipart uploads if any older than `expiry` duration
// on all buckets for every `cleanupInterval`; this function is
// blocking and should be run in a go-routine.
func (fs *FSObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh chan struct{}) {
	ticker := time.NewTicker(cleanupInterval)
	for {
		select {
		case <-doneCh:
			// Stop the timer.
			ticker.Stop()
			return
		case <-ticker.C:
			now := time.Now()
			entries, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket))
			if err != nil {
				continue
			}
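			// Each entry is the SHA256 directory for one bucket/object pair; walk
			// its uploadIDs and remove those older than the expiry.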
			for _, entry := range entries {
				uploadIDs, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry))
				if err != nil {
					continue
				}
				for _, uploadID := range uploadIDs {
					fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
					if err != nil {
						continue
					}
					if now.Sub(fi.ModTime()) > expiry {
						fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID))
					}
				}
			}
		}
	}
}