// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	jsoniter "github.com/json-iterator/go"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/trie"
)
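
// Background appends are staged under '.minio.sys/tmp/<fsUUID>/bg-appends';
// entries orphaned by an aborted or crashed upload are swept every
// bgAppendsCleanupInterval (see cleanupStaleUploads).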
const (
	bgAppendsDirName         = "bg-appends"
	bgAppendsCleanupInterval = 10 * time.Minute
)

// Returns EXPORT/.minio.sys/multipart/SHA256/UPLOADID
func (fs *FSObjects) getUploadIDDir(bucket, object, uploadID string) string {
	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))), uploadID)
}

// Returns EXPORT/.minio.sys/multipart/SHA256
func (fs *FSObjects) getMultipartSHADir(bucket, object string) string {
	return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))))
}

// Returns partNumber.etag.actualSize
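// e.g. encodePartFile(1, "d41d8cd98f00b204e9800998ecf8427e", 5242880)
// yields "00001.d41d8cd98f00b204e9800998ecf8427e.5242880".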
func (fs *FSObjects) encodePartFile(partNumber int, etag string, actualSize int64) string {
	return fmt.Sprintf("%.5d.%s.%d", partNumber, etag, actualSize)
}

// Returns partNumber, etag and actualSize decoded from a part file name.
func (fs *FSObjects) decodePartFile(name string) (partNumber int, etag string, actualSize int64, err error) {
	result := strings.Split(name, ".")
	if len(result) != 3 {
		return 0, "", 0, errUnexpected
	}
	partNumber, err = strconv.Atoi(result[0])
	if err != nil {
		return 0, "", 0, errUnexpected
	}
	actualSize, err = strconv.ParseInt(result[2], 10, 64)
	if err != nil {
		return 0, "", 0, errUnexpected
	}
	return partNumber, result[1], actualSize, nil
}

// Appends parts to an appendFile sequentially.
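// It is kicked off as a goroutine from PutObjectPart and called synchronously
// from CompleteMultipartUpload to catch up on any parts not yet appended.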
func (fs *FSObjects) backgroundAppend(ctx context.Context, bucket, object, uploadID string) {
	fs.appendFileMapMu.Lock()
	logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
	file := fs.appendFileMap[uploadID]
	if file == nil {
		file = &fsAppendFile{
			filePath: pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, bgAppendsDirName, fmt.Sprintf("%s.%s", uploadID, mustGetUUID())),
		}
		fs.appendFileMap[uploadID] = file
	}
	fs.appendFileMapMu.Unlock()

	file.Lock()
	defer file.Unlock()

	// Since we append sequentially, nextPartNumber will always be len(file.parts)+1
	nextPartNumber := len(file.parts) + 1
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
		logger.LogIf(ctx, err)
		return
	}
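	// Zero-padded part numbers (see encodePartFile) make the lexical sort
	// below equivalent to sorting by part number.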
	sort.Strings(entries)

	for _, entry := range entries {
		if entry == fs.metaJSONFile {
			continue
		}
		partNumber, etag, actualSize, err := fs.decodePartFile(entry)
		if err != nil {
			// Skip part files whose names don't match the expected format. These could be backend filesystem specific files.
			continue
		}
		if partNumber < nextPartNumber {
			// Part already appended.
			continue
		}
		if partNumber > nextPartNumber {
			// Required part number is not yet uploaded.
			return
		}

		partPath := pathJoin(uploadIDDir, entry)
		err = xioutil.AppendFile(file.filePath, partPath, globalFSOSync)
		if err != nil {
			reqInfo := logger.GetReqInfo(ctx).AppendTags("partPath", partPath)
			reqInfo.AppendTags("filepath", file.filePath)
			logger.LogIf(ctx, err)
			return
		}

		file.parts = append(file.parts, PartInfo{PartNumber: partNumber, ETag: etag, ActualSize: actualSize})
		nextPartNumber++
	}
}

// ListMultipartUploads - lists all the uploadIDs for the specified object.
// We do not support prefix based listing.
func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
	if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
		return result, toObjectErr(err)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return result, toObjectErr(err, bucket)
	}

	result.MaxUploads = maxUploads
	result.KeyMarker = keyMarker
	result.Prefix = object
	result.Delimiter = delimiter
	result.NextKeyMarker = object
	result.UploadIDMarker = uploadIDMarker

	uploadIDs, err := readDir(fs.getMultipartSHADir(bucket, object))
	if err != nil {
		if err == errFileNotFound {
			result.IsTruncated = false
			return result, nil
		}
		logger.LogIf(ctx, err)
		return result, toObjectErr(err)
	}

	// S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json
	// is the creation time of the uploadID, hence we will use that.
	var uploads []MultipartInfo
	for _, uploadID := range uploadIDs {
		metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile)
		fi, err := fsStatFile(ctx, metaFilePath)
		if err != nil {
			return result, toObjectErr(err, bucket, object)
		}
		uploads = append(uploads, MultipartInfo{
			Object:    object,
			UploadID:  strings.TrimSuffix(uploadID, SlashSeparator),
			Initiated: fi.ModTime(),
		})
	}
	sort.Slice(uploads, func(i int, j int) bool {
		return uploads[i].Initiated.Before(uploads[j].Initiated)
	})

	uploadIndex := 0
	if uploadIDMarker != "" {
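		// Skip all uploads up to and including the uploadIDMarker.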
		for uploadIndex < len(uploads) {
			if uploads[uploadIndex].UploadID != uploadIDMarker {
				uploadIndex++
				continue
			}
			if uploads[uploadIndex].UploadID == uploadIDMarker {
				uploadIndex++
				break
			}
			uploadIndex++
		}
	}
	for uploadIndex < len(uploads) {
		result.Uploads = append(result.Uploads, uploads[uploadIndex])
		result.NextUploadIDMarker = uploads[uploadIndex].UploadID
		uploadIndex++
		if len(result.Uploads) == maxUploads {
			break
		}
	}

	result.IsTruncated = uploadIndex < len(uploads)

	if !result.IsTruncated {
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}

	return result, nil
}

// NewMultipartUpload - initialize a new multipart upload, returns a
// unique id. The unique id returned here is of UUID form, for each
// subsequent request each UUID is unique.
//
// Implements S3 compatible initiate multipart API.
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
	if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
		return "", toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return "", toObjectErr(err, bucket)
	}

	uploadID := mustGetUUID()
	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	err := mkdirAll(uploadIDDir, 0o755)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	// Initialize fs.json values.
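	// The ModTime of this fs.json also serves as the upload's initiation time
	// (see ListMultipartUploads).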
	fsMeta := newFSMetaV1()
	fsMeta.Meta = opts.UserDefined

	fsMetaBytes, err := json.Marshal(fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
		logger.LogIf(ctx, err)
		return "", err
	}

	return uploadID, nil
}

// CopyObjectPart - similar to PutObjectPart but reads data from an existing
// object. Internally incoming data is written to '.minio.sys/tmp' location
// and safely renamed to '.minio.sys/multipart' for each part.
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error,
) {
	if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
		return pi, VersionNotFound{
			Bucket:    srcBucket,
			Object:    srcObject,
			VersionID: srcOpts.VersionID,
		}
	}

	if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil {
		return pi, toObjectErr(err)
	}

	partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
	if err != nil {
		return pi, toObjectErr(err, dstBucket, dstObject)
	}

	return partInfo, nil
}

// PutObjectPart - reads incoming data until EOF for the part file on
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) {
	if opts.VersionID != "" && opts.VersionID != nullVersionID {
		return pi, VersionNotFound{
			Bucket:    bucket,
			Object:    object,
			VersionID: opts.VersionID,
		}
	}

	data := r.Reader
	if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return pi, toObjectErr(err, bucket)
	}

	// Validate input data size; it can never be less than -1.
	if data.Size() < -1 {
		logger.LogIf(ctx, errInvalidArgument, logger.Application)
		return pi, toObjectErr(errInvalidArgument)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)

	// Just check if the uploadID exists to avoid copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return pi, toObjectErr(err, bucket, object)
	}

	tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID))
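	// The part is staged in the tmp bucket and only renamed into the upload-id
	// directory after it has been fully written, so a partially written part is
	// never visible to listing or complete operations.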
	bytesWritten, err := fsCreateFile(ctx, tmpPartPath, data, data.Size())

	// Delete temporary part in case of failure. If
	// PutObjectPart succeeds then there would be nothing to
	// delete in which case we just ignore the error.
	defer fsRemoveFile(ctx, tmpPartPath)

	if err != nil {
		return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
	}

	// Should return IncompleteBody{} error when reader has fewer
	// bytes than specified in request header.
	if bytesWritten < data.Size() {
		return pi, IncompleteBody{Bucket: bucket, Object: object}
	}

	etag := r.MD5CurrentHexString()

	if etag == "" {
		etag = GenETag()
	}

	partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag, data.ActualSize()))

	// Make sure not to create parent directories if they don't exist - the upload might have been aborted.
	if err = Rename(tmpPartPath, partPath); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}

	go fs.backgroundAppend(ctx, bucket, object, uploadID)

	fi, err := fsStatFile(ctx, partPath)
	if err != nil {
		return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
	}
	return PartInfo{
		PartNumber:   partID,
		LastModified: fi.ModTime(),
		ETag:         etag,
		Size:         fi.Size(),
		ActualSize:   data.ActualSize(),
	}, nil
}

// GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used
// by callers to verify object states
// - encrypted
// - compressed
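// It does not list the uploaded parts; use ListObjectParts for that.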
func (fs *FSObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
	minfo := MultipartInfo{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}

	if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
		return minfo, toObjectErr(err)
	}

	// Check if bucket exists
	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return minfo, toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return minfo, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return minfo, toObjectErr(err, bucket, object)
	}

	fsMetaBytes, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return minfo, toObjectErr(err, bucket, object)
	}

	var fsMeta fsMetaV1
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
		return minfo, toObjectErr(err, bucket, object)
	}

	minfo.UserDefined = fsMeta.Meta
	return minfo, nil
}

// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshaled directly into XML and
// replied back to the client.
func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
	if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil {
		return result, toObjectErr(err)
	}
	result.Bucket = bucket
	result.Object = object
	result.UploadID = uploadID
	result.MaxParts = maxParts
	result.PartNumberMarker = partNumberMarker

	// Check if bucket exists
	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return result, toObjectErr(err, bucket)
	}

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return result, toObjectErr(err, bucket, object)
	}

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.LogIf(ctx, err)
		return result, toObjectErr(err, bucket)
	}

	partsMap := make(map[int]PartInfo)
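	// A part may have been re-uploaded with the same number; when duplicates
	// exist, keep the entry with the most recent modification time.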
	for _, entry := range entries {
		if entry == fs.metaJSONFile {
			continue
		}

		partNumber, currentEtag, actualSize, derr := fs.decodePartFile(entry)
		if derr != nil {
			// Skip part files whose names don't match the expected format. These could be backend filesystem specific files.
			continue
		}

		entryStat, err := fsStatFile(ctx, pathJoin(uploadIDDir, entry))
		if err != nil {
			continue
		}

		currentMeta := PartInfo{
			PartNumber:   partNumber,
			ETag:         currentEtag,
			ActualSize:   actualSize,
			Size:         entryStat.Size(),
			LastModified: entryStat.ModTime(),
		}

		cachedMeta, ok := partsMap[partNumber]
		if !ok {
			partsMap[partNumber] = currentMeta
			continue
		}

		if currentMeta.LastModified.After(cachedMeta.LastModified) {
			partsMap[partNumber] = currentMeta
		}
	}

	var parts []PartInfo
	for _, partInfo := range partsMap {
		parts = append(parts, partInfo)
	}

	sort.Slice(parts, func(i int, j int) bool {
		return parts[i].PartNumber < parts[j].PartNumber
	})

	i := 0
	if partNumberMarker != 0 {
		// If the marker was set, skip the entries till the marker.
		for _, part := range parts {
			i++
			if part.PartNumber == partNumberMarker {
				break
			}
		}
	}

	partsCount := 0
	for partsCount < maxParts && i < len(parts) {
		result.Parts = append(result.Parts, parts[i])
		i++
		partsCount++
	}
	if i < len(parts) {
		result.IsTruncated = true
		if partsCount != 0 {
			result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber
		}
	}

	rc, _, err := fsOpenFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile), 0)
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return result, toObjectErr(err, bucket, object)
	}
	defer rc.Close()

	fsMetaBytes, err := ioutil.ReadAll(rc)
	if err != nil {
		return result, toObjectErr(err, bucket, object)
	}

	var fsMeta fsMetaV1
	json := jsoniter.ConfigCompatibleWithStandardLibrary
	if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil {
		return result, err
	}

	result.UserDefined = fsMeta.Meta
	return result, nil
}

// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {
	var actualSize int64

	if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil {
		return oi, toObjectErr(err)
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return oi, toObjectErr(err, bucket)
	}
	defer NSUpdated(bucket, object)

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	// Just check if the uploadID exists to avoid copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return oi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return oi, toObjectErr(err, bucket, object)
	}

	// Ensure that part ETags are canonicalized to strip off extraneous quotes.
	for i := range parts {
		parts[i].ETag = canonicalizeETag(parts[i].ETag)
	}

	fsMeta := fsMetaV1{}

	// Allocate parts similar to incoming slice.
	fsMeta.Parts = make([]ObjectPartInfo, len(parts))

	entries, err := readDir(uploadIDDir)
	if err != nil {
		logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir)
		logger.LogIf(ctx, err)
		return oi, err
	}

	// Create entries trie structure for prefix match
	entriesTrie := trie.NewTrie()
	for _, entry := range entries {
		entriesTrie.Insert(entry)
	}

	// Save consolidated actual size.
	var objectActualSize int64
	// Validate all parts and then commit to disk.
	for i, part := range parts {
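		// Resolve the on-disk part file for this part number and ETag via a
		// prefix match on the trie built above.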
		partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag)
		if partFile == "" {
			return oi, InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
		}

		// Read the actualSize from the pathFileName.
		subParts := strings.Split(partFile, ".")
		actualSize, err = strconv.ParseInt(subParts[len(subParts)-1], 10, 64)
		if err != nil {
			return oi, InvalidPart{
				PartNumber: part.PartNumber,
				GotETag:    part.ETag,
			}
		}

		partPath := pathJoin(uploadIDDir, partFile)

		var fi os.FileInfo
		fi, err = fsStatFile(ctx, partPath)
		if err != nil {
			if err == errFileNotFound || err == errFileAccessDenied {
				return oi, InvalidPart{}
			}
			return oi, err
		}

		fsMeta.Parts[i] = ObjectPartInfo{
			Number:     part.PartNumber,
			Size:       fi.Size(),
			ActualSize: actualSize,
		}

		// Consolidate the actual size.
		objectActualSize += actualSize

		if i == len(parts)-1 {
			break
		}

		// All parts except the last one have to be at least 5MB.
		if !isMinAllowedPartSize(actualSize) {
			return oi, PartTooSmall{
				PartNumber: part.PartNumber,
				PartSize:   actualSize,
				PartETag:   part.ETag,
			}
		}
	}

	appendFallback := true // In case background-append did not append the required parts.
	appendFilePath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, "bg-appends", fmt.Sprintf("%s.%s", uploadID, mustGetUUID()))

	// Most of the time appendFile would already be fully appended by now. We call fs.backgroundAppend()
	// to take care of the following corner case:
	// 1. The last PutObjectPart triggers the go-routine fs.backgroundAppend, but this go-routine has not started yet.
	// 2. Now CompleteMultipartUpload gets called, sees that the last part is not appended and starts appending
	//    from the beginning.
	fs.backgroundAppend(ctx, bucket, object, uploadID)

	fs.appendFileMapMu.Lock()
	file := fs.appendFileMap[uploadID]
	delete(fs.appendFileMap, uploadID)
	fs.appendFileMapMu.Unlock()

	if file != nil {
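		// Use the staged append file only if its recorded parts match the
		// client-supplied part list exactly; otherwise fall back to appending
		// every part from scratch below.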
		file.Lock()
		defer file.Unlock()
		// Verify that appendFile has all the parts.
		if len(file.parts) == len(parts) {
			for i := range parts {
				if parts[i].ETag != file.parts[i].ETag {
					break
				}
				if parts[i].PartNumber != file.parts[i].PartNumber {
					break
				}
				if i == len(parts)-1 {
					appendFilePath = file.filePath
					appendFallback = false
				}
			}
		}
	}

	if appendFallback {
		if file != nil {
			fsRemoveFile(ctx, file.filePath)
		}
		for _, part := range parts {
			partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag)
			if partFile == "" {
				logger.LogIf(ctx, fmt.Errorf("%.5d.%s missing will not proceed",
					part.PartNumber, part.ETag))
				return oi, InvalidPart{
					PartNumber: part.PartNumber,
					GotETag:    part.ETag,
				}
			}
			if err = xioutil.AppendFile(appendFilePath, pathJoin(uploadIDDir, partFile), globalFSOSync); err != nil {
				logger.LogIf(ctx, err)
				return oi, toObjectErr(err)
			}
		}
	}

	// Hold write lock on the object.
	destLock := fs.NewNSLock(bucket, object)
	lkctx, err := destLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return oi, err
	}
	ctx = lkctx.Context()
	defer destLock.Unlock(lkctx.Cancel)

	bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
	fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
	metaFile, err := fs.rwPool.Write(fsMetaPath)
	var freshFile bool
	if err != nil {
		if !errors.Is(err, errFileNotFound) {
			logger.LogIf(ctx, err)
			return oi, toObjectErr(err, bucket, object)
		}
		metaFile, err = fs.rwPool.Create(fsMetaPath)
		if err != nil {
			logger.LogIf(ctx, err)
			return oi, toObjectErr(err, bucket, object)
		}
		freshFile = true
	}
	defer metaFile.Close()
	defer func() {
		// Remove meta file when CompleteMultipart encounters
		// any error and it is a fresh file.
		//
		// We should preserve the `fs.json` of any
		// existing object.
		if e != nil && freshFile {
			tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
			fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
		}
	}()

	// Read saved fs metadata for ongoing multipart.
	fsMetaBuf, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	err = json.Unmarshal(fsMetaBuf, &fsMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}
	// Save additional metadata.
	if fsMeta.Meta == nil {
		fsMeta.Meta = make(map[string]string)
	}

	fsMeta.Meta["etag"] = opts.UserDefined["etag"]
	if fsMeta.Meta["etag"] == "" {
		fsMeta.Meta["etag"] = getCompleteMultipartMD5(parts)
	}

	// Save consolidated actual size.
	fsMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)
	if _, err = fsMeta.WriteTo(metaFile); err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}

	err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		logger.LogIf(ctx, err)
		return oi, toObjectErr(err, bucket, object)
	}

	// Purge multipart folders
	{
		fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID())
		defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background.

		Rename(uploadIDDir, fsTmpObjPath)

		// It is safe to ignore any directory not empty error (in case there were multiple uploadIDs on the same object)
		fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object))
	}

	fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
	if err != nil {
		return oi, toObjectErr(err, bucket, object)
	}

	return fsMeta.ToObjectInfo(bucket, object, fi), nil
}

// AbortMultipartUpload - aborts an ongoing multipart operation
// signified by the input uploadID. This is an atomic operation and
// doesn't require clients to initiate multiple such requests.
//
// All parts are purged from all disks and the reference to the uploadID
// would be removed from the system; rollback is not possible on this
// operation.
//
// Implements S3 compatible Abort multipart API, slight difference is
// that this is an atomic idempotent operation. Subsequent calls have
// no effect and further requests to the same uploadID would not be
// honored.
func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
	if err := checkAbortMultipartArgs(ctx, bucket, object, fs); err != nil {
		return err
	}

	if _, err := fs.statBucketDir(ctx, bucket); err != nil {
		return toObjectErr(err, bucket)
	}

	fs.appendFileMapMu.Lock()
	// Remove the corresponding append file in the tmp folder.
	file := fs.appendFileMap[uploadID]
	if file != nil {
		fsRemoveFile(ctx, file.filePath)
	}
	delete(fs.appendFileMap, uploadID)
	fs.appendFileMapMu.Unlock()

	uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
	// Just check if the uploadID exists to avoid copy if it doesn't.
	_, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile))
	if err != nil {
		if err == errFileNotFound || err == errFileAccessDenied {
			return InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID}
		}
		return toObjectErr(err, bucket, object)
	}

	// Purge multipart folders
	{
		fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID())
		defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background.

		Rename(uploadIDDir, fsTmpObjPath)

		// It is safe to ignore any directory not empty error (in case there were multiple uploadIDs on the same object)
		fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object))
	}

	return nil
}

// Returns all upload IDs, mapped to the full path of each upload-id directory.
// Does not return an error as this is a lazy operation.
func (fs *FSObjects) getAllUploadIDs(ctx context.Context) (result map[string]string) {
	result = make(map[string]string)

	entries, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket))
	if err != nil {
		return
	}
	for _, entry := range entries {
		uploadIDs, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry))
		if err != nil {
			continue
		}
		// Remove the trailing slash separator
		for i := range uploadIDs {
			uploadID := strings.TrimSuffix(uploadIDs[i], SlashSeparator)
			result[uploadID] = pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID)
		}
	}
	return
}

// Removes multipart uploads if any older than `expiry` duration
// on all buckets for every `cleanupInterval`; this function is
// blocking and should be run in a go-routine.
func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) {
	expiryUploadsTimer := time.NewTimer(globalAPIConfig.getStaleUploadsCleanupInterval())
	defer expiryUploadsTimer.Stop()

	bgAppendTmpCleaner := time.NewTimer(bgAppendsCleanupInterval)
	defer bgAppendTmpCleaner.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-bgAppendTmpCleaner.C:
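			// Drop append-file state, both the in-memory map entries and the
			// staged files on disk, for upload IDs that no longer exist.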
			foundUploadIDs := fs.getAllUploadIDs(ctx)

			// Remove background append map from the memory
			fs.appendFileMapMu.Lock()
			for uploadID := range fs.appendFileMap {
				_, ok := foundUploadIDs[uploadID]
				if !ok {
					delete(fs.appendFileMap, uploadID)
				}
			}
			fs.appendFileMapMu.Unlock()

			// Remove background append files from the disk
			bgAppendsDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, bgAppendsDirName)
			entries, err := readDir(bgAppendsDir)
			if err != nil {
				break
			}
			for _, entry := range entries {
				uploadID := strings.Split(entry, ".")[0]
				_, ok := foundUploadIDs[uploadID]
				if !ok {
					fsRemoveFile(ctx, pathJoin(bgAppendsDir, entry))
				}
			}

			bgAppendTmpCleaner.Reset(bgAppendsCleanupInterval)
		case <-expiryUploadsTimer.C:
			expiry := globalAPIConfig.getStaleUploadsExpiry()
			now := time.Now()

			uploadIDs := fs.getAllUploadIDs(ctx)

			for uploadID, path := range uploadIDs {
				fi, err := fsStatDir(ctx, path)
				if err != nil {
					continue
				}
				if now.Sub(fi.ModTime()) > expiry {
					fsRemoveAll(ctx, path)
					// Remove upload ID parent directory if empty
					fsRemoveDir(ctx, filepath.Base(path))

					// Remove uploadID from the append file map and its corresponding temporary file
					fs.appendFileMapMu.Lock()
					bgAppend, ok := fs.appendFileMap[uploadID]
					if ok {
						_ = fsRemoveFile(ctx, bgAppend.filePath)
						delete(fs.appendFileMap, uploadID)
					}
					fs.appendFileMapMu.Unlock()
				}
			}

			// Reset for the next interval
			expiryUploadsTimer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval())
		}
	}
}