Add extended checksum support (#15433)
commit a9f1ad7924 (parent 929b9e164e)
@@ -30,7 +30,7 @@ import (
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"google.golang.org/api/googleapi"

-	minio "github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/tags"
 	"github.com/minio/minio/internal/auth"
 	"github.com/minio/minio/internal/bucket/lifecycle"
@@ -232,6 +232,7 @@ const (

 	// S3 extended errors.
 	ErrContentSHA256Mismatch
+	ErrContentChecksumMismatch

 	// Add new extended error codes here.

@@ -392,6 +393,8 @@ const (
 	ErrAccountNotEligible
 	ErrAdminServiceAccountNotFound
 	ErrPostPolicyConditionInvalidFormat
+
+	ErrInvalidChecksum
 )

 type errorCodeMap map[APIErrorCode]APIError
@@ -1160,6 +1163,11 @@ var errorCodes = errorCodeMap{
 		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrContentChecksumMismatch: {
+		Code:           "XAmzContentChecksumMismatch",
+		Description:    "The provided 'x-amz-checksum' header does not match what was computed.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},

 	// MinIO extensions.
 	ErrStorageFull: {
@@ -1874,6 +1882,11 @@ var errorCodes = errorCodeMap{
 		Description:    "Invalid according to Policy: Policy Condition failed",
 		HTTPStatusCode: http.StatusForbidden,
 	},
+	ErrInvalidChecksum: {
+		Code:           "InvalidArgument",
+		Description:    "Invalid checksum provided.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	// Add your error structure here.
}

@@ -2046,6 +2059,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrSignatureDoesNotMatch
 	case hash.SHA256Mismatch:
 		apiErr = ErrContentSHA256Mismatch
+	case hash.ChecksumMismatch:
+		apiErr = ErrContentChecksumMismatch
 	case ObjectTooLarge:
 		apiErr = ErrEntityTooLarge
 	case ObjectTooSmall:
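Aside (not part of the diff): the value these new error codes validate is an S3-style checksum header such as x-amz-checksum-crc32c, whose value is the base64-encoded big-endian digest of the payload. A minimal, hedged sketch of computing that value with only the Go standard library:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// crc32cHeaderValue returns what a client would send as the
// x-amz-checksum-crc32c value for the given payload: the 4-byte
// big-endian CRC-32C digest, base64-encoded. If the server computes a
// different value, it would answer with the XAmzContentChecksumMismatch
// error added above.
func crc32cHeaderValue(payload []byte) string {
	sum := crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))
	buf := make([]byte, 4)
	binary.BigEndian.PutUint32(buf, sum)
	return base64.StdEncoding.EncodeToString(buf)
}

func main() {
	fmt.Println(crc32cHeaderValue([]byte("hello world")))
}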
@@ -31,6 +31,7 @@ import (

 	"github.com/minio/minio/internal/crypto"
 	"github.com/minio/minio/internal/handlers"
+	"github.com/minio/minio/internal/hash"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 )
@@ -163,6 +164,12 @@ type Part struct {
 	LastModified string
 	ETag         string
 	Size         int64
+
+	// Checksum values
+	ChecksumCRC32  string
+	ChecksumCRC32C string
+	ChecksumSHA1   string
+	ChecksumSHA256 string
 }

 // ListPartsResponse - format for list parts response.
@@ -184,6 +191,7 @@ type ListPartsResponse struct {
 	MaxParts    int
 	IsTruncated bool

+	ChecksumAlgorithm string
 	// List of parts.
 	Parts []Part `xml:"Part"`
 }
@@ -381,6 +389,11 @@ type CompleteMultipartUploadResponse struct {
 	Bucket string
 	Key    string
 	ETag   string
+
+	ChecksumCRC32  string
+	ChecksumCRC32C string
+	ChecksumSHA1   string
+	ChecksumSHA256 string
 }

 // DeleteError structure.
@@ -690,14 +703,19 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
 }

 // generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
-func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) CompleteMultipartUploadResponse {
-	return CompleteMultipartUploadResponse{
+func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
+	c := CompleteMultipartUploadResponse{
 		Location: location,
 		Bucket:   bucket,
 		Key:      key,
 		// AWS S3 quotes the ETag in XML, make sure we are compatible here.
-		ETag: "\"" + etag + "\"",
+		ETag:           "\"" + oi.ETag + "\"",
+		ChecksumSHA1:   oi.Checksum[hash.ChecksumSHA1.String()],
+		ChecksumSHA256: oi.Checksum[hash.ChecksumSHA256.String()],
+		ChecksumCRC32:  oi.Checksum[hash.ChecksumCRC32.String()],
+		ChecksumCRC32C: oi.Checksum[hash.ChecksumCRC32C.String()],
 	}
+	return c
 }

 // generates ListPartsResponse from ListPartsInfo.
@@ -722,6 +740,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
 	listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
 	listPartsResponse.IsTruncated = partsInfo.IsTruncated
 	listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
+	listPartsResponse.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm

 	listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
 	for index, part := range partsInfo.Parts {
@@ -730,6 +749,10 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
 		newPart.ETag = "\"" + part.ETag + "\""
 		newPart.Size = part.Size
 		newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
+		newPart.ChecksumCRC32 = part.ChecksumCRC32
+		newPart.ChecksumCRC32C = part.ChecksumCRC32C
+		newPart.ChecksumSHA1 = part.ChecksumSHA1
+		newPart.ChecksumSHA256 = part.ChecksumSHA256
 		listPartsResponse.Parts[index] = newPart
 	}
 	return listPartsResponse
File diff suppressed because one or more lines are too long
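Note (not part of the diff): one file's changes are suppressed in this view, so the ObjectLayer interface update and the definition of the new result type are not shown. Judging from the call sites later in the commit (res.UploadID, res.ChecksumAlgo), the new type is presumably along these lines — a sketch with field names inferred from usage, not the suppressed file's actual contents:

// NewMultipartUploadResult replaces the bare uploadID string previously
// returned by NewMultipartUpload. Field names are inferred from the
// hunks below; the real definition lives in a file not shown here.
type NewMultipartUploadResult struct {
	UploadID     string
	ChecksumAlgo string
}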
@ -25,7 +25,7 @@ import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
// Benchmark utility functions for ObjectLayer.PutObject().
|
||||
@ -85,12 +85,12 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
|
||||
// PutObjectPart returns etag of the object inserted.
|
||||
// etag variable is assigned with that value.
|
||||
var etag, uploadID string
|
||||
var etag string
|
||||
// get text data generated for number of bytes equal to object size.
|
||||
textData := generateBytesData(objSize)
|
||||
// generate md5sum for the generated data.
|
||||
// md5sum of the data to written is required as input for NewMultipartUpload.
|
||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
|
||||
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
|
||||
}
|
||||
md5hex := getMD5Hash(textPartData)
|
||||
var partInfo PartInfo
|
||||
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
|
||||
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
|
||||
mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
|
@ -87,7 +87,7 @@ type CacheObjectLayer interface {
|
||||
PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
// Multipart operations.
|
||||
NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
|
||||
NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error)
|
||||
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
|
||||
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
|
||||
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
@ -122,7 +122,7 @@ type cacheObjects struct {
|
||||
InnerDeleteObjectFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
InnerPutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
InnerCopyObjectFn func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
InnerNewMultipartUploadFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
|
||||
InnerNewMultipartUploadFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error)
|
||||
InnerPutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
|
||||
InnerAbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
|
||||
InnerCompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||
@ -866,7 +866,7 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjec
|
||||
InnerCopyObjectFn: func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||
return newObjectLayerFn().CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
|
||||
},
|
||||
InnerNewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
|
||||
InnerNewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
|
||||
return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, opts)
|
||||
},
|
||||
InnerPutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
|
||||
@ -961,7 +961,7 @@ func (c *cacheObjects) queuePendingWriteback(ctx context.Context) {
|
||||
}
|
||||
|
||||
// NewMultipartUpload - Starts a new multipart upload operation to backend - if writethrough mode is enabled, starts caching the multipart.
|
||||
func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
|
||||
func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
|
||||
newMultipartUploadFn := c.InnerNewMultipartUploadFn
|
||||
dcache, err := c.getCacheToLoc(ctx, bucket, object)
|
||||
if err != nil {
|
||||
@ -996,9 +996,11 @@ func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object st
|
||||
}
|
||||
|
||||
// perform multipart upload on backend and cache simultaneously
|
||||
uploadID, err = newMultipartUploadFn(ctx, bucket, object, opts)
|
||||
dcache.NewMultipartUpload(GlobalContext, bucket, object, uploadID, opts)
|
||||
return uploadID, err
|
||||
res, err = newMultipartUploadFn(ctx, bucket, object, opts)
|
||||
if err == nil {
|
||||
dcache.NewMultipartUpload(GlobalContext, bucket, object, res.UploadID, opts)
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
// PutObjectPart streams part to cache concurrently if writethrough mode is enabled. Otherwise redirects the call to remote
|
||||
|
@@ -514,6 +514,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 		partModTime := latestMeta.Parts[partIndex].ModTime
 		partNumber := latestMeta.Parts[partIndex].Number
 		partIdx := latestMeta.Parts[partIndex].Index
+		partChecksums := latestMeta.Parts[partIndex].Checksums
 		tillOffset := erasure.ShardFileOffset(0, partSize, partSize)
 		readers := make([]io.ReaderAt, len(latestDisks))
 		checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm
@@ -567,7 +568,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 			}

 			partsMetadata[i].DataDir = dstDataDir
-			partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partModTime, partIdx)
+			partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partModTime, partIdx, partChecksums)
 			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 				PartNumber: partNumber,
 				Algorithm:  checksumAlgo,
@ -561,14 +561,14 @@ func TestHealCorrectQuorum(t *testing.T) {
|
||||
|
||||
// Create an object with multiple parts uploaded in decreasing
|
||||
// part number.
|
||||
uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
res, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create a multipart upload - %v", err)
|
||||
}
|
||||
|
||||
var uploadedParts []CompletePart
|
||||
for _, partID := range []int{2, 1} {
|
||||
pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
if err1 != nil {
|
||||
t.Fatalf("Failed to upload a part - %v", err1)
|
||||
}
|
||||
@ -578,7 +578,7 @@ func TestHealCorrectQuorum(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
|
||||
_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to complete multipart upload - got: %v", err)
|
||||
}
|
||||
@ -694,10 +694,11 @@ func TestHealObjectCorruptedPools(t *testing.T) {
|
||||
z := objLayer.(*erasureServerPools)
|
||||
set := z.serverPools[1]
|
||||
|
||||
uploadID, err := set.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
res, err := set.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create a multipart upload - %v", err)
|
||||
}
|
||||
uploadID := res.UploadID
|
||||
|
||||
var uploadedParts []CompletePart
|
||||
for _, partID := range []int{2, 1} {
|
||||
@ -868,14 +869,14 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
|
||||
|
||||
// Create an object with multiple parts uploaded in decreasing
|
||||
// part number.
|
||||
uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
res, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create a multipart upload - %v", err)
|
||||
}
|
||||
|
||||
var uploadedParts []CompletePart
|
||||
for _, partID := range []int{2, 1} {
|
||||
pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
if err1 != nil {
|
||||
t.Fatalf("Failed to upload a part - %v", err1)
|
||||
}
|
||||
@ -885,7 +886,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
|
||||
_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to complete multipart upload - %v", err)
|
||||
}
|
||||
@ -1011,14 +1012,14 @@ func TestHealObjectCorruptedParts(t *testing.T) {
|
||||
|
||||
// Create an object with multiple parts uploaded in decreasing
|
||||
// part number.
|
||||
uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
res, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create a multipart upload - %v", err)
|
||||
}
|
||||
|
||||
var uploadedParts []CompletePart
|
||||
for _, partID := range []int{2, 1} {
|
||||
pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
if err1 != nil {
|
||||
t.Fatalf("Failed to upload a part - %v", err1)
|
||||
}
|
||||
@ -1028,7 +1029,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
|
||||
_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to complete multipart upload - %v", err)
|
||||
}
|
||||
@ -1168,14 +1169,14 @@ func TestHealObjectErasure(t *testing.T) {
|
||||
|
||||
// Create an object with multiple parts uploaded in decreasing
|
||||
// part number.
|
||||
uploadID, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
res, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create a multipart upload - %v", err)
|
||||
}
|
||||
|
||||
var uploadedParts []CompletePart
|
||||
for _, partID := range []int{2, 1} {
|
||||
pInfo, err1 := obj.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
pInfo, err1 := obj.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
|
||||
if err1 != nil {
|
||||
t.Fatalf("Failed to upload a part - %v", err1)
|
||||
}
|
||||
@ -1190,7 +1191,7 @@ func TestHealObjectErasure(t *testing.T) {
|
||||
er := z.serverPools[0].sets[0]
|
||||
firstDisk := er.getDisks()[0]
|
||||
|
||||
_, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
|
||||
_, err = obj.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to complete multipart upload - %v", err)
|
||||
}
|
||||
|
@@ -187,6 +187,7 @@ func (fi FileInfo) ToObjectInfo(bucket, object string, versioned bool) ObjectInf
 			objInfo.RestoreExpires, _ = restoreStatus.Expiry()
 		}
 	}
+	objInfo.Checksum = fi.Checksum
 	// Success.
 	return objInfo
 }
@@ -237,7 +238,7 @@ func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
 }

 // AddObjectPart - add a new object part in order.
-func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, modTime time.Time, idx []byte) {
+func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, modTime time.Time, idx []byte, checksums map[string]string) {
 	partInfo := ObjectPartInfo{
 		Number: partNumber,
 		ETag:   partETag,
@@ -245,6 +246,7 @@ func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, act
 		ActualSize: actualSize,
 		ModTime:    modTime,
 		Index:      idx,
+		Checksums:  checksums,
 	}

 	// Update part info if it already exists.
@@ -58,7 +58,7 @@ func TestAddObjectPart(t *testing.T) {
 	for _, testCase := range testCases {
 		if testCase.expectedIndex > -1 {
 			partNumString := strconv.Itoa(testCase.partNum)
-			fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil)
+			fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil, nil)
 		}

 		if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
@@ -91,7 +91,7 @@ func TestObjectPartIndex(t *testing.T) {
 	// Add some parts for testing.
 	for _, testCase := range testCases {
 		partNumString := strconv.Itoa(testCase.partNum)
-		fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil)
+		fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil, nil)
 	}

 	// Add failure test case.
@@ -121,7 +121,7 @@ func TestObjectToPartOffset(t *testing.T) {
 	// Total size of all parts is 5,242,899 bytes.
 	for _, partNum := range []int{1, 2, 4, 5, 7} {
 		partNumString := strconv.Itoa(partNum)
-		fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize, UTCNow(), nil)
+		fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize, UTCNow(), nil, nil)
 	}

 	testCases := []struct {
@@ -160,7 +160,7 @@ func TestObjectToPartOffset(t *testing.T) {
 func TestFindFileInfoInQuorum(t *testing.T) {
 	getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
 		fi := newFileInfo("test", 8, 8)
-		fi.AddObjectPart(1, "etag", 100, 100, UTCNow(), nil)
+		fi.AddObjectPart(1, "etag", 100, 100, UTCNow(), nil, nil)
 		fi.ModTime = time.Unix(t, 0)
 		fi.DataDir = dataDir
 		fis := make([]FileInfo, n)
@@ -32,6 +32,7 @@ import (

 	"github.com/klauspost/readahead"
 	"github.com/minio/minio-go/v7/pkg/set"
+	"github.com/minio/minio/internal/hash"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/sync/errgroup"
@@ -325,7 +326,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 // '.minio.sys/multipart/bucket/object/uploads.json' on all the
 // disks. `uploads.json` carries metadata regarding on-going multipart
 // operation(s) on the object.
-func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
+func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
 	userDefined := cloneMSS(opts.UserDefined)

 	onlineDisks := er.getDisks()
@@ -352,7 +353,6 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 	if parityOrig != parityDrives {
 		userDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives)
 	}
-
 	dataDrives := len(onlineDisks) - parityDrives

 	// we now know the number of blocks this object needs for data and parity.
@@ -382,6 +382,10 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 		userDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
 	}

+	if opts.WantChecksum != nil && opts.WantChecksum.Type.IsSet() {
+		userDefined[hash.MinIOMultipartChecksum] = opts.WantChecksum.Type.String()
+	}
+
 	modTime := opts.MTime
 	if opts.MTime.IsZero() {
 		modTime = UTCNow()
@@ -402,11 +406,12 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,

 	// Write updated `xl.meta` to all disks.
 	if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
-		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
+		return nil, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
 	}

 	// Return success.
-	return uploadID, nil
+	return &NewMultipartUploadResult{
+		UploadID:     uploadID,
+		ChecksumAlgo: userDefined[hash.MinIOMultipartChecksum],
+	}, nil
 }

 // NewMultipartUpload - initialize a new multipart upload, returns a
@@ -414,7 +419,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 // subsequent request each UUID is unique.
 //
 // Implements S3 compatible initiate multipart API.
-func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
+func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
 	auditObjectErasureSet(ctx, object, &er)

 	return er.newMultipartUpload(ctx, bucket, object, opts)
@@ -590,9 +595,18 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	// Pick one from the first valid metadata.
 	fi, err := pickValidFileInfo(pctx, partsMetadata, modTime, writeQuorum)
 	if err != nil {
-		return pi, err
+		return pi, toObjectErr(err)
 	}

+	if cs := fi.Metadata[hash.MinIOMultipartChecksum]; cs != "" {
+		if r.ContentCRCType().String() != cs {
+			return pi, InvalidArgument{
+				Bucket: bucket,
+				Object: fi.Name,
+				Err:    fmt.Errorf("checksum missing"),
+			}
+		}
+	}
 	onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)

 	// Need a unique name for the part being written in minioMetaBucket to
@@ -703,6 +717,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 		ActualSize: data.ActualSize(),
 		ModTime:    UTCNow(),
 		Index:      index,
+		Checksums:  r.ContentCRC(),
 	}

 	partMsg, err := part.MarshalMsg(nil)
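Aside (not in the diff): the guard above only checks that each incoming part declares a checksum of the same type recorded when the upload was initiated; the digest itself is presumably verified by the hash reader while the part body streams in. A hedged, standalone illustration of that type check, using plain strings rather than MinIO's internal types:

package main

import (
	"errors"
	"fmt"
)

// requireSameChecksumAlgo mirrors the idea of the PutObjectPart guard:
// if an algorithm was recorded at NewMultipartUpload time, every part
// must declare that same algorithm, otherwise the part is rejected.
func requireSameChecksumAlgo(wantAlgo, gotAlgo string) error {
	if wantAlgo != "" && gotAlgo != wantAlgo {
		return errors.New("checksum missing")
	}
	return nil
}

func main() {
	fmt.Println(requireSameChecksumAlgo("CRC32C", "CRC32C")) // <nil>
	fmt.Println(requireSameChecksumAlgo("CRC32C", ""))       // checksum missing
}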
@@ -718,11 +733,15 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo

 	// Return success.
 	return PartInfo{
-		PartNumber:   part.Number,
-		ETag:         part.ETag,
-		LastModified: part.ModTime,
-		Size:         part.Size,
-		ActualSize:   part.ActualSize,
+		PartNumber:     part.Number,
+		ETag:           part.ETag,
+		LastModified:   part.ModTime,
+		Size:           part.Size,
+		ActualSize:     part.ActualSize,
+		ChecksumCRC32:  part.Checksums["CRC32"],
+		ChecksumCRC32C: part.Checksums["CRC32C"],
+		ChecksumSHA1:   part.Checksums["SHA1"],
+		ChecksumSHA256: part.Checksums["SHA256"],
 	}, nil
 }

@@ -872,6 +891,7 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
 	result.MaxParts = maxParts
 	result.PartNumberMarker = partNumberMarker
 	result.UserDefined = cloneMSS(fi.Metadata)
+	result.ChecksumAlgorithm = fi.Metadata[hash.MinIOMultipartChecksum]

 	// For empty number of parts or maxParts as zero, return right here.
 	if len(partInfoFiles) == 0 || maxParts == 0 {
@@ -898,7 +918,7 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
 		}

 		// Add the current part.
-		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index)
+		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index, partI.Checksums)
 	}

 	// Only parts with higher part numbers will be listed.
@@ -906,11 +926,15 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
 	result.Parts = make([]PartInfo, 0, len(parts))
 	for _, part := range parts {
 		result.Parts = append(result.Parts, PartInfo{
-			PartNumber:   part.Number,
-			ETag:         part.ETag,
-			LastModified: part.ModTime,
-			ActualSize:   part.ActualSize,
-			Size:         part.Size,
+			PartNumber:     part.Number,
+			ETag:           part.ETag,
+			LastModified:   part.ModTime,
+			ActualSize:     part.ActualSize,
+			Size:           part.Size,
+			ChecksumCRC32:  part.Checksums["CRC32"],
+			ChecksumCRC32C: part.Checksums["CRC32C"],
+			ChecksumSHA1:   part.Checksums["SHA1"],
+			ChecksumSHA256: part.Checksums["SHA256"],
 		})
 		if len(result.Parts) >= maxParts {
 			break
@@ -1000,7 +1024,20 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 		return oi, toObjectErr(err, bucket, object)
 	}

-	var partI ObjectPartInfo
+	// Checksum type set when upload started.
+	var checksumType hash.ChecksumType
+	if cs := fi.Metadata[hash.MinIOMultipartChecksum]; cs != "" {
+		checksumType = hash.NewChecksumType(cs)
+		if opts.WantChecksum != nil && !opts.WantChecksum.Type.Is(checksumType) {
+			return oi, InvalidArgument{
+				Bucket: bucket,
+				Object: fi.Name,
+				Err:    fmt.Errorf("checksum type mismatch"),
+			}
+		}
+	}
+	var checksumCombined []byte
+
 	for i, part := range partInfoFiles {
 		partID := parts[i].PartNumber
 		if part.Error != "" || !part.Exists {
@@ -1009,6 +1046,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 			}
 		}

+		var partI ObjectPartInfo
 		_, err := partI.UnmarshalMsg(part.Data)
 		if err != nil {
 			// Maybe crash or similar.
@@ -1026,7 +1064,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 		}

 		// Add the current part.
-		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index)
+		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index, partI.Checksums)
 	}

 	// Calculate full object size.
@@ -1056,43 +1094,86 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 			}
 			return oi, invp
 		}
+		gotPart := currentFI.Parts[partIdx]

 		// ensure that part ETag is canonicalized to strip off extraneous quotes
 		part.ETag = canonicalizeETag(part.ETag)
-		if currentFI.Parts[partIdx].ETag != part.ETag {
+		if gotPart.ETag != part.ETag {
 			invp := InvalidPart{
 				PartNumber: part.PartNumber,
-				ExpETag:    currentFI.Parts[partIdx].ETag,
+				ExpETag:    gotPart.ETag,
 				GotETag:    part.ETag,
 			}
 			return oi, invp
 		}

+		if checksumType.IsSet() {
+			crc := gotPart.Checksums[checksumType.String()]
+			if crc == "" {
+				return oi, InvalidPart{
+					PartNumber: part.PartNumber,
+				}
+			}
+			wantCS := map[string]string{
+				hash.ChecksumCRC32.String():  part.ChecksumCRC32,
+				hash.ChecksumCRC32C.String(): part.ChecksumCRC32C,
+				hash.ChecksumSHA1.String():   part.ChecksumSHA1,
+				hash.ChecksumSHA256.String(): part.ChecksumSHA256,
+			}
+			if wantCS[checksumType.String()] != crc {
+				return oi, InvalidPart{
+					PartNumber: part.PartNumber,
+					ExpETag:    wantCS[checksumType.String()],
+					GotETag:    crc,
+				}
+			}
+			cs := hash.NewChecksumString(checksumType.String(), crc)
+			if !cs.Valid() {
+				return oi, InvalidPart{
+					PartNumber: part.PartNumber,
+				}
+			}
+			checksumCombined = append(checksumCombined, cs.Raw()...)
+		}
+
 		// All parts except the last part has to be at least 5MB.
 		if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) {
 			return oi, PartTooSmall{
 				PartNumber: part.PartNumber,
-				PartSize:   currentFI.Parts[partIdx].ActualSize,
+				PartSize:   gotPart.ActualSize,
 				PartETag:   part.ETag,
 			}
 		}

 		// Save for total object size.
-		objectSize += currentFI.Parts[partIdx].Size
+		objectSize += gotPart.Size

 		// Save the consolidated actual size.
-		objectActualSize += currentFI.Parts[partIdx].ActualSize
+		objectActualSize += gotPart.ActualSize

 		// Add incoming parts.
 		fi.Parts[i] = ObjectPartInfo{
 			Number: part.PartNumber,
-			Size:       currentFI.Parts[partIdx].Size,
-			ActualSize: currentFI.Parts[partIdx].ActualSize,
-			ModTime:    currentFI.Parts[partIdx].ModTime,
-			Index:      currentFI.Parts[partIdx].Index,
+			Size:       gotPart.Size,
+			ActualSize: gotPart.ActualSize,
+			ModTime:    gotPart.ModTime,
+			Index:      gotPart.Index,
+			Checksums:  nil, // Not transferred since we do not need it.
 		}
 	}

+	if opts.WantChecksum != nil {
+		err := opts.WantChecksum.Matches(checksumCombined)
+		if err != nil {
+			return oi, err
+		}
+	}
+	if checksumType.IsSet() {
+		cs := hash.NewChecksumFromData(checksumType, checksumCombined)
+		fi.Checksum = map[string]string{cs.Type.String(): cs.Encoded}
+	}
+	delete(fi.Metadata, hash.MinIOMultipartChecksum) // Not needed in final object.
+
 	// Save the final object size and modtime.
 	fi.Size = objectSize
 	fi.ModTime = opts.MTime
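For intuition (not part of the diff): the value hashed by hash.NewChecksumFromData above is checksumCombined, the concatenation of each part's checksum (cs.Raw(), presumably the decoded raw digest bytes), so the object-level checksum is effectively a checksum-of-checksums. A minimal standalone sketch of that idea, assuming SHA-256 parts and using only the standard library:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// compositeSHA256 decodes the base64 per-part digests, concatenates the
// raw bytes, and hashes the concatenation - the checksum-of-checksums
// pattern used for multipart objects. Error handling omitted for brevity.
func compositeSHA256(partChecksums []string) string {
	var combined []byte
	for _, cs := range partChecksums {
		raw, _ := base64.StdEncoding.DecodeString(cs) // per-part digest
		combined = append(combined, raw...)
	}
	sum := sha256.Sum256(combined)
	return base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	p1 := sha256.Sum256([]byte("part-1"))
	p2 := sha256.Sum256([]byte("part-2"))
	fmt.Println(compositeSHA256([]string{
		base64.StdEncoding.EncodeToString(p1[:]),
		base64.StdEncoding.EncodeToString(p2[:]),
	}))
}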
@ -837,7 +837,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
|
||||
continue
|
||||
}
|
||||
partsMetadata[i].Data = inlineBuffers[i].Bytes()
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index, nil)
|
||||
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
|
||||
PartNumber: 1,
|
||||
Algorithm: DefaultBitrotAlgorithm,
|
||||
@ -962,6 +962,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
}
|
||||
|
||||
fi.DataDir = mustGetUUID()
|
||||
fi.Checksum = opts.WantChecksum.AsMap()
|
||||
uniqueID := mustGetUUID()
|
||||
tempObj := uniqueID
|
||||
|
||||
@ -1105,7 +1106,8 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
|
||||
} else {
|
||||
partsMetadata[i].Data = nil
|
||||
}
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, compIndex)
|
||||
// No need to add checksum to part. We already have it on the object.
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, compIndex, nil)
|
||||
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
|
||||
PartNumber: 1,
|
||||
Algorithm: DefaultBitrotAlgorithm,
|
||||
@ -1911,7 +1913,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
|
||||
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
|
||||
}
|
||||
|
||||
uploadID, err := er.NewMultipartUpload(ctx, bucket, object, ropts)
|
||||
res, err := er.NewMultipartUpload(ctx, bucket, object, ropts)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
}
|
||||
@ -1931,7 +1933,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
}
|
||||
pInfo, err := er.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
|
||||
pInfo, err := er.PutObjectPart(ctx, bucket, object, res.UploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
}
|
||||
@ -1943,7 +1945,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
|
||||
ETag: pInfo.ETag,
|
||||
})
|
||||
}
|
||||
_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
|
||||
_, err = er.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{
|
||||
MTime: oi.ModTime,
|
||||
})
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
|
@ -31,7 +31,7 @@ import (
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/minio/minio/internal/config/storageclass"
|
||||
)
|
||||
|
||||
@ -58,18 +58,18 @@ func TestRepeatPutObjectPart(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
uploadID, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts)
|
||||
res, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
|
||||
md5Hex := getMD5Hash(fiveMBBytes)
|
||||
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
|
||||
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
|
||||
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
|
||||
_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -587,7 +587,7 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
|
||||
}
|
||||
|
||||
if objInfo.isMultipart() {
|
||||
uploadID, err := z.NewMultipartUpload(ctx, bucket, objInfo.Name, ObjectOptions{
|
||||
res, err := z.NewMultipartUpload(ctx, bucket, objInfo.Name, ObjectOptions{
|
||||
VersionID: objInfo.VersionID,
|
||||
MTime: objInfo.ModTime,
|
||||
UserDefined: objInfo.UserDefined,
|
||||
@ -595,14 +595,14 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
|
||||
if err != nil {
|
||||
return fmt.Errorf("decommissionObject: NewMultipartUpload() %w", err)
|
||||
}
|
||||
defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, uploadID, ObjectOptions{})
|
||||
defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
|
||||
parts := make([]CompletePart, len(objInfo.Parts))
|
||||
for i, part := range objInfo.Parts {
|
||||
hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
|
||||
if err != nil {
|
||||
return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
|
||||
}
|
||||
pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, uploadID,
|
||||
pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
|
||||
part.Number,
|
||||
NewPutObjReader(hr),
|
||||
ObjectOptions{
|
||||
@ -615,11 +615,15 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
|
||||
return fmt.Errorf("decommissionObject: PutObjectPart() %w", err)
|
||||
}
|
||||
parts[i] = CompletePart{
|
||||
ETag: pi.ETag,
|
||||
PartNumber: pi.PartNumber,
|
||||
ETag: pi.ETag,
|
||||
PartNumber: pi.PartNumber,
|
||||
ChecksumCRC32: pi.ChecksumCRC32,
|
||||
ChecksumCRC32C: pi.ChecksumCRC32C,
|
||||
ChecksumSHA256: pi.ChecksumSHA256,
|
||||
ChecksumSHA1: pi.ChecksumSHA1,
|
||||
}
|
||||
}
|
||||
_, err = z.CompleteMultipartUpload(ctx, bucket, objInfo.Name, uploadID, parts, ObjectOptions{
|
||||
_, err = z.CompleteMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, parts, ObjectOptions{
|
||||
MTime: objInfo.ModTime,
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -1376,14 +1376,14 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
|
||||
}
|
||||
|
||||
// Initiate a new multipart upload on a hashedSet based on object name.
|
||||
func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
|
||||
func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
|
||||
if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if z.SinglePool() {
|
||||
if !isMinioMetaBucketName(bucket) && !hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()...), -1) {
|
||||
return "", toObjectErr(errDiskFull)
|
||||
return nil, toObjectErr(errDiskFull)
|
||||
}
|
||||
return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
|
||||
}
|
||||
@ -1394,7 +1394,7 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
|
||||
}
|
||||
result, err := pool.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
// If there is a multipart upload with the same bucket/object name,
|
||||
// create the new multipart in the same pool, this will avoid
|
||||
@ -1408,7 +1408,7 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
|
||||
// to return since this holds a read lock on the namespace.
|
||||
idx, err := z.getPoolIdx(ctx, bucket, object, -1)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts)
|
||||
|
@@ -1079,7 +1079,7 @@ func (s *erasureSets) ListMultipartUploads(ctx context.Context, bucket, prefix,
 }

 // Initiate a new multipart upload on a hashedSet based on object name.
-func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
+func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
 	set := s.getHashedSet(object)
 	return set.NewMultipartUpload(ctx, bucket, object, opts)
 }
@ -919,7 +919,7 @@ func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *
|
||||
continue
|
||||
}
|
||||
partsMetadata[i].Data = inlineBuffers[i].Bytes()
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index, nil)
|
||||
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
|
||||
PartNumber: 1,
|
||||
Algorithm: DefaultBitrotAlgorithm,
|
||||
@ -1163,7 +1163,7 @@ func (es *erasureSingle) putObject(ctx context.Context, bucket string, object st
|
||||
} else {
|
||||
partsMetadata[i].Data = nil
|
||||
}
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
|
||||
partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index, nil)
|
||||
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
|
||||
PartNumber: 1,
|
||||
Algorithm: DefaultBitrotAlgorithm,
|
||||
@ -1896,7 +1896,7 @@ func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket s
|
||||
return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
|
||||
}
|
||||
|
||||
uploadID, err := es.NewMultipartUpload(ctx, bucket, object, ropts)
|
||||
result, err := es.NewMultipartUpload(ctx, bucket, object, ropts)
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
}
|
||||
@ -1916,7 +1916,7 @@ func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket s
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
}
|
||||
pInfo, err := es.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
|
||||
pInfo, err := es.PutObjectPart(ctx, bucket, object, result.UploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
|
||||
if err != nil {
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
}
|
||||
@ -1928,7 +1928,7 @@ func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket s
|
||||
ETag: pInfo.ETag,
|
||||
})
|
||||
}
|
||||
_, err = es.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
|
||||
_, err = es.CompleteMultipartUpload(ctx, bucket, object, result.UploadID, uploadedParts, ObjectOptions{
|
||||
MTime: oi.ModTime,
|
||||
})
|
||||
return setRestoreHeaderFn(oi, err)
|
||||
@ -2136,7 +2136,7 @@ func (es *erasureSingle) ListMultipartUploads(ctx context.Context, bucket, objec
|
||||
// '.minio.sys/multipart/bucket/object/uploads.json' on all the
|
||||
// disks. `uploads.json` carries metadata regarding on-going multipart
|
||||
// operation(s) on the object.
|
||||
func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
|
||||
func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
|
||||
onlineDisks := []StorageAPI{es.disk}
|
||||
parityDrives := 0
|
||||
dataDrives := len(onlineDisks) - parityDrives
|
||||
@ -2188,11 +2188,11 @@ func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string,
|
||||
|
||||
// Write updated `xl.meta` to all disks.
|
||||
if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
|
||||
return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
|
||||
return nil, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
|
||||
}
|
||||
|
||||
// Return success.
|
||||
return uploadID, nil
|
||||
return &NewMultipartUploadResult{UploadID: uploadID}, nil
|
||||
}
|
||||
|
||||
// NewMultipartUpload - initialize a new multipart upload, returns a
|
||||
@ -2200,9 +2200,9 @@ func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string,
|
||||
// subsequent request each UUID is unique.
|
||||
//
|
||||
// Implements S3 compatible initiate multipart API.
|
||||
func (es *erasureSingle) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
|
||||
func (es *erasureSingle) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
|
||||
if err := checkNewMultipartArgs(ctx, bucket, object, es); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// No metadata is set, allocate a new one.
|
||||
@ -2441,7 +2441,7 @@ func (es *erasureSingle) PutObjectPart(ctx context.Context, bucket, object, uplo
|
||||
}
|
||||
|
||||
// Add the current part.
|
||||
fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), fi.ModTime, index)
|
||||
fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), fi.ModTime, index, nil)
|
||||
|
||||
for i, disk := range onlineDisks {
|
||||
if disk == OfflineDisk {
|
||||
|
@ -218,13 +218,13 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
|
||||
// subsequent request each UUID is unique.
|
||||
//
|
||||
// Implements S3 compatible initiate multipart API.
|
||||
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
|
||||
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
|
||||
if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
|
||||
return "", toObjectErr(err, bucket)
|
||||
return nil, toObjectErr(err, bucket)
|
||||
}
|
||||
|
||||
if _, err := fs.statBucketDir(ctx, bucket); err != nil {
|
||||
return "", toObjectErr(err, bucket)
|
||||
return nil, toObjectErr(err, bucket)
|
||||
}
|
||||
|
||||
uploadID := mustGetUUID()
|
||||
@ -233,7 +233,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
|
||||
err := mkdirAll(uploadIDDir, 0o755)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize fs.json values.
|
||||
@ -243,15 +243,14 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
|
||||
fsMetaBytes, err := json.Marshal(fsMeta)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return uploadID, nil
|
||||
return &NewMultipartUploadResult{UploadID: uploadID}, nil
|
||||
}
|
||||
|
||||
// CopyObjectPart - similar to PutObjectPart but reads data from an existing
|
||||
|
@ -47,7 +47,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(GlobalContext)
|
||||
obj.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{})
|
||||
|
||||
uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{})
|
||||
res, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{})
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected err: ", err)
|
||||
}
|
||||
@ -78,7 +78,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
|
||||
cleanupWg.Wait()
|
||||
|
||||
// Check if upload id was already purged.
|
||||
if err = obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, uploadID, ObjectOptions{}); err != nil {
|
||||
if err = obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, ObjectOptions{}); err != nil {
|
||||
if _, ok := err.(InvalidUploadID); !ok {
|
||||
t.Fatal("Unexpected err: ", err)
|
||||
}
|
||||
@ -128,7 +128,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
|
||||
t.Fatal("Cannot create bucket, err: ", err)
|
||||
}
|
||||
|
||||
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
@ -139,7 +139,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
|
||||
newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
|
||||
defer os.RemoveAll(newDisk)
|
||||
obj = initFSObjects(newDisk, t)
|
||||
if _, err = obj.PutObjectPart(GlobalContext, bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{}); err != nil {
|
||||
if _, err = obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{}); err != nil {
|
||||
if !isSameType(err, BucketNotFound{}) {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
@ -161,7 +161,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
|
||||
t.Fatal("Cannot create bucket, err: ", err)
|
||||
}
|
||||
|
||||
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
@ -172,7 +172,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
|
||||
newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
|
||||
defer os.RemoveAll(newDisk)
|
||||
obj = initFSObjects(newDisk, t)
|
||||
if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, uploadID, parts, ObjectOptions{}); err != nil {
|
||||
if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, parts, ObjectOptions{}); err != nil {
|
||||
if !isSameType(err, BucketNotFound{}) {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
@ -194,19 +194,19 @@ func TestCompleteMultipartUpload(t *testing.T) {
|
||||
t.Fatal("Cannot create bucket, err: ", err)
|
||||
}
|
||||
|
||||
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
|
||||
md5Hex := getMD5Hash(data)
|
||||
|
||||
if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
|
||||
if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
|
||||
parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
|
||||
if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, uploadID, parts, ObjectOptions{}); err != nil {
|
||||
if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, parts, ObjectOptions{}); err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
}
|
||||
@ -231,7 +231,7 @@ func TestAbortMultipartUpload(t *testing.T) {
|
||||
t.Fatal("Cannot create bucket, err: ", err)
|
||||
}
|
||||
|
||||
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
|
||||
if err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
@ -239,10 +239,10 @@ func TestAbortMultipartUpload(t *testing.T) {
|
||||
md5Hex := getMD5Hash(data)
|
||||
|
||||
opts := ObjectOptions{}
|
||||
if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), opts); err != nil {
|
||||
if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), opts); err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
if err := obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, uploadID, opts); err != nil {
|
||||
if err := obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, opts); err != nil {
|
||||
t.Fatal("Unexpected error ", err)
|
||||
}
|
||||
}
|
||||
|
@ -435,7 +435,7 @@ func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string,
}

// NewMultipartUpload uploads object in multiple parts
func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (uploadID string, err error) {
func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket, object string, o minio.ObjectOptions) (result *minio.NewMultipartUploadResult, err error) {
var sseOpts encrypt.ServerSide
if o.ServerSideEncryption == nil {
return l.s3Objects.NewMultipartUpload(ctx, bucket, object, minio.ObjectOptions{UserDefined: o.UserDefined})
@ -446,7 +446,7 @@ func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, ob
sseOpts = o.ServerSideEncryption
}

uploadID, err = l.s3Objects.NewMultipartUpload(ctx, bucket, getGWContentPath(object), minio.ObjectOptions{ServerSideEncryption: sseOpts})
result, err = l.s3Objects.NewMultipartUpload(ctx, bucket, getGWContentPath(object), minio.ObjectOptions{ServerSideEncryption: sseOpts})
if err != nil {
return
}
@ -454,11 +454,11 @@ func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, ob
gwmeta := newGWMetaV1()
gwmeta.Meta = o.UserDefined
gwmeta.Stat.ModTime = time.Now().UTC()
err = l.writeGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID), gwmeta, minio.ObjectOptions{})
err = l.writeGWMetadata(ctx, bucket, getTmpDareMetaPath(object, result.UploadID), gwmeta, minio.ObjectOptions{})
if err != nil {
return uploadID, minio.ErrorRespToObjectError(err)
return nil, minio.ErrorRespToObjectError(err)
}
return uploadID, nil
return result, nil
}

// PutObject creates a new object with the incoming data,
|
@ -607,13 +607,13 @@ func (l *s3Objects) ListMultipartUploads(ctx context.Context, bucket string, pre
}

// NewMultipartUpload upload object in multiple parts
func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (uploadID string, err error) {
func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket, object string, o minio.ObjectOptions) (result *minio.NewMultipartUploadResult, err error) {
var tagMap map[string]string
userDefined := minio.CloneMSS(o.UserDefined)
if tagStr, ok := userDefined[xhttp.AmzObjectTagging]; ok {
tagObj, err := tags.Parse(tagStr, true)
if err != nil {
return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
return nil, minio.ErrorRespToObjectError(err, bucket, object)
}
tagMap = tagObj.ToMap()
delete(userDefined, xhttp.AmzObjectTagging)
@ -624,11 +624,11 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
ServerSideEncryption: o.ServerSideEncryption,
UserTags: tagMap,
}
uploadID, err = l.Client.NewMultipartUpload(ctx, bucket, object, opts)
uploadID, err := l.Client.NewMultipartUpload(ctx, bucket, object, opts)
if err != nil {
return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
return nil, minio.ErrorRespToObjectError(err, bucket, object)
}
return uploadID, nil
return &minio.NewMultipartUploadResult{UploadID: uploadID}, nil
}

// PutObjectPart puts a part of object in bucket
|
@ -23,7 +23,7 @@ import (
"math"
"time"

humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/hash"
@ -178,6 +178,9 @@ type ObjectInfo struct {
NumVersions int
// The modtime of the successor object version if any
SuccessorModTime time.Time

// User-Defined object tags
Checksum map[string]string
}

// ArchiveInfo returns any saved zip archive meta information
@ -329,6 +332,9 @@ type ListPartsInfo struct {

// Any metadata set during InitMultipartUpload, including encryption headers.
UserDefined map[string]string

// ChecksumAlgorithm if set
ChecksumAlgorithm string
}

// Lookup - returns if uploadID is valid
@ -505,6 +511,12 @@ type PartInfo struct {

// Decompressed Size.
ActualSize int64

// Checksum values
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
}

// CompletePart - represents the part that was completed, this is sent by the client
@ -516,6 +528,12 @@ type CompletePart struct {

// Entity tag returned when the part was uploaded.
ETag string

// Checksum values. Optional.
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
}

// CompletedParts - is a collection satisfying sort.Interface.
@ -530,3 +548,9 @@ func (a CompletedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
type CompleteMultipartUpload struct {
Parts []CompletePart `xml:"Part"`
}

// NewMultipartUploadResult contains information about a newly created multipart upload.
type NewMultipartUploadResult struct {
UploadID string
ChecksumAlgo string
}
|
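Note on the call pattern: because NewMultipartUpload now returns *NewMultipartUploadResult instead of a bare upload ID string, every ObjectLayer caller in this commit is updated to read res.UploadID. A minimal sketch of the new pattern (objAPI, ctx, bucket and object are placeholder names, not part of this commit):

res, err := objAPI.NewMultipartUpload(ctx, bucket, object, ObjectOptions{})
if err != nil {
	return err
}
uploadID := res.UploadID // previously the string return value itself
// res.ChecksumAlgo, when set, names the checksum algorithm for this upload (assumption based on the struct above).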
@ -26,6 +26,7 @@ import (
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/hash"
"github.com/minio/pkg/bucket/policy"

"github.com/minio/minio/internal/bucket/replication"
@ -59,6 +60,8 @@ type ObjectOptions struct {
Transition TransitionOptions
Expiration ExpirationOptions

WantChecksum *hash.Checksum // x-amz-checksum-XXX checksum sent to PutObject/ CompleteMultipartUpload.

NoDecryption bool // indicates if the stream must be decrypted.
PreserveETag string // preserves this etag during a PUT call.
NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
@ -222,7 +225,7 @@ type ObjectLayer interface {

// Multipart operations.
ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (result *NewMultipartUploadResult, err error)
CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
|
@ -25,7 +25,7 @@ import (
"strings"
"testing"

humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/hash"
)
@ -62,12 +62,12 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
t.Fatalf("%s : %s", instanceType, err.Error())
}

uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, "\\", opts)
res, err := obj.NewMultipartUpload(context.Background(), bucket, "\\", opts)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}

err = obj.AbortMultipartUpload(context.Background(), bucket, "\\", uploadID, opts)
err = obj.AbortMultipartUpload(context.Background(), bucket, "\\", res.UploadID, opts)
if err != nil {
switch err.(type) {
case InvalidUploadID:
@ -95,10 +95,11 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
t.Fatalf("%s : %s", instanceType, err.Error())
}

uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
uploadID := res.UploadID

abortTestCases := []struct {
bucketName string
@ -173,11 +174,12 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
uploadID := res.UploadID
// Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", MakeBucketOptions{})
if err != nil {
@ -204,50 +206,50 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
}{
// Test case 1-4.
// Cases with invalid bucket name.
{".test", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: .test")},
{"------", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: ------")},
{bucketName: ".test", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: .test")},
{bucketName: "------", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: ------")},
{
"$this-is-not-valid-too", "obj", "", 1, "", "", "", 0, false, "",
fmt.Errorf("%s", "Bucket not found: $this-is-not-valid-too"),
bucketName: "$this-is-not-valid-too", objName: "obj", PartID: 1,
expectedError: fmt.Errorf("%s", "Bucket not found: $this-is-not-valid-too"),
},
{"a", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: a")},
{bucketName: "a", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: a")},
// Test case - 5.
// Case with invalid object names.
{bucket, "", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Object name invalid: minio-bucket/")},
{bucketName: bucket, PartID: 1, expectedError: fmt.Errorf("%s", "Object name invalid: minio-bucket/")},
// Test case - 6.
// Valid object and bucket names but non-existent bucket.
{"abc", "def", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: abc")},
{bucketName: "abc", objName: "def", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: abc")},
// Test Case - 7.
// Existing bucket, but using a bucket on which NewMultipartUpload is not Initiated.
{"unused-bucket", "def", "xyz", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")},
{bucketName: "unused-bucket", objName: "def", uploadID: "xyz", PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 8.
// Existing bucket, object name different from which NewMultipartUpload is constructed from.
// Expecting "Invalid upload id".
{bucket, "def", "xyz", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")},
{bucketName: bucket, objName: "def", uploadID: "xyz", PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 9.
// Existing bucket, bucket and object name are the ones from which NewMultipartUpload is constructed from.
// But the uploadID is invalid.
// Expecting "Invalid upload id".
{bucket, object, "xyz", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")},
{bucketName: bucket, objName: object, uploadID: "xyz", PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 10.
// Case with valid UploadID, existing bucket name.
// But using the bucket name from which NewMultipartUpload is not constructed from.
{"unused-bucket", object, uploadID, 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)},
{bucketName: "unused-bucket", objName: object, uploadID: uploadID, PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id "+uploadID)},
// Test Case - 11.
// Case with valid UploadID, existing bucket name.
// But using the object name from which NewMultipartUpload is not constructed from.
{bucket, "none-object", uploadID, 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)},
{bucketName: bucket, objName: "none-object", uploadID: uploadID, PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id "+uploadID)},
// Test case - 12.
// Input to replicate Md5 mismatch.
{
bucket, object, uploadID, 1, "", "d41d8cd98f00b204e9800998ecf8427f", "", 0, false, "",
hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"},
bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputMd5: "d41d8cd98f00b204e9800998ecf8427f",
expectedError: hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"},
},
// Test case - 13.
// When incorrect sha256 is provided.
{
bucket, object, uploadID, 1, "", "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", 0, false, "",
hash.SHA256Mismatch{
bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854",
expectedError: hash.SHA256Mismatch{
ExpectedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854",
CalculatedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
},
@ -255,22 +257,22 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
// Test case - 14.
// Input with size more than the size of actual data inside the reader.
{
bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f3335", "", int64(len("abcd") + 1), false, "",
hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f3335", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f3335", intputDataSize: int64(len("abcd") + 1),
expectedError: hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f3335", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
},
// Test case - 15.
// Input with size less than the size of actual data inside the reader.
{
bucket, object, uploadID, 1, "abcd", "900150983cd24fb0d6963f7d28e17f73", "", int64(len("abcd") - 1), false, "",
hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"},
bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "900150983cd24fb0d6963f7d28e17f73", intputDataSize: int64(len("abcd") - 1),
expectedError: hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"},
},

// Test case - 16-19.
// Validating for success cases.
{bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", int64(len("abcd")), true, "", nil},
{bucket, object, uploadID, 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", "e5e088a0b66163a0a26a5e053d2a4496dc16ab6e0e3dd1adf2d16aa84a078c9d", int64(len("efgh")), true, "", nil},
{bucket, object, uploadID, 3, "ijkl", "09a0877d04abf8759f99adec02baf579", "005c19658919186b85618c5870463eec8d9b8c1a9d00208a5352891ba5bbe086", int64(len("abcd")), true, "", nil},
{bucket, object, uploadID, 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", "f1afc31479522d6cff1ed068f93998f05a8cd3b22f5c37d7f307084f62d1d270", int64(len("abcd")), true, "", nil},
{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f331f", inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", intputDataSize: int64(len("abcd")), shouldPass: true},
{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 2, inputReaderData: "efgh", inputMd5: "1f7690ebdd9b4caf8fab49ca1757bf27", inputSHA256: "e5e088a0b66163a0a26a5e053d2a4496dc16ab6e0e3dd1adf2d16aa84a078c9d", intputDataSize: int64(len("efgh")), shouldPass: true},
{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 3, inputReaderData: "ijkl", inputMd5: "09a0877d04abf8759f99adec02baf579", inputSHA256: "005c19658919186b85618c5870463eec8d9b8c1a9d00208a5352891ba5bbe086", intputDataSize: int64(len("abcd")), shouldPass: true},
{bucketName: bucket, objName: object, uploadID: uploadID, PartID: 4, inputReaderData: "mnop", inputMd5: "e132e96a5ddad6da8b07bba6f6131fef", inputSHA256: "f1afc31479522d6cff1ed068f93998f05a8cd3b22f5c37d7f307084f62d1d270", intputDataSize: int64(len("abcd")), shouldPass: true},
}

// Validate all the test cases.
@ -320,13 +322,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

uploadIDs = append(uploadIDs, uploadID)
uploadIDs = append(uploadIDs, res.UploadID)

// bucketnames[1].
// objectNames[0].
@ -340,13 +342,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
for i := 0; i < 3; i++ {
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
// Used to test the listing for the case of multiple uploadID's for a given object.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts)
res, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

uploadIDs = append(uploadIDs, uploadID)
uploadIDs = append(uploadIDs, res.UploadID)
}

// Bucket to test for mutiple objects, each with unique UUID.
@ -361,14 +363,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// Initiate Multipart Upload on bucketNames[2].
// Used to test the listing for the case of multiple objects for a given bucket.
for i := 0; i < 6; i++ {
var uploadID string
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts)
res, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
// uploadIds [4-9].
uploadIDs = append(uploadIDs, uploadID)
uploadIDs = append(uploadIDs, res.UploadID)
}
// Create multipart parts.
// Need parts to be uploaded before MultipartLists can be called and tested.
@ -1202,7 +1203,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
}
opts := ObjectOptions{}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -1211,7 +1212,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
// Remove some random disk.
removeDiskN(disks, 1)

uploadIDs = append(uploadIDs, uploadID)
uploadIDs = append(uploadIDs, res.UploadID)

// Create multipart parts.
// Need parts to be uploaded before MultipartLists can be called and tested.
@ -1445,13 +1446,13 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

uploadIDs = append(uploadIDs, uploadID)
uploadIDs = append(uploadIDs, res.UploadID)

// Create multipart parts.
// Need parts to be uploaded before MultipartLists can be called and tested.
@ -1672,7 +1673,6 @@ func TestObjectCompleteMultipartUpload(t *testing.T) {
// Tests validate CompleteMultipart functionality.
func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t TestErrHandler) {
var err error
var uploadID string
bucketNames := []string{"minio-bucket", "minio-2-bucket"}
objectNames := []string{"minio-object-1.txt"}
uploadIDs := []string{}
@ -1687,13 +1687,13 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
t.Fatalf("%s : %s", instanceType, err)
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-Id": "id"}})
res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-Id": "id"}})
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err)
}

uploadIDs = append(uploadIDs, uploadID)
uploadIDs = append(uploadIDs, res.UploadID)
// Parts with size greater than 5 MiB.
// Generating a 6MiB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)
|
@ -28,6 +28,7 @@ import (
"github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
)
@ -231,6 +232,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
}
}
}

mtimeStr := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceMTime))
mtime := UTCNow()
if mtimeStr != "" {
@ -289,6 +291,15 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
metadata["etag"] = etag
}

wantCRC, err := hash.GetContentChecksum(r)
if err != nil {
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
Err: fmt.Errorf("invalid/unknown checksum sent: %v", err),
}
}

// In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it
// is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls
if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) {
@ -299,6 +310,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
Versioned: versioned,
VersionSuspended: versionSuspended,
MTime: mtime,
WantChecksum: wantCRC,
}, nil
}
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
@ -307,6 +319,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
opts.Versioned = versioned
opts.VersionSuspended = versionSuspended
opts.UserDefined = metadata
opts.WantChecksum = wantCRC
return
}
if crypto.S3KMS.IsRequested(r.Header) {
@ -325,6 +338,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
Versioned: versioned,
VersionSuspended: versionSuspended,
MTime: mtime,
WantChecksum: wantCRC,
}, nil
}
// default case of passing encryption headers and UserDefined metadata to backend
@ -339,6 +353,8 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
opts.ReplicationSourceLegalholdTimestamp = lholdtimestmp
opts.ReplicationSourceRetentionTimestamp = retaintimestmp
opts.ReplicationSourceTaggingTimestamp = taggingtimestmp
opts.WantChecksum = wantCRC

return opts, nil
}

@ -389,6 +405,14 @@ func completeMultipartOpts(ctx context.Context, r *http.Request, bucket, object
}
}
}
opts.WantChecksum, err = hash.GetContentChecksum(r)
if err != nil {
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
Err: fmt.Errorf("invalid/unknown checksum sent: %v", err),
}
}
opts.MTime = mtime
opts.UserDefined = make(map[string]string)
return opts, nil
|
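Recap of the shared flow added above: both putOpts and completeMultipartOpts parse the client's x-amz-checksum-* headers via hash.GetContentChecksum and carry the result on ObjectOptions.WantChecksum, so the object layer can verify what it actually read. A condensed sketch, using only names visible in this diff (the surrounding handler scaffolding is assumed):

wantCRC, err := hash.GetContentChecksum(r) // nil when the client sent no checksum header
if err != nil {
	// a malformed or unknown checksum header is rejected up front
	return opts, InvalidArgument{Bucket: bucket, Object: object, Err: err}
}
opts.WantChecksum = wantCRC // verified downstream against the computed content checksum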
@ -28,7 +28,7 @@ import (
"path"
"testing"

humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/hash"
)

@ -80,125 +80,110 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
expectedMd5 string
expectedError error
}{
// Test case 1-4.
// Cases with invalid bucket name.
{".test", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: ".test"}},
{"------", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "------"}},
{
"$this-is-not-valid-too", "obj", []byte(""), nil, "", 0, "",
BucketNotFound{Bucket: "$this-is-not-valid-too"},
0: {bucketName: ".test", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: ".test"}},
1: {bucketName: "------", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "------"}},
2: {
bucketName: "$this-is-not-valid-too", objName: "obj", inputData: []byte(""),
expectedError: BucketNotFound{Bucket: "$this-is-not-valid-too"},
},
{"a", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "a"}},
3: {bucketName: "a", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "a"}},

// Test case - 5.
// Case with invalid object names.
{bucket, "", []byte(""), nil, "", 0, "", ObjectNameInvalid{Bucket: bucket, Object: ""}},
4: {bucketName: bucket, inputData: []byte(""), expectedError: ObjectNameInvalid{Bucket: bucket, Object: ""}},

// Test case - 6.
// Valid object and bucket names but non-existent bucket.
{"abc", "def", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "abc"}},
5: {bucketName: "abc", objName: "def", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "abc"}},

// Test case - 7.
// Input to replicate Md5 mismatch.
{
bucket, object, []byte(""),
map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"},
"", 0, "",
hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"},
6: {
bucketName: bucket, objName: object, inputData: []byte(""),
inputMeta: map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"},
expectedError: hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"},
},

// Test case - 8.
// With incorrect sha256.
{
bucket, object, []byte("abcd"),
map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"},
"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", int64(len("abcd")),
"",
hash.SHA256Mismatch{
7: {
bucketName: bucket, objName: object, inputData: []byte("abcd"),
inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"},
inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", intputDataSize: int64(len("abcd")),
expectedError: hash.SHA256Mismatch{
ExpectedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580",
CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589",
},
},

// Test case - 9.
// Input with size more than the size of actual data inside the reader.
{
bucket, object, []byte("abcd"),
map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"},
"", int64(len("abcd") + 1), "",
hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
8: {
bucketName: bucket, objName: object, inputData: []byte("abcd"),
inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, intputDataSize: int64(len("abcd") + 1),
expectedError: hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
},

// Test case - 10.
// Input with size less than the size of actual data inside the reader.
{
bucket, object, []byte("abcd"),
map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"},
"", int64(len("abcd") - 1), "",
hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"},
9: {
bucketName: bucket, objName: object, inputData: []byte("abcd"),
inputMeta: map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, intputDataSize: int64(len("abcd") - 1),
expectedError: hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"},
},

// Test case - 11-14.
// Validating for success cases.
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, "", int64(len("abcd")), "", nil},
{bucket, object, []byte("efgh"), map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, "", int64(len("efgh")), "", nil},
{bucket, object, []byte("ijkl"), map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, "", int64(len("ijkl")), "", nil},
{bucket, object, []byte("mnop"), map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, "", int64(len("mnop")), "", nil},
10: {bucketName: bucket, objName: object, inputData: []byte("abcd"), inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, intputDataSize: int64(len("abcd"))},
11: {bucketName: bucket, objName: object, inputData: []byte("efgh"), inputMeta: map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, intputDataSize: int64(len("efgh"))},
12: {bucketName: bucket, objName: object, inputData: []byte("ijkl"), inputMeta: map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, intputDataSize: int64(len("ijkl"))},
13: {bucketName: bucket, objName: object, inputData: []byte("mnop"), inputMeta: map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, intputDataSize: int64(len("mnop"))},

// Test case 15-17.
// With no metadata
{bucket, object, data, nil, "", int64(len(data)), getMD5Hash(data), nil},
{bucket, object, nilBytes, nil, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil},
{bucket, object, fiveMBBytes, nil, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil},
14: {bucketName: bucket, objName: object, inputData: data, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
15: {bucketName: bucket, objName: object, inputData: nilBytes, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
16: {bucketName: bucket, objName: object, inputData: fiveMBBytes, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},

// Test case 18-20.
// With arbitrary metadata
{bucket, object, data, map[string]string{"answer": "42"}, "", int64(len(data)), getMD5Hash(data), nil},
{bucket, object, nilBytes, map[string]string{"answer": "42"}, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil},
{bucket, object, fiveMBBytes, map[string]string{"answer": "42"}, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil},
17: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
18: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
19: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},

// Test case 21-23.
// With valid md5sum and sha256.
{bucket, object, data, md5Header(data), getSHA256Hash(data), int64(len(data)), getMD5Hash(data), nil},
{bucket, object, nilBytes, md5Header(nilBytes), getSHA256Hash(nilBytes), int64(len(nilBytes)), getMD5Hash(nilBytes), nil},
{bucket, object, fiveMBBytes, md5Header(fiveMBBytes), getSHA256Hash(fiveMBBytes), int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil},
20: {bucketName: bucket, objName: object, inputData: data, inputMeta: md5Header(data), inputSHA256: getSHA256Hash(data), intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
21: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: md5Header(nilBytes), inputSHA256: getSHA256Hash(nilBytes), intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
22: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: md5Header(fiveMBBytes), inputSHA256: getSHA256Hash(fiveMBBytes), intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},

// Test case 24-26.
// data with invalid md5sum in header
{
bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data),
hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)},
23: {
bucketName: bucket, objName: object, inputData: data, inputMeta: invalidMD5Header, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data),
expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)},
},
{
bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes),
hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)},
24: {
bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: invalidMD5Header, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes),
expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)},
},
{
bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes),
hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)},
25: {
bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: invalidMD5Header, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes),
expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)},
},

// Test case 27-29.
// data with size different from the actual number of bytes available in the reader
{bucket, object, data, nil, "", int64(len(data) - 1), getMD5Hash(data[:len(data)-1]), nil},
{bucket, object, nilBytes, nil, "", int64(len(nilBytes) + 1), getMD5Hash(nilBytes), IncompleteBody{Bucket: bucket, Object: object}},
{bucket, object, fiveMBBytes, nil, "", 0, getMD5Hash(fiveMBBytes), nil},
26: {bucketName: bucket, objName: object, inputData: data, intputDataSize: int64(len(data) - 1), expectedMd5: getMD5Hash(data[:len(data)-1])},
27: {bucketName: bucket, objName: object, inputData: nilBytes, intputDataSize: int64(len(nilBytes) + 1), expectedMd5: getMD5Hash(nilBytes), expectedError: IncompleteBody{Bucket: bucket, Object: object}},
28: {bucketName: bucket, objName: object, inputData: fiveMBBytes, expectedMd5: getMD5Hash(fiveMBBytes)},

// Test case 30
// valid data with X-Amz-Meta- meta
{bucket, object, data, map[string]string{"X-Amz-Meta-AppID": "a42"}, "", int64(len(data)), getMD5Hash(data), nil},
29: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"X-Amz-Meta-AppID": "a42"}, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},

// Test case 31
// Put an empty object with a trailing slash
{bucket, "emptydir/", []byte{}, nil, "", 0, getMD5Hash([]byte{}), nil},
// Test case 32
30: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})},
// Put an object inside the empty directory
{bucket, "emptydir/" + object, data, nil, "", int64(len(data)), getMD5Hash(data), nil},
// Test case 33
31: {bucketName: bucket, objName: "emptydir/" + object, inputData: data, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
// Put the empty object with a trailing slash again (refer to Test case 31), this needs to succeed
{bucket, "emptydir/", []byte{}, nil, "", 0, getMD5Hash([]byte{}), nil},
}
32: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})},

// With invalid crc32.
33: {
bucketName: bucket, objName: object, inputData: []byte("abcd"),
inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f", "x-amz-checksum-crc32": "abcd"},
intputDataSize: int64(len("abcd")),
},
}
for i, testCase := range testCases {
in := mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256)
objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, in, ObjectOptions{UserDefined: testCase.inputMeta})
@ -403,11 +388,12 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
}
opts := ObjectOptions{}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
res, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
uploadID := res.UploadID

// Upload part1.
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
|
@ -25,6 +25,7 @@ import (
"time"

"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
)
@ -266,6 +267,7 @@ func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool) {
lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts())
}
}
hash.AddChecksumHeader(w, objInfo.Checksum)
}

func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toDel []ObjectToDelete) {
|
@ -519,6 +519,10 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
}
}

if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
hash.AddChecksumHeader(w, objInfo.Checksum)
}

if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
@ -783,6 +787,10 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
}
}

if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
hash.AddChecksumHeader(w, objInfo.Checksum)
}

// Set standard object headers.
if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
@ -1740,7 +1748,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if err = actualReader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
// Set compression metrics.
var s2c io.ReadCloser
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
@ -1758,6 +1769,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := hashReader.AddChecksum(r, size < 0); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}

rawReader := hashReader
pReader := NewPutObjReader(rawReader)
@ -1895,7 +1910,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}

setPutObjHeaders(w, objInfo, false)

writeSuccessResponseHeadersOnly(w)

// Notify object created event.
@ -1915,6 +1929,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
enqueueTransitionImmediate(objInfo)
logger.LogIf(ctx, os.Sweep())
}
// Do not send checksums in events to avoid leaks.
hash.TransferChecksumHeader(w, r)
}

// PutObjectExtractHandler - PUT Object extract is an extended API
@ -2051,6 +2067,10 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = hreader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}

if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@ -2220,6 +2240,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
}

w.Header()[xhttp.ETag] = []string{`"` + hex.EncodeToString(hreader.MD5Current()) + `"`}
hash.TransferChecksumHeader(w, r)
writeSuccessResponseHeadersOnly(w)
}

|
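The handler tests that follow exercise the new x-amz-checksum-* headers end to end, and they build header values exactly the way their checksumData helper does: the raw digest of the payload, base64-encoded. A standalone, runnable sketch of the client-side computation (names and payload here are illustrative, not part of the commit):

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("hello, world")
	// Use crc32.MakeTable(crc32.Castagnoli) instead for x-amz-checksum-crc32c.
	sum := crc32.New(crc32.IEEETable)
	sum.Write(payload)
	// This base64 value is what a client would place in the x-amz-checksum-crc32 request header.
	fmt.Println(base64.StdEncoding.EncodeToString(sum.Sum(nil)))
}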
@ -21,10 +21,13 @@ import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
@ -37,8 +40,9 @@ import (
"sync"
"testing"

humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http"
ioutilx "github.com/minio/minio/internal/ioutil"
)
@ -1295,26 +1299,29 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// byte data for PutObject.
bytesData := generateBytesData(6 * humanize.KiByte)

copySourceHeader := http.Header{}
copySourceHeader.Set("X-Amz-Copy-Source", "somewhere")
invalidMD5Header := http.Header{}
invalidMD5Header.Set("Content-Md5", "42")
inalidStorageClassHeader := http.Header{}
inalidStorageClassHeader.Set(xhttp.AmzStorageClass, "INVALID")
copySourceHeader := map[string]string{"X-Amz-Copy-Source": "somewhere"}
invalidMD5Header := map[string]string{"Content-Md5": "42"}
inalidStorageClassHeader := map[string]string{xhttp.AmzStorageClass: "INVALID"}

addCustomHeaders := func(req *http.Request, customHeaders http.Header) {
for k, values := range customHeaders {
for _, value := range values {
req.Header.Set(k, value)
}
addCustomHeaders := func(req *http.Request, customHeaders map[string]string) {
for k, value := range customHeaders {
req.Header.Set(k, value)
}
}

checksumData := func(b []byte, h hash.Hash) string {
h.Reset()
_, err := h.Write(b)
if err != nil {
t.Fatal(err)
}
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
// test cases with inputs and expected result for GetObject.
testCases := []struct {
bucketName string
objectName string
headers http.Header
headers map[string]string
data []byte
dataLen int
accessKey string
@ -1322,10 +1329,11 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
fault Fault
// expected output.
expectedRespStatus int // expected response status body.
wantAPICode string
wantHeaders map[string]string
}{
// Test case - 1.
// Fetching the entire object and validating its contents.
{
0: {
bucketName: bucketName,
objectName: objectName,
data: bytesData,
@ -1335,9 +1343,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a

expectedRespStatus: http.StatusOK,
},
// Test case - 2.
// Test Case with invalid accessID.
{
1: {
bucketName: bucketName,
objectName: objectName,
data: bytesData,
@ -1346,10 +1353,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
secretKey: credentials.SecretKey,

expectedRespStatus: http.StatusForbidden,
wantAPICode: "InvalidAccessKeyId",
},
// Test case - 3.
// Test Case with invalid header key X-Amz-Copy-Source.
{
2: {
bucketName: bucketName,
objectName: objectName,
headers: copySourceHeader,
@ -1358,10 +1365,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidArgument",
},
// Test case - 4.
// Test Case with invalid Content-Md5 value
{
3: {
bucketName: bucketName,
objectName: objectName,
headers: invalidMD5Header,
@ -1370,10 +1377,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidDigest",
},
// Test case - 5.
// Test Case with object greater than maximum allowed size.
{
4: {
bucketName: bucketName,
objectName: objectName,
data: bytesData,
@ -1382,10 +1389,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
secretKey: credentials.SecretKey,
fault: TooBigObject,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "EntityTooLarge",
},
// Test case - 6.
// Test Case with missing content length
{
5: {
bucketName: bucketName,
objectName: objectName,
data: bytesData,
@ -1394,10 +1401,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
secretKey: credentials.SecretKey,
fault: MissingContentLength,
expectedRespStatus: http.StatusLengthRequired,
wantAPICode: "MissingContentLength",
},
// Test case - 7.
// Test Case with invalid header key X-Amz-Storage-Class
{
6: {
bucketName: bucketName,
objectName: objectName,
headers: inalidStorageClassHeader,
@ -1406,6 +1413,92 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidStorageClass",
},

// Invalid crc32
7: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32": "123"},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidArgument",
},
// Wrong crc32
8: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32": "MTIzNA=="},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "XAmzContentChecksumMismatch",
},
// Correct crc32
9: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32": checksumData(bytesData, crc32.New(crc32.IEEETable))},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-crc32": checksumData(bytesData, crc32.New(crc32.IEEETable))},
},
// Correct crc32c
10: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32c": checksumData(bytesData, crc32.New(crc32.MakeTable(crc32.Castagnoli)))},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-crc32c": checksumData(bytesData, crc32.New(crc32.MakeTable(crc32.Castagnoli)))},
},
// CRC32 as CRC32C
11: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32c": checksumData(bytesData, crc32.New(crc32.IEEETable))},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "XAmzContentChecksumMismatch",
},
// SHA1
12: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-sha1": checksumData(bytesData, sha1.New())},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-sha1": checksumData(bytesData, sha1.New())},
},
// SHA256
13: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-sha256": checksumData(bytesData, sha256.New())},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-sha256": checksumData(bytesData, sha256.New())},
},
}
// Iterating over the cases, fetching the object validating the response.
@ -1415,9 +1508,9 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
rec := httptest.NewRecorder()
// construct HTTP request for Get Object end point.
req, err = newTestSignedRequestV4(http.MethodPut, getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, nil)
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, testCase.headers)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i+1, err)
t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i, err)
}
// Add test case specific headers to the request.
addCustomHeaders(req, testCase.headers)
@ -1435,22 +1528,48 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code)
b, _ := io.ReadAll(rec.Body)
t.Fatalf("Test %d: Expected the response status to be `%d`, but instead found `%d`: %s", i, testCase.expectedRespStatus, rec.Code, string(b))
}
if testCase.expectedRespStatus != http.StatusOK {
b, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatal(err)
}
var apiErr APIErrorResponse
err = xml.Unmarshal(b, &apiErr)
if err != nil {
t.Fatal(err)
}
gotErr := apiErr.Code
wantErr := testCase.wantAPICode
if gotErr != wantErr {
t.Errorf("test %d: want api error %q, got %q", i, wantErr, gotErr)
}
if testCase.wantHeaders != nil {
for k, v := range testCase.wantHeaders {
got := rec.Header().Get(k)
if got != v {
t.Errorf("Want header %s = %s, got %#v", k, v, rec.Header())
}
}
}

}
if testCase.expectedRespStatus == http.StatusOK {
buffer := new(bytes.Buffer)
// Fetch the object to check whether the content is same as the one uploaded via PutObject.
gr, err := obj.GetObjectNInfo(context.Background(), testCase.bucketName, testCase.objectName, nil, nil, readLock, opts)
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
}
if _, err = io.Copy(buffer, gr); err != nil {
gr.Close()
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
}
gr.Close()
if !bytes.Equal(bytesData, buffer.Bytes()) {
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i+1, instanceType)
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i, instanceType)
}
buffer.Reset()
}
@ -1460,10 +1579,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT Object endpoint.
reqV2, err = newTestSignedRequestV2(http.MethodPut, getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, nil)
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, testCase.headers)

if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutObject: <ERROR> %v", i+1, instanceType, err)
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutObject: <ERROR> %v", i, instanceType, err)
}

// Add test case specific headers to the request.
@ -1482,7 +1601,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
b, _ := io.ReadAll(rec.Body)
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`: %s", i, instanceType, testCase.expectedRespStatus, recV2.Code, string(b))
}

if testCase.expectedRespStatus == http.StatusOK {
@ -1490,17 +1610,26 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// Fetch the object to check whether the content is same as the one uploaded via PutObject.
gr, err := obj.GetObjectNInfo(context.Background(), testCase.bucketName, testCase.objectName, nil, nil, readLock, opts)
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
}
if _, err = io.Copy(buffer, gr); err != nil {
gr.Close()
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
}
gr.Close()
if !bytes.Equal(bytesData, buffer.Bytes()) {
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i+1, instanceType)
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i, instanceType)
}
buffer.Reset()

if testCase.wantHeaders != nil {
for k, v := range testCase.wantHeaders {
got := recV2.Header().Get(k)
if got != v {
t.Errorf("Want header %s = %s, got %#v", k, v, recV2.Header())
}
}
}
}
}

@ -1585,11 +1714,12 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
}
uploadID := res.UploadID

a := 0
b := globalMinPartSize
@ -1701,11 +1831,12 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer.
|
||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
|
||||
res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
|
||||
if err != nil {
|
||||
// Failed to create NewMultipartUpload, abort.
|
||||
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
|
||||
}
|
||||
uploadID := res.UploadID
|
||||
|
||||
// test cases with inputs and expected result for Copy Object.
|
||||
testCases := []struct {
|
||||
@ -2664,20 +2795,18 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
|
||||
// object used for the test.
|
||||
objectName := "test-object-new-multipart"
|
||||
|
||||
// uploadID obtained from NewMultipart upload.
|
||||
var uploadID string
|
||||
// upload IDs collected.
|
||||
var uploadIDs []string
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
// initiate new multipart uploadID.
|
||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts)
|
||||
res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts)
|
||||
if err != nil {
|
||||
// Failed to create NewMultipartUpload, abort.
|
||||
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
uploadIDs = append(uploadIDs, uploadID)
|
||||
uploadIDs = append(uploadIDs, res.UploadID)
|
||||
}
|
||||
|
||||
// Parts with size greater than 5 MiB.
|
||||
@ -2778,7 +2907,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
|
||||
s3MD5 := getCompleteMultipartMD5(inputParts[3].parts)
|
||||
|
||||
// generating the response body content for the success case.
|
||||
successResponse := generateCompleteMultpartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), s3MD5)
|
||||
successResponse := generateCompleteMultpartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), ObjectInfo{ETag: s3MD5})
|
||||
encodedSuccessResponse := encodeResponse(successResponse)
|
||||
|
||||
ctx := context.Background()
|
||||
@ -3034,20 +3163,18 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
|
||||
// object used for the test.
|
||||
objectName := "test-object-new-multipart"
|
||||
|
||||
// uploadID obtained from NewMultipart upload.
|
||||
var uploadID string
|
||||
// upload IDs collected.
|
||||
var uploadIDs []string
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
// initiate new multipart uploadID.
|
||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts)
|
||||
res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts)
|
||||
if err != nil {
|
||||
// Failed to create NewMultipartUpload, abort.
|
||||
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
uploadIDs = append(uploadIDs, uploadID)
|
||||
uploadIDs = append(uploadIDs, res.UploadID)
|
||||
}
|
||||
|
||||
// Parts with size greater than 5 MiB.
|
||||
@ -3445,161 +3572,132 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
|
||||
// PutObjectPart API HTTP Handler has to be tested in isolation,
|
||||
// that is without any other handler being registered,
|
||||
// That's why NewMultipartUpload is initiated using ObjectLayer.
|
||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
|
||||
res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
|
||||
if err != nil {
|
||||
// Failed to create NewMultipartUpload, abort.
|
||||
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
|
||||
}
|
||||
uploadID := res.UploadID
|
||||
|
||||
uploadIDCopy := uploadID
|
||||
|
||||
// expected error types for invalid inputs to PutObjectPartHandler.
|
||||
noAPIErr := APIError{}
|
||||
// expected error when content length is missing in the HTTP request.
|
||||
missingContent := getAPIError(ErrMissingContentLength)
|
||||
// expected error when content length is too large.
|
||||
entityTooLarge := getAPIError(ErrEntityTooLarge)
|
||||
// expected error when the signature check fails.
|
||||
badSigning := getAPIError(ErrSignatureDoesNotMatch)
|
||||
// expected error when an MD5 sum mismatch occurs.
|
||||
badChecksum := getAPIError(ErrInvalidDigest)
|
||||
// expected error when the part number in the request is invalid.
|
||||
invalidPart := getAPIError(ErrInvalidPart)
|
||||
// expected error when maxPart is beyond the limit.
|
||||
invalidMaxParts := getAPIError(ErrInvalidMaxParts)
|
||||
// expected error when the uploadID is invalid.
|
||||
noSuchUploadID := getAPIError(ErrNoSuchUpload)
|
||||
// expected error when InvalidAccessID is set.
|
||||
invalidAccessID := getAPIError(ErrInvalidAccessKeyID)
|
||||
|
||||
// SignatureMismatch for various signing types
|
||||
testCases := []struct {
|
||||
objectName string
|
||||
reader io.ReadSeeker
|
||||
content string
|
||||
partNumber string
|
||||
fault Fault
|
||||
accessKey string
|
||||
secretKey string
|
||||
|
||||
expectedAPIError APIError
|
||||
expectedAPIError APIErrorCode
|
||||
}{
|
||||
// Test case - 1.
|
||||
// Success case.
|
||||
{
|
||||
0: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "1",
|
||||
fault: None,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: noAPIErr,
|
||||
expectedAPIError: -1,
|
||||
},
|
||||
// Test case - 2.
|
||||
// Case where part number is invalid.
|
||||
{
|
||||
1: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "9999999999999999999",
|
||||
fault: None,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: invalidPart,
|
||||
expectedAPIError: ErrInvalidPart,
|
||||
},
|
||||
// Test case - 3.
|
||||
// Case where the part number has exceeded the max allowed parts in an upload.
|
||||
{
|
||||
2: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: strconv.Itoa(globalMaxPartID + 1),
|
||||
fault: None,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: invalidMaxParts,
|
||||
expectedAPIError: ErrInvalidMaxParts,
|
||||
},
|
||||
// Test case - 4.
|
||||
// Case where the content length is not set in the HTTP request.
|
||||
{
|
||||
3: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "1",
|
||||
fault: MissingContentLength,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: missingContent,
|
||||
expectedAPIError: ErrMissingContentLength,
|
||||
},
|
||||
// Test case - 5.
|
||||
// case where the object size is set to a value greater than the max allowed size.
|
||||
{
|
||||
4: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "1",
|
||||
fault: TooBigObject,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: entityTooLarge,
|
||||
expectedAPIError: ErrEntityTooLarge,
|
||||
},
|
||||
// Test case - 6.
|
||||
// case where a signature mismatch is introduced and the response is validated.
|
||||
{
|
||||
5: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "1",
|
||||
fault: BadSignature,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: badSigning,
|
||||
expectedAPIError: ErrSignatureDoesNotMatch,
|
||||
},
|
||||
// Test case - 7.
|
||||
// Case where incorrect checksum is set and the error response
|
||||
// is asserted with the expected error response.
|
||||
{
|
||||
6: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "1",
|
||||
fault: BadMD5,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: badChecksum,
|
||||
expectedAPIError: ErrInvalidDigest,
|
||||
},
|
||||
// Test case - 8.
|
||||
// case where a non-existent uploadID is set.
|
||||
{
|
||||
7: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "1",
|
||||
fault: MissingUploadID,
|
||||
accessKey: credentials.AccessKey,
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: noSuchUploadID,
|
||||
expectedAPIError: ErrNoSuchUpload,
|
||||
},
|
||||
// Test case - 9.
|
||||
// case with invalid AccessID.
|
||||
// Forcing the signature check inside the handler to fail.
|
||||
{
|
||||
8: {
|
||||
objectName: testObject,
|
||||
reader: bytes.NewReader([]byte("hello")),
|
||||
content: "hello",
|
||||
partNumber: "1",
|
||||
fault: None,
|
||||
accessKey: "Invalid-AccessID",
|
||||
secretKey: credentials.SecretKey,
|
||||
|
||||
expectedAPIError: invalidAccessID,
|
||||
expectedAPIError: ErrInvalidAccessKeyID,
|
||||
},
|
||||
}
|
||||
|
||||
reqV2Str := "V2 Signed HTTP request"
|
||||
reqV4Str := "V4 Signed HTTP request"
|
||||
|
||||
// collection of input HTTP request, ResponseRecorder and request type.
|
||||
// Used to make a collection of V4 and V2 HTTP requests.
|
||||
type inputReqRec struct {
|
||||
req *http.Request
|
||||
rec *httptest.ResponseRecorder
|
||||
@ -3608,7 +3706,9 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
|
||||
|
||||
for i, test := range testCases {
|
||||
// Using sub-tests introduced in Go 1.7.
|
||||
t.Run(fmt.Sprintf("MinIO %s : Test case %d.", instanceType, i+1), func(t *testing.T) {
|
||||
t.Run(fmt.Sprintf("MinIO-%s-Test-%d.", instanceType, i), func(t *testing.T) {
|
||||
// collection of input HTTP request, ResponseRecorder and request type.
|
||||
// Used to make a collection of V4 and V2 HTTP requests.
|
||||
var reqV4, reqV2 *http.Request
|
||||
var recV4, recV2 *httptest.ResponseRecorder
|
||||
|
||||
@ -3623,7 +3723,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
|
||||
// constructing a v4 signed HTTP request.
|
||||
reqV4, err = newTestSignedRequestV4(http.MethodPut,
|
||||
getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber),
|
||||
0, test.reader, test.accessKey, test.secretKey, nil)
|
||||
int64(len(test.content)), bytes.NewReader([]byte(test.content)), test.accessKey, test.secretKey, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create a signed V4 request to upload part for %s/%s: <ERROR> %v",
|
||||
bucketName, test.objectName, err)
|
||||
@ -3632,10 +3732,10 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
|
||||
// construct HTTP request for PutObject Part Object endpoint.
|
||||
reqV2, err = newTestSignedRequestV2(http.MethodPut,
|
||||
getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber),
|
||||
0, test.reader, test.accessKey, test.secretKey, nil)
|
||||
int64(len(test.content)), bytes.NewReader([]byte(test.content)), test.accessKey, test.secretKey, nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d %s Failed to create a V2 signed request to upload part for %s/%s: <ERROR> %v", i+1, instanceType,
|
||||
t.Fatalf("Test %d %s Failed to create a V2 signed request to upload part for %s/%s: <ERROR> %v", i, instanceType,
|
||||
bucketName, test.objectName, err)
|
||||
}
|
||||
|
||||
@ -3661,6 +3761,9 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
|
||||
// HTTP request type string for V4/V2 requests.
|
||||
reqType := reqRec.reqType
|
||||
|
||||
// Clone so we don't retain values we do not want.
|
||||
req.Header = req.Header.Clone()
|
||||
|
||||
// introduce faults in the request.
|
||||
// deliberately introducing the invalid value to be able to assert the response with the expected error response.
|
||||
switch test.fault {
|
||||
@ -3684,7 +3787,13 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
|
||||
apiRouter.ServeHTTP(rec, req)
|
||||
|
||||
// validate the error response.
|
||||
if test.expectedAPIError != noAPIErr {
|
||||
want := getAPIError(test.expectedAPIError)
|
||||
if test.expectedAPIError == -1 {
|
||||
want.HTTPStatusCode = 200
|
||||
want.Code = "<no error>"
|
||||
want.Description = "<no error>"
|
||||
}
|
||||
if rec.Code != http.StatusOK {
|
||||
var errBytes []byte
|
||||
// read the response body.
|
||||
errBytes, err = ioutil.ReadAll(rec.Result().Body)
|
||||
@ -3700,14 +3809,16 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
|
||||
reqType, bucketName, test.objectName, err)
|
||||
}
|
||||
// Validate whether the error has occurred for the expected reason.
|
||||
if test.expectedAPIError.Code != errXML.Code {
|
||||
t.Errorf("%s, Expected to fail with error \"%s\", but received \"%s\".",
|
||||
reqType, test.expectedAPIError.Code, errXML.Code)
|
||||
if want.Code != errXML.Code {
|
||||
t.Errorf("%s, Expected to fail with error \"%s\", but received \"%s\": %q.",
|
||||
reqType, want.Code, errXML.Code, errXML.Message)
|
||||
}
|
||||
// Validate the HTTP response status code with the expected one.
|
||||
if test.expectedAPIError.HTTPStatusCode != rec.Code {
|
||||
t.Errorf("%s, Expected the HTTP response status code to be %d, got %d.", reqType, test.expectedAPIError.HTTPStatusCode, rec.Code)
|
||||
if want.HTTPStatusCode != rec.Code {
|
||||
t.Errorf("%s, Expected the HTTP response status code to be %d, got %d.", reqType, want.HTTPStatusCode, rec.Code)
|
||||
}
|
||||
} else if want.HTTPStatusCode != http.StatusOK {
|
||||
t.Errorf("got 200 ok, want %d", rec.Code)
|
||||
}
|
||||
}
|
||||
})
|
||||
@ -3849,12 +3960,12 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
|
||||
// PutObjectPart API HTTP Handler has to be tested in isolation,
|
||||
// that is without any other handler being registered,
|
||||
// That's why NewMultipartUpload is initiated using ObjectLayer.
|
||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
|
||||
res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
|
||||
if err != nil {
|
||||
// Failed to create NewMultipartUpload, abort.
|
||||
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
uploadID := res.UploadID
|
||||
uploadIDCopy := uploadID
|
||||
|
||||
// create an object Part, will be used to test list object parts.
|
||||
|
@ -155,6 +155,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
|
||||
metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
|
||||
metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
|
||||
}
|
||||
|
||||
// We need to preserve the encryption headers set in EncryptRequest,
|
||||
// so we do not want to override them, copy them instead.
|
||||
for k, v := range encMetadata {
|
||||
@ -174,18 +175,30 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
checksumType := hash.NewChecksumType(r.Header.Get(xhttp.AmzChecksumAlgo))
|
||||
if checksumType.Is(hash.ChecksumInvalid) {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequestParameter), r.URL)
|
||||
return
|
||||
} else if checksumType.IsSet() && !checksumType.Is(hash.ChecksumTrailing) {
|
||||
opts.WantChecksum = &hash.Checksum{Type: checksumType}
|
||||
}
|
||||
|
||||
newMultipartUpload := objectAPI.NewMultipartUpload
|
||||
if api.CacheAPI() != nil {
|
||||
newMultipartUpload = api.CacheAPI().NewMultipartUpload
|
||||
}
|
||||
|
||||
uploadID, err := newMultipartUpload(ctx, bucket, object, opts)
|
||||
res, err := newMultipartUpload(ctx, bucket, object, opts)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
response := generateInitiateMultipartUploadResponse(bucket, object, uploadID)
|
||||
response := generateInitiateMultipartUploadResponse(bucket, object, res.UploadID)
|
||||
if res.ChecksumAlgo != "" {
|
||||
w.Header().Set(xhttp.AmzChecksumAlgo, res.ChecksumAlgo)
|
||||
}
|
||||
encodedSuccessResponse := encodeResponse(response)
|
||||
|
||||
// Write success response.
|
||||
@ -350,6 +363,10 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err = actualReader.AddChecksum(r, false); err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Set compression metrics.
|
||||
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
|
||||
@ -367,6 +384,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
||||
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if err := hashReader.AddChecksum(r, size < 0); err != nil {
|
||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
rawReader := hashReader
|
||||
pReader := NewPutObjReader(rawReader)
|
||||
|
||||
@ -476,6 +498,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
||||
// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
|
||||
// Therefore, we have to set the ETag directly as map entry.
|
||||
w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""}
|
||||
hash.TransferChecksumHeader(w, r)
|
||||
|
||||
writeSuccessResponseHeadersOnly(w)
|
||||
}
|
||||
@ -664,7 +687,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
|
||||
// Get object location.
|
||||
location := getObjectLocation(r, globalDomainNames, bucket, object)
|
||||
// Generate complete multipart response.
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.ETag)
|
||||
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo)
|
||||
var encodedSuccessResponse []byte
|
||||
if !headerWritten {
|
||||
encodedSuccessResponse = encodeResponse(response)
|
||||
|
@ -95,10 +95,12 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
|
||||
res, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
uploadID := res.UploadID
|
||||
|
||||
// Create a byte array of 5MiB.
|
||||
data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
|
||||
completedParts := CompleteMultipartUpload{}
|
||||
@ -139,10 +141,11 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
|
||||
res, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
uploadID := res.UploadID
|
||||
|
||||
parts := make(map[int]string)
|
||||
metadata := make(map[string]string)
|
||||
|
@ -229,6 +229,10 @@ type FileInfo struct {
|
||||
// This is mainly used for detecting a particular issue
|
||||
// reported in https://github.com/minio/minio/pull/13803
|
||||
DiskMTime time.Time `msg:"dmt"`
|
||||
|
||||
// Combined checksum when object was uploaded.
|
||||
// Format is type:base64(checksum).
|
||||
Checksum map[string]string `msg:"cs,allownil"`
|
||||
}
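For illustration only (this snippet is not part of the commit): with the type name as key and the base64-encoded digest as value, a CRC32C upload would populate the field roughly as below; the digest value is made up.

	// Hypothetical contents of FileInfo.Checksum for an object uploaded
	// with a CRC32C content checksum (digest value is illustrative).
	checksum := map[string]string{
		"CRC32C": "yZRlqg==", // base64 of the raw 4-byte CRC32C digest
	}
	_ = checksum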
|
||||
|
||||
// Equals checks if fi(FileInfo) matches ofi(FileInfo)
|
||||
|
@ -602,8 +602,8 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 27 {
|
||||
err = msgp.ArrayError{Wanted: 27, Got: zb0001}
|
||||
if zb0001 != 28 {
|
||||
err = msgp.ArrayError{Wanted: 28, Got: zb0001}
|
||||
return
|
||||
}
|
||||
z.Volume, err = dc.ReadString()
|
||||
@ -778,13 +778,51 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err, "DiskMTime")
|
||||
return
|
||||
}
|
||||
if dc.IsNil() {
|
||||
err = dc.ReadNil()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
z.Checksum = nil
|
||||
} else {
|
||||
var zb0004 uint32
|
||||
zb0004, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
if z.Checksum == nil {
|
||||
z.Checksum = make(map[string]string, zb0004)
|
||||
} else if len(z.Checksum) > 0 {
|
||||
for key := range z.Checksum {
|
||||
delete(z.Checksum, key)
|
||||
}
|
||||
}
|
||||
for zb0004 > 0 {
|
||||
zb0004--
|
||||
var za0004 string
|
||||
var za0005 string
|
||||
za0004, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
za0005, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum", za0004)
|
||||
return
|
||||
}
|
||||
z.Checksum[za0004] = za0005
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// array header, size 27
|
||||
err = en.Append(0xdc, 0x0, 0x1b)
|
||||
// array header, size 28
|
||||
err = en.Append(0xdc, 0x0, 0x1c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -942,14 +980,38 @@ func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
err = msgp.WrapError(err, "DiskMTime")
|
||||
return
|
||||
}
|
||||
if z.Checksum == nil { // allownil: if nil
|
||||
err = en.WriteNil()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = en.WriteMapHeader(uint32(len(z.Checksum)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
for za0004, za0005 := range z.Checksum {
|
||||
err = en.WriteString(za0004)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
err = en.WriteString(za0005)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum", za0004)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// array header, size 27
|
||||
o = append(o, 0xdc, 0x0, 0x1b)
|
||||
// array header, size 28
|
||||
o = append(o, 0xdc, 0x0, 0x1c)
|
||||
o = msgp.AppendString(o, z.Volume)
|
||||
o = msgp.AppendString(o, z.Name)
|
||||
o = msgp.AppendString(o, z.VersionID)
|
||||
@ -996,6 +1058,15 @@ func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.AppendBool(o, z.Fresh)
|
||||
o = msgp.AppendInt(o, z.Idx)
|
||||
o = msgp.AppendTime(o, z.DiskMTime)
|
||||
if z.Checksum == nil { // allownil: if nil
|
||||
o = msgp.AppendNil(o)
|
||||
} else {
|
||||
o = msgp.AppendMapHeader(o, uint32(len(z.Checksum)))
|
||||
for za0004, za0005 := range z.Checksum {
|
||||
o = msgp.AppendString(o, za0004)
|
||||
o = msgp.AppendString(o, za0005)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -1007,8 +1078,8 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err)
|
||||
return
|
||||
}
|
||||
if zb0001 != 27 {
|
||||
err = msgp.ArrayError{Wanted: 27, Got: zb0001}
|
||||
if zb0001 != 28 {
|
||||
err = msgp.ArrayError{Wanted: 28, Got: zb0001}
|
||||
return
|
||||
}
|
||||
z.Volume, bts, err = msgp.ReadStringBytes(bts)
|
||||
@ -1183,6 +1254,40 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err, "DiskMTime")
|
||||
return
|
||||
}
|
||||
if msgp.IsNil(bts) {
|
||||
bts = bts[1:]
|
||||
z.Checksum = nil
|
||||
} else {
|
||||
var zb0004 uint32
|
||||
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
if z.Checksum == nil {
|
||||
z.Checksum = make(map[string]string, zb0004)
|
||||
} else if len(z.Checksum) > 0 {
|
||||
for key := range z.Checksum {
|
||||
delete(z.Checksum, key)
|
||||
}
|
||||
}
|
||||
for zb0004 > 0 {
|
||||
var za0004 string
|
||||
var za0005 string
|
||||
zb0004--
|
||||
za0004, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum")
|
||||
return
|
||||
}
|
||||
za0005, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksum", za0004)
|
||||
return
|
||||
}
|
||||
z.Checksum[za0004] = za0005
|
||||
}
|
||||
}
|
||||
o = bts
|
||||
return
|
||||
}
|
||||
@ -1200,7 +1305,13 @@ func (z *FileInfo) Msgsize() (s int) {
|
||||
for za0003 := range z.Parts {
|
||||
s += z.Parts[za0003].Msgsize()
|
||||
}
|
||||
s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.TimeSize
|
||||
s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.TimeSize + msgp.MapHeaderSize
|
||||
if z.Checksum != nil {
|
||||
for za0004, za0005 := range z.Checksum {
|
||||
_ = za0005
|
||||
s += msgp.StringPrefixSize + len(za0004) + msgp.StringPrefixSize + len(za0005)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -18,7 +18,7 @@
|
||||
package cmd
|
||||
|
||||
const (
|
||||
storageRESTVersion = "v47" // Added ReadMultiple
|
||||
storageRESTVersion = "v48" // Added Checksums
|
||||
storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
|
||||
storageRESTPrefix = minioReservedBucketPath + "/storage"
|
||||
)
|
||||
|
@ -29,7 +29,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/hash/sha256"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
|
@ -2293,7 +2293,7 @@ func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentia
|
||||
if etag == "" {
|
||||
t.Fatalf("Unexpected empty etag")
|
||||
}
|
||||
cp = append(cp, CompletePart{partID, etag[1 : len(etag)-1]})
|
||||
cp = append(cp, CompletePart{PartNumber: partID, ETag: etag[1 : len(etag)-1]})
|
||||
} else {
|
||||
t.Fatalf("Missing etag header")
|
||||
}
|
||||
|
@ -128,12 +128,13 @@ const (
|
||||
// ObjectPartInfo Info of each part kept in the multipart metadata
|
||||
// file after CompleteMultipartUpload() is called.
|
||||
type ObjectPartInfo struct {
|
||||
ETag string `json:"etag,omitempty"`
|
||||
Number int `json:"number"`
|
||||
Size int64 `json:"size"` // Size of the part on the disk.
|
||||
ActualSize int64 `json:"actualSize"` // Original size of the part without compression or encryption bytes.
|
||||
ModTime time.Time `json:"modTime"` // Date and time at which the part was uploaded.
|
||||
Index []byte `json:"index,omitempty" msg:"index,omitempty"`
|
||||
ETag string `json:"etag,omitempty"`
|
||||
Number int `json:"number"`
|
||||
Size int64 `json:"size"` // Size of the part on the disk.
|
||||
ActualSize int64 `json:"actualSize"` // Original size of the part without compression or encryption bytes.
|
||||
ModTime time.Time `json:"modTime"` // Date and time at which the part was uploaded.
|
||||
Index []byte `json:"index,omitempty" msg:"index,omitempty"`
|
||||
Checksums map[string]string `json:"crc,omitempty" msg:"crc,omitempty"` // Content Checksums
|
||||
}
|
||||
|
||||
// ChecksumInfo - carries checksums of individual scattered parts per disk.
|
||||
|
@ -605,6 +605,36 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
err = msgp.WrapError(err, "Index")
|
||||
return
|
||||
}
|
||||
case "crc":
|
||||
var zb0002 uint32
|
||||
zb0002, err = dc.ReadMapHeader()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums")
|
||||
return
|
||||
}
|
||||
if z.Checksums == nil {
|
||||
z.Checksums = make(map[string]string, zb0002)
|
||||
} else if len(z.Checksums) > 0 {
|
||||
for key := range z.Checksums {
|
||||
delete(z.Checksums, key)
|
||||
}
|
||||
}
|
||||
for zb0002 > 0 {
|
||||
zb0002--
|
||||
var za0001 string
|
||||
var za0002 string
|
||||
za0001, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums")
|
||||
return
|
||||
}
|
||||
za0002, err = dc.ReadString()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums", za0001)
|
||||
return
|
||||
}
|
||||
z.Checksums[za0001] = za0002
|
||||
}
|
||||
default:
|
||||
err = dc.Skip()
|
||||
if err != nil {
|
||||
@ -619,12 +649,16 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// omitempty: check for empty values
|
||||
zb0001Len := uint32(6)
|
||||
var zb0001Mask uint8 /* 6 bits */
|
||||
zb0001Len := uint32(7)
|
||||
var zb0001Mask uint8 /* 7 bits */
|
||||
if z.Index == nil {
|
||||
zb0001Len--
|
||||
zb0001Mask |= 0x20
|
||||
}
|
||||
if z.Checksums == nil {
|
||||
zb0001Len--
|
||||
zb0001Mask |= 0x40
|
||||
}
|
||||
// variable map header, size zb0001Len
|
||||
err = en.Append(0x80 | uint8(zb0001Len))
|
||||
if err != nil {
|
||||
@ -695,6 +729,30 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if (zb0001Mask & 0x40) == 0 { // if not empty
|
||||
// write "crc"
|
||||
err = en.Append(0xa3, 0x63, 0x72, 0x63)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteMapHeader(uint32(len(z.Checksums)))
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums")
|
||||
return
|
||||
}
|
||||
for za0001, za0002 := range z.Checksums {
|
||||
err = en.WriteString(za0001)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums")
|
||||
return
|
||||
}
|
||||
err = en.WriteString(za0002)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums", za0001)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -702,12 +760,16 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// omitempty: check for empty values
|
||||
zb0001Len := uint32(6)
|
||||
var zb0001Mask uint8 /* 6 bits */
|
||||
zb0001Len := uint32(7)
|
||||
var zb0001Mask uint8 /* 7 bits */
|
||||
if z.Index == nil {
|
||||
zb0001Len--
|
||||
zb0001Mask |= 0x20
|
||||
}
|
||||
if z.Checksums == nil {
|
||||
zb0001Len--
|
||||
zb0001Mask |= 0x40
|
||||
}
|
||||
// variable map header, size zb0001Len
|
||||
o = append(o, 0x80|uint8(zb0001Len))
|
||||
if zb0001Len == 0 {
|
||||
@ -733,6 +795,15 @@ func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = append(o, 0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78)
|
||||
o = msgp.AppendBytes(o, z.Index)
|
||||
}
|
||||
if (zb0001Mask & 0x40) == 0 { // if not empty
|
||||
// string "crc"
|
||||
o = append(o, 0xa3, 0x63, 0x72, 0x63)
|
||||
o = msgp.AppendMapHeader(o, uint32(len(z.Checksums)))
|
||||
for za0001, za0002 := range z.Checksums {
|
||||
o = msgp.AppendString(o, za0001)
|
||||
o = msgp.AppendString(o, za0002)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -790,6 +861,36 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
err = msgp.WrapError(err, "Index")
|
||||
return
|
||||
}
|
||||
case "crc":
|
||||
var zb0002 uint32
|
||||
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums")
|
||||
return
|
||||
}
|
||||
if z.Checksums == nil {
|
||||
z.Checksums = make(map[string]string, zb0002)
|
||||
} else if len(z.Checksums) > 0 {
|
||||
for key := range z.Checksums {
|
||||
delete(z.Checksums, key)
|
||||
}
|
||||
}
|
||||
for zb0002 > 0 {
|
||||
var za0001 string
|
||||
var za0002 string
|
||||
zb0002--
|
||||
za0001, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums")
|
||||
return
|
||||
}
|
||||
za0002, bts, err = msgp.ReadStringBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Checksums", za0001)
|
||||
return
|
||||
}
|
||||
z.Checksums[za0001] = za0002
|
||||
}
|
||||
default:
|
||||
bts, err = msgp.Skip(bts)
|
||||
if err != nil {
|
||||
@ -804,7 +905,13 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
||||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *ObjectPartInfo) Msgsize() (s int) {
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 8 + msgp.TimeSize + 6 + msgp.BytesPrefixSize + len(z.Index)
|
||||
s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 8 + msgp.TimeSize + 6 + msgp.BytesPrefixSize + len(z.Index) + 4 + msgp.MapHeaderSize
|
||||
if z.Checksums != nil {
|
||||
for za0001, za0002 := range z.Checksums {
|
||||
_ = za0002
|
||||
s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -36,6 +36,7 @@ import (
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/minio/minio/internal/bucket/lifecycle"
|
||||
"github.com/minio/minio/internal/bucket/replication"
|
||||
"github.com/minio/minio/internal/hash"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
@ -638,6 +639,9 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
|
||||
if sc, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionTier]; ok {
|
||||
fi.TransitionTier = string(sc)
|
||||
}
|
||||
if crcs := j.MetaSys[ReservedMetadataPrefixLower+"crc"]; len(crcs) > 0 {
|
||||
fi.Checksum = hash.ReadCheckSums(crcs)
|
||||
}
|
||||
return fi, nil
|
||||
}
|
||||
|
||||
@ -1536,6 +1540,16 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error {
|
||||
if fi.TransitionTier != "" {
|
||||
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
|
||||
}
|
||||
if len(fi.Checksum) > 0 {
|
||||
res := make([]byte, 0, len(fi.Checksum)*40)
|
||||
for k, v := range fi.Checksum {
|
||||
crc := hash.NewChecksumString(k, v)
|
||||
if crc.Valid() {
|
||||
res = crc.AppendTo(res)
|
||||
}
|
||||
}
|
||||
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+"crc"] = res
|
||||
}
|
||||
}
|
||||
|
||||
if !ventry.Valid() {
|
||||
|
359
internal/hash/checksum.go
Normal file
@ -0,0 +1,359 @@
|
||||
// Copyright (c) 2015-2022 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package hash
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/internal/hash/sha256"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
)
|
||||
|
||||
// MinIOMultipartChecksum is set as metadata on multipart uploads to indicate the checksum type.
|
||||
const MinIOMultipartChecksum = "x-minio-multipart-checksum"
|
||||
|
||||
// ChecksumType contains information about the checksum type.
|
||||
type ChecksumType uint32
|
||||
|
||||
const (
|
||||
|
||||
// ChecksumTrailing indicates the checksum will be sent in the trailing header.
|
||||
// Another checksum type will be set.
|
||||
ChecksumTrailing ChecksumType = 1 << iota
|
||||
|
||||
// ChecksumSHA256 indicates a SHA256 checksum.
|
||||
ChecksumSHA256
|
||||
// ChecksumSHA1 indicates a SHA-1 checksum.
|
||||
ChecksumSHA1
|
||||
// ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
|
||||
ChecksumCRC32
|
||||
// ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
|
||||
ChecksumCRC32C
|
||||
// ChecksumInvalid indicates an invalid checksum.
|
||||
ChecksumInvalid
|
||||
|
||||
// ChecksumNone indicates no checksum.
|
||||
ChecksumNone ChecksumType = 0
|
||||
)
|
||||
|
||||
// Checksum is a type and base 64 encoded value.
|
||||
type Checksum struct {
|
||||
Type ChecksumType
|
||||
Encoded string
|
||||
}
|
||||
|
||||
// Is reports whether c contains all of t.
|
||||
func (c ChecksumType) Is(t ChecksumType) bool {
|
||||
if t == ChecksumNone {
|
||||
return c == ChecksumNone
|
||||
}
|
||||
return c&t == t
|
||||
}
|
||||
|
||||
// Key returns the header key.
|
||||
// Returns an empty string if the type is invalid or none.
|
||||
func (c ChecksumType) Key() string {
|
||||
switch {
|
||||
case c.Is(ChecksumCRC32):
|
||||
return xhttp.AmzChecksumCRC32
|
||||
case c.Is(ChecksumCRC32C):
|
||||
return xhttp.AmzChecksumCRC32C
|
||||
case c.Is(ChecksumSHA1):
|
||||
return xhttp.AmzChecksumSHA1
|
||||
case c.Is(ChecksumSHA256):
|
||||
return xhttp.AmzChecksumSHA256
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// RawByteLen returns the size of the un-encoded checksum.
|
||||
func (c ChecksumType) RawByteLen() int {
|
||||
switch {
|
||||
case c.Is(ChecksumCRC32):
|
||||
return 4
|
||||
case c.Is(ChecksumCRC32C):
|
||||
return 4
|
||||
case c.Is(ChecksumSHA1):
|
||||
return sha1.Size
|
||||
case c.Is(ChecksumSHA256):
|
||||
return sha256.Size
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// IsSet returns whether the type is valid and known.
|
||||
func (c ChecksumType) IsSet() bool {
|
||||
return !c.Is(ChecksumInvalid) && !c.Is(ChecksumNone)
|
||||
}
|
||||
|
||||
// NewChecksumType returns a checksum type based on the algorithm string.
|
||||
func NewChecksumType(alg string) ChecksumType {
|
||||
switch strings.ToUpper(alg) {
|
||||
case "CRC32":
|
||||
return ChecksumCRC32
|
||||
case "CRC32C":
|
||||
return ChecksumCRC32C
|
||||
case "SHA1":
|
||||
return ChecksumSHA1
|
||||
case "SHA256":
|
||||
return ChecksumSHA256
|
||||
case "":
|
||||
return ChecksumNone
|
||||
}
|
||||
return ChecksumInvalid
|
||||
}
|
||||
|
||||
// String returns the type as a string.
|
||||
func (c ChecksumType) String() string {
|
||||
switch {
|
||||
case c.Is(ChecksumCRC32):
|
||||
return "CRC32"
|
||||
case c.Is(ChecksumCRC32C):
|
||||
return "CRC32C"
|
||||
case c.Is(ChecksumSHA1):
|
||||
return "SHA1"
|
||||
case c.Is(ChecksumSHA256):
|
||||
return "SHA256"
|
||||
case c.Is(ChecksumNone):
|
||||
return ""
|
||||
}
|
||||
return "invalid"
|
||||
}
|
||||
|
||||
// Hasher returns a hasher corresponding to the checksum type.
|
||||
// Returns nil if no checksum.
|
||||
func (c ChecksumType) Hasher() hash.Hash {
|
||||
switch {
|
||||
case c.Is(ChecksumCRC32):
|
||||
return crc32.NewIEEE()
|
||||
case c.Is(ChecksumCRC32C):
|
||||
return crc32.New(crc32.MakeTable(crc32.Castagnoli))
|
||||
case c.Is(ChecksumSHA1):
|
||||
return sha1.New()
|
||||
case c.Is(ChecksumSHA256):
|
||||
return sha256.New()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Trailing reports whether the checksum is trailing.
|
||||
func (c ChecksumType) Trailing() bool {
|
||||
return c.Is(ChecksumTrailing)
|
||||
}
|
||||
|
||||
// NewChecksumFromData returns a new checksum computed over the given data with the specified algorithm.
|
||||
func NewChecksumFromData(t ChecksumType, data []byte) *Checksum {
|
||||
if !t.IsSet() {
|
||||
return nil
|
||||
}
|
||||
h := t.Hasher()
|
||||
h.Write(data)
|
||||
c := Checksum{Type: t, Encoded: base64.StdEncoding.EncodeToString(h.Sum(nil))}
|
||||
if !c.Valid() {
|
||||
return nil
|
||||
}
|
||||
return &c
|
||||
}
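A minimal usage sketch, assuming it lives alongside the code above in the hash package; the helper name exampleChecksumFromData is hypothetical and not part of this commit.

// exampleChecksumFromData (hypothetical) computes a CRC32C checksum of some
// content and returns the matching x-amz-checksum-* header key and the
// base64-encoded digest.
func exampleChecksumFromData() (key, encoded string) {
	data := []byte("hello, world")
	c := NewChecksumFromData(ChecksumCRC32C, data)
	if c == nil || !c.Valid() {
		return "", ""
	}
	// Key() resolves to "x-amz-checksum-crc32c"; Encoded holds the
	// base64 of the raw 4-byte digest.
	return c.Type.Key(), c.Encoded
}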
|
||||
|
||||
// ReadCheckSums will read checksums from b and return them.
|
||||
func ReadCheckSums(b []byte) map[string]string {
|
||||
res := make(map[string]string, 1)
|
||||
for len(b) > 0 {
|
||||
t, n := binary.Uvarint(b)
|
||||
if n < 0 {
|
||||
break
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
typ := ChecksumType(t)
|
||||
length := typ.RawByteLen()
|
||||
if length == 0 || len(b) < length {
|
||||
break
|
||||
}
|
||||
res[typ.String()] = base64.StdEncoding.EncodeToString(b[:length])
|
||||
b = b[length:]
|
||||
}
|
||||
if len(res) == 0 {
|
||||
res = nil
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// NewChecksumString returns a new checksum from specified algorithm and base64 encoded value.
|
||||
func NewChecksumString(alg, value string) *Checksum {
|
||||
t := NewChecksumType(alg)
|
||||
if !t.IsSet() {
|
||||
return nil
|
||||
}
|
||||
c := Checksum{Type: t, Encoded: value}
|
||||
if !c.Valid() {
|
||||
return nil
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// AppendTo will append the checksum to b.
|
||||
// ReadCheckSums reads the values back.
|
||||
func (c Checksum) AppendTo(b []byte) []byte {
|
||||
var tmp [binary.MaxVarintLen32]byte
|
||||
n := binary.PutUvarint(tmp[:], uint64(c.Type))
|
||||
crc := c.Raw()
|
||||
if len(crc) != c.Type.RawByteLen() {
|
||||
return b
|
||||
}
|
||||
b = append(b, tmp[:n]...)
|
||||
b = append(b, crc...)
|
||||
return b
|
||||
}
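A hedged round-trip sketch of the encoding used by AppendTo and ReadCheckSums (a uvarint type tag followed by the raw digest bytes); the helper name is hypothetical.

// exampleChecksumRoundTrip (hypothetical) serializes two checksums with
// AppendTo and decodes them again with ReadCheckSums.
func exampleChecksumRoundTrip(content []byte) map[string]string {
	var buf []byte
	for _, t := range []ChecksumType{ChecksumCRC32C, ChecksumSHA256} {
		if c := NewChecksumFromData(t, content); c != nil {
			// Each entry is written as uvarint(type) + raw digest bytes.
			buf = c.AppendTo(buf)
		}
	}
	// Returns a map such as {"CRC32C": "...", "SHA256": "..."} with
	// base64-encoded digests as values.
	return ReadCheckSums(buf)
}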
|
||||
|
||||
// Valid returns whether checksum is valid.
|
||||
func (c Checksum) Valid() bool {
|
||||
if c.Type == ChecksumInvalid {
|
||||
return false
|
||||
}
|
||||
if len(c.Encoded) == 0 || c.Type.Is(ChecksumTrailing) {
|
||||
return c.Type.Is(ChecksumNone) || c.Type.Is(ChecksumTrailing)
|
||||
}
|
||||
raw := c.Raw()
|
||||
return c.Type.RawByteLen() == len(raw)
|
||||
}
|
||||
|
||||
// Raw returns the raw (base64-decoded) checksum bytes.
|
||||
func (c Checksum) Raw() []byte {
|
||||
if len(c.Encoded) == 0 {
|
||||
return nil
|
||||
}
|
||||
v, _ := base64.StdEncoding.DecodeString(c.Encoded)
|
||||
return v
|
||||
}
|
||||
|
||||
// Matches checks whether the given content matches c, returning a ChecksumMismatch error if not.
|
||||
func (c Checksum) Matches(content []byte) error {
|
||||
if len(c.Encoded) == 0 {
|
||||
return nil
|
||||
}
|
||||
hasher := c.Type.Hasher()
|
||||
_, err := hasher.Write(content)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
got := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
|
||||
if got != c.Encoded {
|
||||
return ChecksumMismatch{
|
||||
Want: c.Encoded,
|
||||
Got: got,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AsMap returns the checksum as a single-entry map of type name to base64-encoded value, or nil if invalid.
|
||||
func (c *Checksum) AsMap() map[string]string {
|
||||
if c == nil || !c.Valid() {
|
||||
return nil
|
||||
}
|
||||
return map[string]string{c.Type.String(): c.Encoded}
|
||||
}
|
||||
|
||||
// TransferChecksumHeader will transfer any checksum value that has been checked.
|
||||
func TransferChecksumHeader(w http.ResponseWriter, r *http.Request) {
|
||||
t, s := getContentChecksum(r)
|
||||
if !t.IsSet() || t.Is(ChecksumTrailing) {
|
||||
// TODO: Add trailing when we can read it.
|
||||
return
|
||||
}
|
||||
w.Header().Set(t.Key(), s)
|
||||
}
|
||||
|
||||
// AddChecksumHeader adds the given stored checksum values to the response headers.
|
||||
func AddChecksumHeader(w http.ResponseWriter, c map[string]string) {
|
||||
for k, v := range c {
|
||||
typ := NewChecksumType(k)
|
||||
if !typ.IsSet() {
|
||||
continue
|
||||
}
|
||||
crc := Checksum{Type: typ, Encoded: v}
|
||||
if crc.Valid() {
|
||||
w.Header().Set(typ.Key(), v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetContentChecksum returns content checksum.
|
||||
// Returns ErrInvalidChecksum if the checksum header is present but malformed.
|
||||
// Returns nil, nil if no checksum.
|
||||
func GetContentChecksum(r *http.Request) (*Checksum, error) {
|
||||
t, s := getContentChecksum(r)
|
||||
if t == ChecksumNone {
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, ErrInvalidChecksum
|
||||
}
|
||||
c := Checksum{Type: t, Encoded: s}
|
||||
if !c.Valid() {
|
||||
return nil, ErrInvalidChecksum
|
||||
}
|
||||
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
// getContentChecksum returns content checksum type and value.
|
||||
// Returns ChecksumInvalid if a checksum header is malformed or multiple conflicting headers are set.
|
||||
func getContentChecksum(r *http.Request) (t ChecksumType, s string) {
|
||||
t = ChecksumNone
|
||||
alg := r.Header.Get(xhttp.AmzChecksumAlgo)
|
||||
if alg != "" {
|
||||
t |= NewChecksumType(alg)
|
||||
if t.IsSet() {
|
||||
hdr := t.Key()
|
||||
if s = r.Header.Get(hdr); s == "" {
|
||||
if strings.EqualFold(r.Header.Get(xhttp.AmzTrailer), hdr) {
|
||||
t |= ChecksumTrailing
|
||||
} else {
|
||||
t = ChecksumInvalid
|
||||
}
|
||||
return ChecksumNone, ""
|
||||
}
|
||||
}
|
||||
return t, s
|
||||
}
|
||||
checkType := func(c ChecksumType) {
|
||||
if got := r.Header.Get(c.Key()); got != "" {
|
||||
// If already set, invalid
|
||||
if t != ChecksumNone {
|
||||
t = ChecksumInvalid
|
||||
s = ""
|
||||
} else {
|
||||
t = c
|
||||
s = got
|
||||
}
|
||||
}
|
||||
}
|
||||
checkType(ChecksumCRC32)
|
||||
checkType(ChecksumCRC32C)
|
||||
checkType(ChecksumSHA1)
|
||||
checkType(ChecksumSHA256)
|
||||
return t, s
|
||||
}
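A small sketch of how a request carrying a checksum header resolves through GetContentChecksum, assuming it sits in the same package; the request URL and helper name are illustrative, not part of this commit.

// exampleContentChecksum (hypothetical) builds a PUT request with an
// x-amz-checksum-sha256 header and resolves it via GetContentChecksum.
func exampleContentChecksum(body []byte) (*Checksum, error) {
	req, err := http.NewRequest(http.MethodPut, "http://localhost:9000/bucket/object", nil)
	if err != nil {
		return nil, err
	}
	// The header value must be the base64-encoded raw digest of the payload.
	want := NewChecksumFromData(ChecksumSHA256, body)
	req.Header.Set(xhttp.AmzChecksumSHA256, want.Encoded)
	// Yields a *Checksum with Type == ChecksumSHA256, or ErrInvalidChecksum
	// if the header value were malformed.
	return GetContentChecksum(req)
}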
|
@ -48,3 +48,13 @@ type ErrSizeMismatch struct {
|
||||
func (e ErrSizeMismatch) Error() string {
|
||||
return fmt.Sprintf("Size mismatch: got %d, want %d", e.Got, e.Want)
|
||||
}
|
||||
|
||||
// ChecksumMismatch - returned when the computed content checksum does not match what the client sent.
|
||||
type ChecksumMismatch struct {
|
||||
Want string
|
||||
Got string
|
||||
}
|
||||
|
||||
func (e ChecksumMismatch) Error() string {
|
||||
return "Bad checksum: Want " + e.Want + " does not match calculated " + e.Got
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/minio/minio/internal/etag"
|
||||
"github.com/minio/minio/internal/hash/sha256"
|
||||
@ -46,6 +47,10 @@ type Reader struct {
|
||||
checksum etag.ETag
|
||||
contentSHA256 []byte
|
||||
|
||||
// Content checksum
|
||||
contentHash Checksum
|
||||
contentHasher hash.Hash
|
||||
|
||||
sha256 hash.Hash
|
||||
}
|
||||
|
||||
@ -83,7 +88,7 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
|
||||
if r.bytesRead > 0 {
|
||||
return nil, errors.New("hash: already read from hash reader")
|
||||
}
|
||||
if len(r.checksum) != 0 && len(MD5) != 0 && !etag.Equal(r.checksum, etag.ETag(MD5)) {
|
||||
if len(r.checksum) != 0 && len(MD5) != 0 && !etag.Equal(r.checksum, MD5) {
|
||||
return nil, BadDigest{
|
||||
ExpectedMD5: r.checksum.String(),
|
||||
CalculatedMD5: md5Hex,
|
||||
@ -99,7 +104,7 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
|
||||
return nil, ErrSizeMismatch{Want: r.size, Got: size}
|
||||
}
|
||||
|
||||
r.checksum = etag.ETag(MD5)
|
||||
r.checksum = MD5
|
||||
r.contentSHA256 = SHA256
|
||||
if r.size < 0 && size >= 0 {
|
||||
r.src = etag.Wrap(io.LimitReader(r.src, size), r.src)
|
||||
@ -114,33 +119,62 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
|
||||
if size >= 0 {
|
||||
r := io.LimitReader(src, size)
|
||||
if _, ok := src.(etag.Tagger); !ok {
|
||||
src = etag.NewReader(r, etag.ETag(MD5))
|
||||
src = etag.NewReader(r, MD5)
|
||||
} else {
|
||||
src = etag.Wrap(r, src)
|
||||
}
|
||||
} else if _, ok := src.(etag.Tagger); !ok {
|
||||
src = etag.NewReader(src, etag.ETag(MD5))
|
||||
src = etag.NewReader(src, MD5)
|
||||
}
|
||||
var hash hash.Hash
|
||||
var h hash.Hash
|
||||
if len(SHA256) != 0 {
|
||||
hash = sha256.New()
|
||||
h = sha256.New()
|
||||
}
|
||||
return &Reader{
|
||||
src: src,
|
||||
size: size,
|
||||
actualSize: actualSize,
|
||||
checksum: etag.ETag(MD5),
|
||||
checksum: MD5,
|
||||
contentSHA256: SHA256,
|
||||
sha256: hash,
|
||||
sha256: h,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ErrInvalidChecksum is returned when an invalid checksum is provided in headers.
|
||||
var ErrInvalidChecksum = errors.New("invalid checksum")
|
||||
|
||||
// AddChecksum will add checksum checks as specified in
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
|
||||
// Returns ErrInvalidChecksum if a problem with the checksum is found.
|
||||
func (r *Reader) AddChecksum(req *http.Request, ignoreValue bool) error {
|
||||
cs, err := GetContentChecksum(req)
|
||||
if err != nil {
|
||||
return ErrInvalidChecksum
|
||||
}
|
||||
if cs == nil {
|
||||
return nil
|
||||
}
|
||||
r.contentHash = *cs
|
||||
if cs.Type.Trailing() || ignoreValue {
|
||||
// Ignore until we have trailing headers.
|
||||
return nil
|
||||
}
|
||||
r.contentHasher = cs.Type.Hasher()
|
||||
if r.contentHasher == nil {
|
||||
return ErrInvalidChecksum
|
||||
}
|
||||
return nil
|
||||
}
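A minimal sketch of how a handler could wire AddChecksum onto an existing *Reader, roughly mirroring the PutObjectPart handler change above; the helper name is hypothetical.

// exampleAddChecksum (hypothetical) attaches any client-supplied
// x-amz-checksum-* header from req to r so the content is verified
// while it is being read.
func exampleAddChecksum(r *Reader, req *http.Request) error {
	// ignoreValue=false enforces the supplied value inline; trailing
	// checksums are skipped until trailer support is available.
	if err := r.AddChecksum(req, false); err != nil {
		return err // ErrInvalidChecksum
	}
	// After the body has been read to EOF, ContentCRC() exposes the
	// verified checksum as {type: base64(value)}.
	_ = r.ContentCRC()
	return nil
}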
|
||||
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
n, err := r.src.Read(p)
|
||||
r.bytesRead += int64(n)
|
||||
if r.sha256 != nil {
|
||||
r.sha256.Write(p[:n])
|
||||
}
|
||||
if r.contentHasher != nil {
|
||||
r.contentHasher.Write(p[:n])
|
||||
}
|
||||
|
||||
if err == io.EOF { // Verify content SHA256, if set.
|
||||
if r.sha256 != nil {
|
||||
@ -151,6 +185,15 @@ func (r *Reader) Read(p []byte) (int, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if r.contentHasher != nil {
|
||||
if sum := r.contentHasher.Sum(nil); !bytes.Equal(r.contentHash.Raw(), sum) {
|
||||
err := ChecksumMismatch{
|
||||
Want: r.contentHash.Encoded,
|
||||
Got: base64.StdEncoding.EncodeToString(sum),
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
if v, ok := err.(etag.VerifyError); ok {
|
||||
@ -223,6 +266,19 @@ func (r *Reader) SHA256HexString() string {
|
||||
return hex.EncodeToString(r.contentSHA256)
|
||||
}
|
||||
|
||||
// ContentCRCType returns the content checksum type.
|
||||
func (r *Reader) ContentCRCType() ChecksumType {
|
||||
return r.contentHash.Type
|
||||
}
|
||||
|
||||
// ContentCRC returns the content crc if set.
|
||||
func (r *Reader) ContentCRC() map[string]string {
|
||||
if r.contentHash.Type == ChecksumNone || !r.contentHash.Valid() {
|
||||
return nil
|
||||
}
|
||||
return map[string]string{r.contentHash.Type.String(): r.contentHash.Encoded}
|
||||
}
|
||||
|
||||
var _ io.Closer = (*Reader)(nil) // compiler check
|
||||
|
||||
// Close and release resources.
|
||||
|
@ -33,3 +33,6 @@ func New() hash.Hash { return fipssha256.New() }
|
||||
|
||||
// Sum256 returns the SHA256 checksum of the data.
|
||||
func Sum256(data []byte) [fipssha256.Size]byte { return fipssha256.Sum256(data) }
|
||||
|
||||
// Size is the size of a SHA256 checksum in bytes.
|
||||
const Size = fipssha256.Size
|
||||
|
@ -32,3 +32,6 @@ func New() hash.Hash { return nofipssha256.New() }
|
||||
|
||||
// Sum256 returns the SHA256 checksum of the data.
|
||||
func Sum256(data []byte) [nofipssha256.Size]byte { return nofipssha256.Sum256(data) }
|
||||
|
||||
// Size is the size of a SHA256 checksum in bytes.
|
||||
const Size = nofipssha256.Size
|
||||
|
@ -113,6 +113,7 @@ const (
|
||||
AmzCredential = "X-Amz-Credential"
|
||||
AmzSecurityToken = "X-Amz-Security-Token"
|
||||
AmzDecodedContentLength = "X-Amz-Decoded-Content-Length"
|
||||
AmzTrailer = "X-Amz-Trailer"
|
||||
|
||||
AmzMetaUnencryptedContentLength = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length"
|
||||
AmzMetaUnencryptedContentMD5 = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5"
|
||||
@ -144,6 +145,14 @@ const (
|
||||
// Server-Status
|
||||
MinIOServerStatus = "x-minio-server-status"
|
||||
|
||||
// Content Checksums
|
||||
AmzChecksumAlgo = "x-amz-checksum-algorithm"
|
||||
AmzChecksumCRC32 = "x-amz-checksum-crc32"
|
||||
AmzChecksumCRC32C = "x-amz-checksum-crc32c"
|
||||
AmzChecksumSHA1 = "x-amz-checksum-sha1"
|
||||
AmzChecksumSHA256 = "x-amz-checksum-sha256"
|
||||
AmzChecksumMode = "x-amz-checksum-mode"
|
||||
|
||||
// Delete special flag to force delete a bucket or a prefix
|
||||
MinIOForceDelete = "x-minio-force-delete"
|
||||
|
||||
|