Add extended checksum support (#15433)

Klaus Post, 2022-08-30 01:57:16 +02:00, committed by GitHub
parent 929b9e164e
commit a9f1ad7924
44 changed files with 1560 additions and 554 deletions
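For context on what the new error codes and checksum metadata below are validating: with this change a client can send a pre-computed checksum of the payload along with an upload request, and the server recomputes and compares it while reading the body, failing the request on mismatch. The following sketch is not part of the diff; it only illustrates forming such a request with the Go standard library. The header name follows the S3 extended-checksum convention (x-amz-checksum-sha256), while the endpoint, bucket, and helper name are placeholders, and request signing/authentication is omitted entirely.

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/http"
)

// putWithChecksum uploads body to an S3-compatible endpoint and attaches a
// SHA-256 payload checksum; the server-side code in this commit recomputes
// the digest and compares it against this header value.
// NOTE: illustrative only; a real request to MinIO must also be signed.
func putWithChecksum(url string, body []byte) (*http.Response, error) {
	sum := sha256.Sum256(body)
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	// Checksum headers carry the base64 encoding of the raw digest bytes.
	req.Header.Set("x-amz-checksum-sha256", base64.StdEncoding.EncodeToString(sum[:]))
	return http.DefaultClient.Do(req)
}

func main() {
	resp, err := putWithChecksum("http://localhost:9000/testbucket/hello.txt", []byte("hello world"))
	if err != nil {
		fmt.Println("request error:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}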

View File

@@ -30,7 +30,7 @@ import (
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"google.golang.org/api/googleapi"
-	minio "github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7"
 	"github.com/minio/minio-go/v7/pkg/tags"
 	"github.com/minio/minio/internal/auth"
 	"github.com/minio/minio/internal/bucket/lifecycle"
@@ -232,6 +232,7 @@ const (
 	// S3 extended errors.
 	ErrContentSHA256Mismatch
+	ErrContentChecksumMismatch
 	// Add new extended error codes here.
@@ -392,6 +393,8 @@ const (
 	ErrAccountNotEligible
 	ErrAdminServiceAccountNotFound
 	ErrPostPolicyConditionInvalidFormat
+	ErrInvalidChecksum
 )

 type errorCodeMap map[APIErrorCode]APIError
@@ -1160,6 +1163,11 @@ var errorCodes = errorCodeMap{
 		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
+	ErrContentChecksumMismatch: {
+		Code:           "XAmzContentChecksumMismatch",
+		Description:    "The provided 'x-amz-checksum' header does not match what was computed.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	// MinIO extensions.
 	ErrStorageFull: {
@@ -1874,6 +1882,11 @@ var errorCodes = errorCodeMap{
 		Description:    "Invalid according to Policy: Policy Condition failed",
 		HTTPStatusCode: http.StatusForbidden,
 	},
+	ErrInvalidChecksum: {
+		Code:           "InvalidArgument",
+		Description:    "Invalid checksum provided.",
+		HTTPStatusCode: http.StatusBadRequest,
+	},
 	// Add your error structure here.
 }
@@ -2046,6 +2059,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 		apiErr = ErrSignatureDoesNotMatch
 	case hash.SHA256Mismatch:
 		apiErr = ErrContentSHA256Mismatch
+	case hash.ChecksumMismatch:
+		apiErr = ErrContentChecksumMismatch
 	case ObjectTooLarge:
 		apiErr = ErrEntityTooLarge
 	case ObjectTooSmall:

View File

@@ -31,6 +31,7 @@ import (
 	"github.com/minio/minio/internal/crypto"
 	"github.com/minio/minio/internal/handlers"
+	"github.com/minio/minio/internal/hash"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 )
@@ -163,6 +164,12 @@ type Part struct {
 	LastModified string
 	ETag         string
 	Size         int64
+
+	// Checksum values
+	ChecksumCRC32  string
+	ChecksumCRC32C string
+	ChecksumSHA1   string
+	ChecksumSHA256 string
 }

 // ListPartsResponse - format for list parts response.
@@ -184,6 +191,7 @@ type ListPartsResponse struct {
 	MaxParts    int
 	IsTruncated bool
+	ChecksumAlgorithm string
 	// List of parts.
 	Parts []Part `xml:"Part"`
 }
@@ -381,6 +389,11 @@ type CompleteMultipartUploadResponse struct {
 	Bucket string
 	Key    string
 	ETag   string
+
+	ChecksumCRC32  string
+	ChecksumCRC32C string
+	ChecksumSHA1   string
+	ChecksumSHA256 string
 }

 // DeleteError structure.
@@ -690,14 +703,19 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi
 }

 // generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
-func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) CompleteMultipartUploadResponse {
-	return CompleteMultipartUploadResponse{
+func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse {
+	c := CompleteMultipartUploadResponse{
 		Location: location,
 		Bucket:   bucket,
 		Key:      key,
 		// AWS S3 quotes the ETag in XML, make sure we are compatible here.
-		ETag: "\"" + etag + "\"",
+		ETag:           "\"" + oi.ETag + "\"",
+		ChecksumSHA1:   oi.Checksum[hash.ChecksumSHA1.String()],
+		ChecksumSHA256: oi.Checksum[hash.ChecksumSHA256.String()],
+		ChecksumCRC32:  oi.Checksum[hash.ChecksumCRC32.String()],
+		ChecksumCRC32C: oi.Checksum[hash.ChecksumCRC32C.String()],
 	}
+	return c
 }

 // generates ListPartsResponse from ListPartsInfo.
@@ -722,6 +740,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
 	listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
 	listPartsResponse.IsTruncated = partsInfo.IsTruncated
 	listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
+	listPartsResponse.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm

 	listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
 	for index, part := range partsInfo.Parts {
@@ -730,6 +749,10 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis
 		newPart.ETag = "\"" + part.ETag + "\""
 		newPart.Size = part.Size
 		newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
+		newPart.ChecksumCRC32 = part.ChecksumCRC32
+		newPart.ChecksumCRC32C = part.ChecksumCRC32C
+		newPart.ChecksumSHA1 = part.ChecksumSHA1
+		newPart.ChecksumSHA256 = part.ChecksumSHA256
 		listPartsResponse.Parts[index] = newPart
 	}
 	return listPartsResponse

File diff suppressed because one or more lines are too long

View File

@@ -25,7 +25,7 @@ import (
 	"strconv"
 	"testing"

-	humanize "github.com/dustin/go-humanize"
+	"github.com/dustin/go-humanize"
 )

 // Benchmark utility functions for ObjectLayer.PutObject().
@@ -85,12 +85,12 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 	// PutObjectPart returns etag of the object inserted.
 	// etag variable is assigned with that value.
-	var etag, uploadID string
+	var etag string
 	// get text data generated for number of bytes equal to object size.
 	textData := generateBytesData(objSize)
 	// generate md5sum for the generated data.
 	// md5sum of the data to written is required as input for NewMultipartUpload.
-	uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
+	res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 		}
 		md5hex := getMD5Hash(textPartData)
 		var partInfo PartInfo
-		partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
+		partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j,
 			mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
 		if err != nil {
 			b.Fatal(err)

View File

@@ -87,7 +87,7 @@ type CacheObjectLayer interface {
 	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
 	// Multipart operations.
-	NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
+	NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error)
 	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
 	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
 	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
@@ -122,7 +122,7 @@ type cacheObjects struct {
 	InnerDeleteObjectFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	InnerPutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	InnerCopyObjectFn func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
-	InnerNewMultipartUploadFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error)
+	InnerNewMultipartUploadFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error)
 	InnerPutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
 	InnerAbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
 	InnerCompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
@@ -866,7 +866,7 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjec
 		InnerCopyObjectFn: func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
 			return newObjectLayerFn().CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
 		},
-		InnerNewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
+		InnerNewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
			return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, opts)
 		},
 		InnerPutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
@@ -961,7 +961,7 @@ func (c *cacheObjects) queuePendingWriteback(ctx context.Context) {
 }

 // NewMultipartUpload - Starts a new multipart upload operation to backend - if writethrough mode is enabled, starts caching the multipart.
-func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
+func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
 	newMultipartUploadFn := c.InnerNewMultipartUploadFn
 	dcache, err := c.getCacheToLoc(ctx, bucket, object)
 	if err != nil {
@@ -996,9 +996,11 @@ func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object st
 	}

 	// perform multipart upload on backend and cache simultaneously
-	uploadID, err = newMultipartUploadFn(ctx, bucket, object, opts)
-	dcache.NewMultipartUpload(GlobalContext, bucket, object, uploadID, opts)
-	return uploadID, err
+	res, err = newMultipartUploadFn(ctx, bucket, object, opts)
+	if err == nil {
+		dcache.NewMultipartUpload(GlobalContext, bucket, object, res.UploadID, opts)
+	}
+	return res, err
 }

 // PutObjectPart streams part to cache concurrently if writethrough mode is enabled. Otherwise redirects the call to remote

View File

@@ -514,6 +514,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 			partModTime := latestMeta.Parts[partIndex].ModTime
 			partNumber := latestMeta.Parts[partIndex].Number
 			partIdx := latestMeta.Parts[partIndex].Index
+			partChecksums := latestMeta.Parts[partIndex].Checksums
 			tillOffset := erasure.ShardFileOffset(0, partSize, partSize)
 			readers := make([]io.ReaderAt, len(latestDisks))
 			checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm
@@ -567,7 +568,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 			}
 			partsMetadata[i].DataDir = dstDataDir
-			partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partModTime, partIdx)
+			partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partModTime, partIdx, partChecksums)
 			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 				PartNumber: partNumber,
 				Algorithm:  checksumAlgo,

View File

@@ -561,14 +561,14 @@ func TestHealCorrectQuorum(t *testing.T) {
 	// Create an object with multiple parts uploaded in decreasing
 	// part number.
-	uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
+	res, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
 	if err != nil {
 		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}

 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
+		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
@@ -578,7 +578,7 @@ func TestHealCorrectQuorum(t *testing.T) {
 		})
 	}

-	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
+	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to complete multipart upload - got: %v", err)
 	}
@@ -694,10 +694,11 @@ func TestHealObjectCorruptedPools(t *testing.T) {
 	z := objLayer.(*erasureServerPools)
 	set := z.serverPools[1]
-	uploadID, err := set.NewMultipartUpload(ctx, bucket, object, opts)
+	res, err := set.NewMultipartUpload(ctx, bucket, object, opts)
 	if err != nil {
 		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}
+	uploadID := res.UploadID

 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
@@ -868,14 +869,14 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
 	// Create an object with multiple parts uploaded in decreasing
 	// part number.
-	uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
+	res, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
 	if err != nil {
 		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}

 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
+		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
@@ -885,7 +886,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
 		})
 	}

-	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
+	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to complete multipart upload - %v", err)
 	}
@@ -1011,14 +1012,14 @@ func TestHealObjectCorruptedParts(t *testing.T) {
 	// Create an object with multiple parts uploaded in decreasing
 	// part number.
-	uploadID, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
+	res, err := objLayer.NewMultipartUpload(ctx, bucket, object, opts)
 	if err != nil {
 		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}

 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
+		pInfo, err1 := objLayer.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
@@ -1028,7 +1029,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
 		})
 	}

-	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
+	_, err = objLayer.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to complete multipart upload - %v", err)
 	}
@@ -1168,14 +1169,14 @@ func TestHealObjectErasure(t *testing.T) {
 	// Create an object with multiple parts uploaded in decreasing
 	// part number.
-	uploadID, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
+	res, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
 	if err != nil {
 		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}

 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := obj.PutObjectPart(ctx, bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
+		pInfo, err1 := obj.PutObjectPart(ctx, bucket, object, res.UploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
@@ -1190,7 +1191,7 @@ func TestHealObjectErasure(t *testing.T) {
 	er := z.serverPools[0].sets[0]
 	firstDisk := er.getDisks()[0]

-	_, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
+	_, err = obj.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to complete multipart upload - %v", err)
 	}

View File

@@ -187,6 +187,7 @@ func (fi FileInfo) ToObjectInfo(bucket, object string, versioned bool) ObjectInf
 			objInfo.RestoreExpires, _ = restoreStatus.Expiry()
 		}
 	}
+	objInfo.Checksum = fi.Checksum
 	// Success.
 	return objInfo
 }
@@ -237,7 +238,7 @@ func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
 }

 // AddObjectPart - add a new object part in order.
-func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, modTime time.Time, idx []byte) {
+func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, modTime time.Time, idx []byte, checksums map[string]string) {
 	partInfo := ObjectPartInfo{
 		Number: partNumber,
 		ETag:   partETag,
@@ -245,6 +246,7 @@ func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, act
 		ActualSize: actualSize,
 		ModTime:    modTime,
 		Index:      idx,
+		Checksums:  checksums,
 	}

 	// Update part info if it already exists.

View File

@@ -58,7 +58,7 @@ func TestAddObjectPart(t *testing.T) {
 	for _, testCase := range testCases {
 		if testCase.expectedIndex > -1 {
 			partNumString := strconv.Itoa(testCase.partNum)
-			fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil)
+			fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil, nil)
 		}

 		if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
@@ -91,7 +91,7 @@ func TestObjectPartIndex(t *testing.T) {
 	// Add some parts for testing.
 	for _, testCase := range testCases {
 		partNumString := strconv.Itoa(testCase.partNum)
-		fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil)
+		fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize, UTCNow(), nil, nil)
 	}

 	// Add failure test case.
@@ -121,7 +121,7 @@ func TestObjectToPartOffset(t *testing.T) {
 	// Total size of all parts is 5,242,899 bytes.
 	for _, partNum := range []int{1, 2, 4, 5, 7} {
 		partNumString := strconv.Itoa(partNum)
-		fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize, UTCNow(), nil)
+		fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize, UTCNow(), nil, nil)
 	}

 	testCases := []struct {
@@ -160,7 +160,7 @@ func TestObjectToPartOffset(t *testing.T) {
 func TestFindFileInfoInQuorum(t *testing.T) {
 	getNFInfo := func(n int, quorum int, t int64, dataDir string) []FileInfo {
 		fi := newFileInfo("test", 8, 8)
-		fi.AddObjectPart(1, "etag", 100, 100, UTCNow(), nil)
+		fi.AddObjectPart(1, "etag", 100, 100, UTCNow(), nil, nil)
 		fi.ModTime = time.Unix(t, 0)
 		fi.DataDir = dataDir
 		fis := make([]FileInfo, n)

View File

@@ -32,6 +32,7 @@ import (
 	"github.com/klauspost/readahead"
 	"github.com/minio/minio-go/v7/pkg/set"
+	"github.com/minio/minio/internal/hash"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/sync/errgroup"
@@ -325,7 +326,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 // '.minio.sys/multipart/bucket/object/uploads.json' on all the
 // disks. `uploads.json` carries metadata regarding on-going multipart
 // operation(s) on the object.
-func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
+func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
 	userDefined := cloneMSS(opts.UserDefined)

 	onlineDisks := er.getDisks()
@@ -352,7 +353,6 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 	if parityOrig != parityDrives {
 		userDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives)
 	}
-
 	dataDrives := len(onlineDisks) - parityDrives

 	// we now know the number of blocks this object needs for data and parity.
@@ -382,6 +382,10 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 		userDefined["content-type"] = mimedb.TypeByExtension(path.Ext(object))
 	}

+	if opts.WantChecksum != nil && opts.WantChecksum.Type.IsSet() {
+		userDefined[hash.MinIOMultipartChecksum] = opts.WantChecksum.Type.String()
+	}
+
 	modTime := opts.MTime
 	if opts.MTime.IsZero() {
 		modTime = UTCNow()
@@ -402,11 +406,12 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 	// Write updated `xl.meta` to all disks.
 	if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
-		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
+		return nil, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
 	}
-
-	// Return success.
-	return uploadID, nil
+	return &NewMultipartUploadResult{
+		UploadID:     uploadID,
+		ChecksumAlgo: userDefined[hash.MinIOMultipartChecksum],
+	}, nil
 }

 // NewMultipartUpload - initialize a new multipart upload, returns a
@@ -414,7 +419,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 // subsequent request each UUID is unique.
 //
 // Implements S3 compatible initiate multipart API.
-func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
+func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
 	auditObjectErasureSet(ctx, object, &er)

 	return er.newMultipartUpload(ctx, bucket, object, opts)
@@ -590,9 +595,18 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	// Pick one from the first valid metadata.
 	fi, err := pickValidFileInfo(pctx, partsMetadata, modTime, writeQuorum)
 	if err != nil {
-		return pi, err
+		return pi, toObjectErr(err)
 	}

+	if cs := fi.Metadata[hash.MinIOMultipartChecksum]; cs != "" {
+		if r.ContentCRCType().String() != cs {
+			return pi, InvalidArgument{
+				Bucket: bucket,
+				Object: fi.Name,
+				Err:    fmt.Errorf("checksum missing"),
+			}
+		}
+	}
 	onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)

 	// Need a unique name for the part being written in minioMetaBucket to
@@ -703,6 +717,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 		ActualSize: data.ActualSize(),
 		ModTime:    UTCNow(),
 		Index:      index,
+		Checksums:  r.ContentCRC(),
 	}

 	partMsg, err := part.MarshalMsg(nil)
@@ -723,6 +738,10 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 		LastModified: part.ModTime,
 		Size:         part.Size,
 		ActualSize:   part.ActualSize,
+		ChecksumCRC32:  part.Checksums["CRC32"],
+		ChecksumCRC32C: part.Checksums["CRC32C"],
+		ChecksumSHA1:   part.Checksums["SHA1"],
+		ChecksumSHA256: part.Checksums["SHA256"],
 	}, nil
 }
@@ -872,6 +891,7 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
 	result.MaxParts = maxParts
 	result.PartNumberMarker = partNumberMarker
 	result.UserDefined = cloneMSS(fi.Metadata)
+	result.ChecksumAlgorithm = fi.Metadata[hash.MinIOMultipartChecksum]

 	// For empty number of parts or maxParts as zero, return right here.
 	if len(partInfoFiles) == 0 || maxParts == 0 {
@@ -898,7 +918,7 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
 		}

 		// Add the current part.
-		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index)
+		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index, partI.Checksums)
 	}

 	// Only parts with higher part numbers will be listed.
@@ -911,6 +931,10 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up
 			LastModified: part.ModTime,
 			ActualSize:   part.ActualSize,
 			Size:         part.Size,
+			ChecksumCRC32:  part.Checksums["CRC32"],
+			ChecksumCRC32C: part.Checksums["CRC32C"],
+			ChecksumSHA1:   part.Checksums["SHA1"],
+			ChecksumSHA256: part.Checksums["SHA256"],
 		})
 		if len(result.Parts) >= maxParts {
 			break
@@ -1000,7 +1024,20 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 		return oi, toObjectErr(err, bucket, object)
 	}

-	var partI ObjectPartInfo
+	// Checksum type set when upload started.
+	var checksumType hash.ChecksumType
+	if cs := fi.Metadata[hash.MinIOMultipartChecksum]; cs != "" {
+		checksumType = hash.NewChecksumType(cs)
+		if opts.WantChecksum != nil && !opts.WantChecksum.Type.Is(checksumType) {
+			return oi, InvalidArgument{
+				Bucket: bucket,
+				Object: fi.Name,
+				Err:    fmt.Errorf("checksum type mismatch"),
+			}
+		}
+	}
+
+	var checksumCombined []byte
 	for i, part := range partInfoFiles {
 		partID := parts[i].PartNumber
 		if part.Error != "" || !part.Exists {
@@ -1009,6 +1046,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 			}
 		}

+		var partI ObjectPartInfo
 		_, err := partI.UnmarshalMsg(part.Data)
 		if err != nil {
 			// Maybe crash or similar.
@@ -1026,7 +1064,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 		}

 		// Add the current part.
-		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index)
+		fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index, partI.Checksums)
 	}

 	// Calculate full object size.
@@ -1056,43 +1094,86 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 			}
 			return oi, invp
 		}
+		gotPart := currentFI.Parts[partIdx]

 		// ensure that part ETag is canonicalized to strip off extraneous quotes
 		part.ETag = canonicalizeETag(part.ETag)
-		if currentFI.Parts[partIdx].ETag != part.ETag {
+		if gotPart.ETag != part.ETag {
 			invp := InvalidPart{
 				PartNumber: part.PartNumber,
-				ExpETag:    currentFI.Parts[partIdx].ETag,
+				ExpETag:    gotPart.ETag,
 				GotETag:    part.ETag,
 			}
 			return oi, invp
 		}

+		if checksumType.IsSet() {
+			crc := gotPart.Checksums[checksumType.String()]
+			if crc == "" {
+				return oi, InvalidPart{
+					PartNumber: part.PartNumber,
+				}
+			}
+			wantCS := map[string]string{
+				hash.ChecksumCRC32.String():  part.ChecksumCRC32,
+				hash.ChecksumCRC32C.String(): part.ChecksumCRC32C,
+				hash.ChecksumSHA1.String():   part.ChecksumSHA1,
+				hash.ChecksumSHA256.String(): part.ChecksumSHA256,
+			}
+			if wantCS[checksumType.String()] != crc {
+				return oi, InvalidPart{
+					PartNumber: part.PartNumber,
+					ExpETag:    wantCS[checksumType.String()],
+					GotETag:    crc,
+				}
+			}
+			cs := hash.NewChecksumString(checksumType.String(), crc)
+			if !cs.Valid() {
+				return oi, InvalidPart{
+					PartNumber: part.PartNumber,
+				}
+			}
+			checksumCombined = append(checksumCombined, cs.Raw()...)
+		}

 		// All parts except the last part has to be at least 5MB.
 		if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) {
 			return oi, PartTooSmall{
 				PartNumber: part.PartNumber,
-				PartSize:   currentFI.Parts[partIdx].ActualSize,
+				PartSize:   gotPart.ActualSize,
 				PartETag:   part.ETag,
 			}
 		}

 		// Save for total object size.
-		objectSize += currentFI.Parts[partIdx].Size
+		objectSize += gotPart.Size

 		// Save the consolidated actual size.
-		objectActualSize += currentFI.Parts[partIdx].ActualSize
+		objectActualSize += gotPart.ActualSize

 		// Add incoming parts.
 		fi.Parts[i] = ObjectPartInfo{
 			Number:     part.PartNumber,
-			Size:       currentFI.Parts[partIdx].Size,
-			ActualSize: currentFI.Parts[partIdx].ActualSize,
-			ModTime:    currentFI.Parts[partIdx].ModTime,
-			Index:      currentFI.Parts[partIdx].Index,
+			Size:       gotPart.Size,
+			ActualSize: gotPart.ActualSize,
+			ModTime:    gotPart.ModTime,
+			Index:      gotPart.Index,
+			Checksums:  nil, // Not transferred since we do not need it.
 		}
 	}

+	if opts.WantChecksum != nil {
+		err := opts.WantChecksum.Matches(checksumCombined)
+		if err != nil {
+			return oi, err
+		}
+	}
+	if checksumType.IsSet() {
+		cs := hash.NewChecksumFromData(checksumType, checksumCombined)
+		fi.Checksum = map[string]string{cs.Type.String(): cs.Encoded}
+	}
+	delete(fi.Metadata, hash.MinIOMultipartChecksum) // Not needed in final object.
+
 	// Save the final object size and modtime.
 	fi.Size = objectSize
 	fi.ModTime = opts.MTime

View File

@@ -837,7 +837,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 			continue
 		}
 		partsMetadata[i].Data = inlineBuffers[i].Bytes()
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index, nil)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,
@@ -962,6 +962,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	}

 	fi.DataDir = mustGetUUID()
+	fi.Checksum = opts.WantChecksum.AsMap()
 	uniqueID := mustGetUUID()
 	tempObj := uniqueID
@@ -1105,7 +1106,8 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		} else {
 			partsMetadata[i].Data = nil
 		}
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, compIndex)
+		// No need to add checksum to part. We already have it on the object.
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, compIndex, nil)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,
@@ -1911,7 +1913,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
 	}

-	uploadID, err := er.NewMultipartUpload(ctx, bucket, object, ropts)
+	res, err := er.NewMultipartUpload(ctx, bucket, object, ropts)
 	if err != nil {
 		return setRestoreHeaderFn(oi, err)
 	}
@@ -1931,7 +1933,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}
-		pInfo, err := er.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
+		pInfo, err := er.PutObjectPart(ctx, bucket, object, res.UploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}
@@ -1943,7 +1945,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s
 			ETag:       pInfo.ETag,
 		})
 	}
-	_, err = er.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
+	_, err = er.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, uploadedParts, ObjectOptions{
 		MTime: oi.ModTime,
 	})
 	return setRestoreHeaderFn(oi, err)

View File

@@ -31,7 +31,7 @@ import (
 	"strconv"
 	"testing"

-	humanize "github.com/dustin/go-humanize"
+	"github.com/dustin/go-humanize"
 	"github.com/minio/minio/internal/config/storageclass"
 )
@@ -58,18 +58,18 @@ func TestRepeatPutObjectPart(t *testing.T) {
 		t.Fatal(err)
 	}

-	uploadID, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts)
+	res, err := objLayer.NewMultipartUpload(ctx, "bucket1", "mpartObj1", opts)
 	if err != nil {
 		t.Fatal(err)
 	}
 	fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
 	md5Hex := getMD5Hash(fiveMBBytes)
-	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
+	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
 	if err != nil {
 		t.Fatal(err)
 	}
 	// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
-	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
+	_, err = objLayer.PutObjectPart(ctx, "bucket1", "mpartObj1", res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
 	if err != nil {
 		t.Fatal(err)
 	}

View File

@@ -587,7 +587,7 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 	}

 	if objInfo.isMultipart() {
-		uploadID, err := z.NewMultipartUpload(ctx, bucket, objInfo.Name, ObjectOptions{
+		res, err := z.NewMultipartUpload(ctx, bucket, objInfo.Name, ObjectOptions{
 			VersionID:   objInfo.VersionID,
 			MTime:       objInfo.ModTime,
 			UserDefined: objInfo.UserDefined,
@@ -595,14 +595,14 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 		if err != nil {
 			return fmt.Errorf("decommissionObject: NewMultipartUpload() %w", err)
 		}
-		defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, uploadID, ObjectOptions{})
+		defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{})
 		parts := make([]CompletePart, len(objInfo.Parts))
 		for i, part := range objInfo.Parts {
 			hr, err := hash.NewReader(gr, part.Size, "", "", part.ActualSize)
 			if err != nil {
 				return fmt.Errorf("decommissionObject: hash.NewReader() %w", err)
 			}
-			pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, uploadID,
+			pi, err := z.PutObjectPart(ctx, bucket, objInfo.Name, res.UploadID,
 				part.Number,
 				NewPutObjReader(hr),
 				ObjectOptions{
@@ -617,9 +617,13 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri
 			parts[i] = CompletePart{
 				ETag:       pi.ETag,
 				PartNumber: pi.PartNumber,
+				ChecksumCRC32:  pi.ChecksumCRC32,
+				ChecksumCRC32C: pi.ChecksumCRC32C,
+				ChecksumSHA256: pi.ChecksumSHA256,
+				ChecksumSHA1:   pi.ChecksumSHA1,
 			}
 		}
-		_, err = z.CompleteMultipartUpload(ctx, bucket, objInfo.Name, uploadID, parts, ObjectOptions{
+		_, err = z.CompleteMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, parts, ObjectOptions{
 			MTime: objInfo.ModTime,
 		})
 		if err != nil {

View File

@@ -1376,14 +1376,14 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
 }

 // Initiate a new multipart upload on a hashedSet based on object name.
-func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
+func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
 	if err := checkNewMultipartArgs(ctx, bucket, object, z); err != nil {
-		return "", err
+		return nil, err
 	}

 	if z.SinglePool() {
 		if !isMinioMetaBucketName(bucket) && !hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()...), -1) {
-			return "", toObjectErr(errDiskFull)
+			return nil, toObjectErr(errDiskFull)
 		}
 		return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
 	}
@@ -1394,7 +1394,7 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
 		}
 		result, err := pool.ListMultipartUploads(ctx, bucket, object, "", "", "", maxUploadsList)
 		if err != nil {
-			return "", err
+			return nil, err
 		}
 		// If there is a multipart upload with the same bucket/object name,
 		// create the new multipart in the same pool, this will avoid
@@ -1408,7 +1408,7 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
 	// to return since this holds a read lock on the namespace.
 	idx, err := z.getPoolIdx(ctx, bucket, object, -1)
 	if err != nil {
-		return "", err
+		return nil, err
 	}

 	return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts)

View File

@@ -1079,7 +1079,7 @@ func (s *erasureSets) ListMultipartUploads(ctx context.Context, bucket, prefix,
 }

 // Initiate a new multipart upload on a hashedSet based on object name.
-func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
+func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
 	set := s.getHashedSet(object)
 	return set.NewMultipartUpload(ctx, bucket, object, opts)
 }

View File

@@ -919,7 +919,7 @@ func (es *erasureSingle) putMetacacheObject(ctx context.Context, key string, r *
 			continue
 		}
 		partsMetadata[i].Data = inlineBuffers[i].Bytes()
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index, nil)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,
@@ -1163,7 +1163,7 @@ func (es *erasureSingle) putObject(ctx context.Context, bucket string, object st
 		} else {
 			partsMetadata[i].Data = nil
 		}
-		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index)
+		partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, index, nil)
 		partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
 			PartNumber: 1,
 			Algorithm:  DefaultBitrotAlgorithm,
@@ -1896,7 +1896,7 @@ func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket s
 		return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object))
 	}

-	uploadID, err := es.NewMultipartUpload(ctx, bucket, object, ropts)
+	result, err := es.NewMultipartUpload(ctx, bucket, object, ropts)
 	if err != nil {
 		return setRestoreHeaderFn(oi, err)
 	}
@@ -1916,7 +1916,7 @@ func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket s
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}
-		pInfo, err := es.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
+		pInfo, err := es.PutObjectPart(ctx, bucket, object, result.UploadID, partInfo.Number, NewPutObjReader(hr), ObjectOptions{})
 		if err != nil {
 			return setRestoreHeaderFn(oi, err)
 		}
@@ -1928,7 +1928,7 @@ func (es *erasureSingle) restoreTransitionedObject(ctx context.Context, bucket s
 			ETag:       pInfo.ETag,
 		})
 	}
-	_, err = es.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{
+	_, err = es.CompleteMultipartUpload(ctx, bucket, object, result.UploadID, uploadedParts, ObjectOptions{
 		MTime: oi.ModTime,
 	})
 	return setRestoreHeaderFn(oi, err)
@@ -2136,7 +2136,7 @@ func (es *erasureSingle) ListMultipartUploads(ctx context.Context, bucket, objec
 // '.minio.sys/multipart/bucket/object/uploads.json' on all the
 // disks. `uploads.json` carries metadata regarding on-going multipart
 // operation(s) on the object.
-func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
+func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
 	onlineDisks := []StorageAPI{es.disk}
 	parityDrives := 0
 	dataDrives := len(onlineDisks) - parityDrives
@@ -2188,11 +2188,11 @@ func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string,
 	// Write updated `xl.meta` to all disks.
 	if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
-		return "", toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
+		return nil, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
 	}

 	// Return success.
-	return uploadID, nil
+	return &NewMultipartUploadResult{UploadID: uploadID}, nil
 }

 // NewMultipartUpload - initialize a new multipart upload, returns a
@@ -2200,9 +2200,9 @@ func (es *erasureSingle) newMultipartUpload(ctx context.Context, bucket string,
 // subsequent request each UUID is unique.
 //
 // Implements S3 compatible initiate multipart API.
-func (es *erasureSingle) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
+func (es *erasureSingle) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
 	if err := checkNewMultipartArgs(ctx, bucket, object, es); err != nil {
-		return "", err
+		return nil, err
 	}

 	// No metadata is set, allocate a new one.
@@ -2441,7 +2441,7 @@ func (es *erasureSingle) PutObjectPart(ctx context.Context, bucket, object, uplo
 	}

 	// Add the current part.
-	fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), fi.ModTime, index)
+	fi.AddObjectPart(partID, md5hex, n, data.ActualSize(), fi.ModTime, index, nil)

 	for i, disk := range onlineDisks {
 		if disk == OfflineDisk {

View File

@ -218,13 +218,13 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
// subsequent request each UUID is unique. // subsequent request each UUID is unique.
// //
// Implements S3 compatible initiate multipart API. // Implements S3 compatible initiate multipart API.
func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) {
if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil { if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
return "", toObjectErr(err, bucket) return nil, toObjectErr(err, bucket)
} }
if _, err := fs.statBucketDir(ctx, bucket); err != nil { if _, err := fs.statBucketDir(ctx, bucket); err != nil {
return "", toObjectErr(err, bucket) return nil, toObjectErr(err, bucket)
} }
uploadID := mustGetUUID() uploadID := mustGetUUID()
@ -233,7 +233,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
err := mkdirAll(uploadIDDir, 0o755) err := mkdirAll(uploadIDDir, 0o755)
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return "", err return nil, err
} }
// Initialize fs.json values. // Initialize fs.json values.
@ -243,15 +243,14 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
fsMetaBytes, err := json.Marshal(fsMeta) fsMetaBytes, err := json.Marshal(fsMeta)
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return "", err return nil, err
} }
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil { if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return "", err return nil, err
} }
return uploadID, nil return &NewMultipartUploadResult{UploadID: uploadID}, nil
} }
// CopyObjectPart - similar to PutObjectPart but reads data from an existing // CopyObjectPart - similar to PutObjectPart but reads data from an existing


@ -47,7 +47,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
ctx, cancel := context.WithCancel(GlobalContext) ctx, cancel := context.WithCancel(GlobalContext)
obj.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{}) obj.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{})
uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{}) res, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{})
if err != nil { if err != nil {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
@ -78,7 +78,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
cleanupWg.Wait() cleanupWg.Wait()
// Check if upload id was already purged. // Check if upload id was already purged.
if err = obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, uploadID, ObjectOptions{}); err != nil { if err = obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, ObjectOptions{}); err != nil {
if _, ok := err.(InvalidUploadID); !ok { if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
@ -128,7 +128,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
if err != nil { if err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
@ -139,7 +139,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer os.RemoveAll(newDisk) defer os.RemoveAll(newDisk)
obj = initFSObjects(newDisk, t) obj = initFSObjects(newDisk, t)
if _, err = obj.PutObjectPart(GlobalContext, bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{}); err != nil { if _, err = obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{}); err != nil {
if !isSameType(err, BucketNotFound{}) { if !isSameType(err, BucketNotFound{}) {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
@ -161,7 +161,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
if err != nil { if err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
@ -172,7 +172,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer os.RemoveAll(newDisk) defer os.RemoveAll(newDisk)
obj = initFSObjects(newDisk, t) obj = initFSObjects(newDisk, t)
if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, uploadID, parts, ObjectOptions{}); err != nil { if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, parts, ObjectOptions{}); err != nil {
if !isSameType(err, BucketNotFound{}) { if !isSameType(err, BucketNotFound{}) {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
@ -194,19 +194,19 @@ func TestCompleteMultipartUpload(t *testing.T) {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
if err != nil { if err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
md5Hex := getMD5Hash(data) md5Hex := getMD5Hash(data)
if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil { if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}} parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, uploadID, parts, ObjectOptions{}); err != nil { if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, parts, ObjectOptions{}); err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
} }
@ -231,7 +231,7 @@ func TestAbortMultipartUpload(t *testing.T) {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
uploadID, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}})
if err != nil { if err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
@ -239,10 +239,10 @@ func TestAbortMultipartUpload(t *testing.T) {
md5Hex := getMD5Hash(data) md5Hex := getMD5Hash(data)
opts := ObjectOptions{} opts := ObjectOptions{}
if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), opts); err != nil { if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), opts); err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
if err := obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, uploadID, opts); err != nil { if err := obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, opts); err != nil {
t.Fatal("Unexpected error ", err) t.Fatal("Unexpected error ", err)
} }
} }


@ -435,7 +435,7 @@ func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string,
} }
// NewMultipartUpload uploads object in multiple parts // NewMultipartUpload uploads object in multiple parts
func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (uploadID string, err error) { func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket, object string, o minio.ObjectOptions) (result *minio.NewMultipartUploadResult, err error) {
var sseOpts encrypt.ServerSide var sseOpts encrypt.ServerSide
if o.ServerSideEncryption == nil { if o.ServerSideEncryption == nil {
return l.s3Objects.NewMultipartUpload(ctx, bucket, object, minio.ObjectOptions{UserDefined: o.UserDefined}) return l.s3Objects.NewMultipartUpload(ctx, bucket, object, minio.ObjectOptions{UserDefined: o.UserDefined})
@ -446,7 +446,7 @@ func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, ob
sseOpts = o.ServerSideEncryption sseOpts = o.ServerSideEncryption
} }
uploadID, err = l.s3Objects.NewMultipartUpload(ctx, bucket, getGWContentPath(object), minio.ObjectOptions{ServerSideEncryption: sseOpts}) result, err = l.s3Objects.NewMultipartUpload(ctx, bucket, getGWContentPath(object), minio.ObjectOptions{ServerSideEncryption: sseOpts})
if err != nil { if err != nil {
return return
} }
@ -454,11 +454,11 @@ func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, ob
gwmeta := newGWMetaV1() gwmeta := newGWMetaV1()
gwmeta.Meta = o.UserDefined gwmeta.Meta = o.UserDefined
gwmeta.Stat.ModTime = time.Now().UTC() gwmeta.Stat.ModTime = time.Now().UTC()
err = l.writeGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID), gwmeta, minio.ObjectOptions{}) err = l.writeGWMetadata(ctx, bucket, getTmpDareMetaPath(object, result.UploadID), gwmeta, minio.ObjectOptions{})
if err != nil { if err != nil {
return uploadID, minio.ErrorRespToObjectError(err) return nil, minio.ErrorRespToObjectError(err)
} }
return uploadID, nil return result, nil
} }
// PutObject creates a new object with the incoming data, // PutObject creates a new object with the incoming data,


@ -607,13 +607,13 @@ func (l *s3Objects) ListMultipartUploads(ctx context.Context, bucket string, pre
} }
// NewMultipartUpload upload object in multiple parts // NewMultipartUpload upload object in multiple parts
func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (uploadID string, err error) { func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket, object string, o minio.ObjectOptions) (result *minio.NewMultipartUploadResult, err error) {
var tagMap map[string]string var tagMap map[string]string
userDefined := minio.CloneMSS(o.UserDefined) userDefined := minio.CloneMSS(o.UserDefined)
if tagStr, ok := userDefined[xhttp.AmzObjectTagging]; ok { if tagStr, ok := userDefined[xhttp.AmzObjectTagging]; ok {
tagObj, err := tags.Parse(tagStr, true) tagObj, err := tags.Parse(tagStr, true)
if err != nil { if err != nil {
return uploadID, minio.ErrorRespToObjectError(err, bucket, object) return nil, minio.ErrorRespToObjectError(err, bucket, object)
} }
tagMap = tagObj.ToMap() tagMap = tagObj.ToMap()
delete(userDefined, xhttp.AmzObjectTagging) delete(userDefined, xhttp.AmzObjectTagging)
@ -624,11 +624,11 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
ServerSideEncryption: o.ServerSideEncryption, ServerSideEncryption: o.ServerSideEncryption,
UserTags: tagMap, UserTags: tagMap,
} }
uploadID, err = l.Client.NewMultipartUpload(ctx, bucket, object, opts) uploadID, err := l.Client.NewMultipartUpload(ctx, bucket, object, opts)
if err != nil { if err != nil {
return uploadID, minio.ErrorRespToObjectError(err, bucket, object) return nil, minio.ErrorRespToObjectError(err, bucket, object)
} }
return uploadID, nil return &minio.NewMultipartUploadResult{UploadID: uploadID}, nil
} }
// PutObjectPart puts a part of object in bucket // PutObjectPart puts a part of object in bucket


@ -23,7 +23,7 @@ import (
"math" "math"
"time" "time"
humanize "github.com/dustin/go-humanize" "github.com/dustin/go-humanize"
"github.com/minio/madmin-go" "github.com/minio/madmin-go"
"github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash"
@ -178,6 +178,9 @@ type ObjectInfo struct {
NumVersions int NumVersions int
// The modtime of the successor object version if any // The modtime of the successor object version if any
SuccessorModTime time.Time SuccessorModTime time.Time
// Checksum values
Checksum map[string]string
} }
// ArchiveInfo returns any saved zip archive meta information // ArchiveInfo returns any saved zip archive meta information
@ -329,6 +332,9 @@ type ListPartsInfo struct {
// Any metadata set during InitMultipartUpload, including encryption headers. // Any metadata set during InitMultipartUpload, including encryption headers.
UserDefined map[string]string UserDefined map[string]string
// ChecksumAlgorithm if set
ChecksumAlgorithm string
} }
// Lookup - returns if uploadID is valid // Lookup - returns if uploadID is valid
@ -505,6 +511,12 @@ type PartInfo struct {
// Decompressed Size. // Decompressed Size.
ActualSize int64 ActualSize int64
// Checksum values
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
} }
// CompletePart - represents the part that was completed, this is sent by the client // CompletePart - represents the part that was completed, this is sent by the client
@ -516,6 +528,12 @@ type CompletePart struct {
// Entity tag returned when the part was uploaded. // Entity tag returned when the part was uploaded.
ETag string ETag string
// Checksum values. Optional.
ChecksumCRC32 string
ChecksumCRC32C string
ChecksumSHA1 string
ChecksumSHA256 string
} }
// CompletedParts - is a collection satisfying sort.Interface. // CompletedParts - is a collection satisfying sort.Interface.
@ -530,3 +548,9 @@ func (a CompletedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
type CompleteMultipartUpload struct { type CompleteMultipartUpload struct {
Parts []CompletePart `xml:"Part"` Parts []CompletePart `xml:"Part"`
} }
// NewMultipartUploadResult contains information about a newly created multipart upload.
type NewMultipartUploadResult struct {
UploadID string
ChecksumAlgo string
}
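For orientation (not part of the diff): a minimal caller-side sketch of the new contract, mirroring the call sites updated throughout this commit. It assumes an ObjectLayer value named objectAPI; etag and partCRC are placeholder variables.

res, err := objectAPI.NewMultipartUpload(ctx, bucket, object, opts)
if err != nil {
	return err
}
uploadID := res.UploadID // previously the bare string return value
// res.ChecksumAlgo, when set, names the x-amz-checksum algorithm the upload was
// initiated with (presumably reported later via ListPartsInfo.ChecksumAlgorithm).

// The new optional CompletePart fields can carry the per-part checksum values
// reported by PutObjectPart, e.g. ChecksumCRC32C.
parts := []CompletePart{{PartNumber: 1, ETag: etag, ChecksumCRC32C: partCRC}}
_, err = objectAPI.CompleteMultipartUpload(ctx, bucket, object, uploadID, parts, opts)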


@ -26,6 +26,7 @@ import (
"github.com/minio/madmin-go" "github.com/minio/madmin-go"
"github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags" "github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/hash"
"github.com/minio/pkg/bucket/policy" "github.com/minio/pkg/bucket/policy"
"github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/replication"
@ -59,6 +60,8 @@ type ObjectOptions struct {
Transition TransitionOptions Transition TransitionOptions
Expiration ExpirationOptions Expiration ExpirationOptions
WantChecksum *hash.Checksum // x-amz-checksum-XXX checksum sent to PutObject/ CompleteMultipartUpload.
NoDecryption bool // indicates if the stream must be decrypted. NoDecryption bool // indicates if the stream must be decrypted.
PreserveETag string // preserves this etag during a PUT call. PreserveETag string // preserves this etag during a PUT call.
NoLock bool // indicates to lower layers if the caller is expecting to hold locks. NoLock bool // indicates to lower layers if the caller is expecting to hold locks.
@ -222,7 +225,7 @@ type ObjectLayer interface {
// Multipart operations. // Multipart operations.
ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (result *NewMultipartUploadResult, err error)
CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error) startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)


@ -25,7 +25,7 @@ import (
"strings" "strings"
"testing" "testing"
humanize "github.com/dustin/go-humanize" "github.com/dustin/go-humanize"
"github.com/minio/minio/internal/config/storageclass" "github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash"
) )
@ -62,12 +62,12 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, "\\", opts) res, err := obj.NewMultipartUpload(context.Background(), bucket, "\\", opts)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
err = obj.AbortMultipartUpload(context.Background(), bucket, "\\", uploadID, opts) err = obj.AbortMultipartUpload(context.Background(), bucket, "\\", res.UploadID, opts)
if err != nil { if err != nil {
switch err.(type) { switch err.(type) {
case InvalidUploadID: case InvalidUploadID:
@ -95,10 +95,11 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts) res, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadID := res.UploadID
abortTestCases := []struct { abortTestCases := []struct {
bucketName string bucketName string
@ -173,11 +174,12 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts) res, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadID := res.UploadID
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", MakeBucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", MakeBucketOptions{})
if err != nil { if err != nil {
@ -204,50 +206,50 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
}{ }{
// Test case 1-4. // Test case 1-4.
// Cases with invalid bucket name. // Cases with invalid bucket name.
{".test", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: .test")}, {bucketName: ".test", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: .test")},
{"------", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: ------")}, {bucketName: "------", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: ------")},
{ {
"$this-is-not-valid-too", "obj", "", 1, "", "", "", 0, false, "", bucketName: "$this-is-not-valid-too", objName: "obj", PartID: 1,
fmt.Errorf("%s", "Bucket not found: $this-is-not-valid-too"), expectedError: fmt.Errorf("%s", "Bucket not found: $this-is-not-valid-too"),
}, },
{"a", "obj", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: a")}, {bucketName: "a", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: a")},
// Test case - 5. // Test case - 5.
// Case with invalid object names. // Case with invalid object names.
{bucket, "", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Object name invalid: minio-bucket/")}, {bucketName: bucket, PartID: 1, expectedError: fmt.Errorf("%s", "Object name invalid: minio-bucket/")},
// Test case - 6. // Test case - 6.
// Valid object and bucket names but non-existent bucket. // Valid object and bucket names but non-existent bucket.
{"abc", "def", "", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Bucket not found: abc")}, {bucketName: "abc", objName: "def", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: abc")},
// Test Case - 7. // Test Case - 7.
// Existing bucket, but using a bucket on which NewMultipartUpload is not Initiated. // Existing bucket, but using a bucket on which NewMultipartUpload is not Initiated.
{"unused-bucket", "def", "xyz", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")}, {bucketName: "unused-bucket", objName: "def", uploadID: "xyz", PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 8. // Test Case - 8.
// Existing bucket, object name different from which NewMultipartUpload is constructed from. // Existing bucket, object name different from which NewMultipartUpload is constructed from.
// Expecting "Invalid upload id". // Expecting "Invalid upload id".
{bucket, "def", "xyz", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")}, {bucketName: bucket, objName: "def", uploadID: "xyz", PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 9. // Test Case - 9.
// Existing bucket, bucket and object name are the ones from which NewMultipartUpload is constructed from. // Existing bucket, bucket and object name are the ones from which NewMultipartUpload is constructed from.
// But the uploadID is invalid. // But the uploadID is invalid.
// Expecting "Invalid upload id". // Expecting "Invalid upload id".
{bucket, object, "xyz", 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id xyz")}, {bucketName: bucket, objName: object, uploadID: "xyz", PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id xyz")},
// Test Case - 10. // Test Case - 10.
// Case with valid UploadID, existing bucket name. // Case with valid UploadID, existing bucket name.
// But using the bucket name from which NewMultipartUpload is not constructed from. // But using the bucket name from which NewMultipartUpload is not constructed from.
{"unused-bucket", object, uploadID, 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)}, {bucketName: "unused-bucket", objName: object, uploadID: uploadID, PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id "+uploadID)},
// Test Case - 11. // Test Case - 11.
// Case with valid UploadID, existing bucket name. // Case with valid UploadID, existing bucket name.
// But using the object name from which NewMultipartUpload is not constructed from. // But using the object name from which NewMultipartUpload is not constructed from.
{bucket, "none-object", uploadID, 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)}, {bucketName: bucket, objName: "none-object", uploadID: uploadID, PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id "+uploadID)},
// Test case - 12. // Test case - 12.
// Input to replicate Md5 mismatch. // Input to replicate Md5 mismatch.
{ {
bucket, object, uploadID, 1, "", "d41d8cd98f00b204e9800998ecf8427f", "", 0, false, "", bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputMd5: "d41d8cd98f00b204e9800998ecf8427f",
hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}, expectedError: hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"},
}, },
// Test case - 13. // Test case - 13.
// When incorrect sha256 is provided. // When incorrect sha256 is provided.
{ {
bucket, object, uploadID, 1, "", "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", 0, false, "", bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854",
hash.SHA256Mismatch{ expectedError: hash.SHA256Mismatch{
ExpectedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", ExpectedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854",
CalculatedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", CalculatedSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
}, },
@ -255,22 +257,22 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
// Test case - 14. // Test case - 14.
// Input with size more than the size of actual data inside the reader. // Input with size more than the size of actual data inside the reader.
{ {
bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f3335", "", int64(len("abcd") + 1), false, "", bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f3335", intputDataSize: int64(len("abcd") + 1),
hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f3335", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"}, expectedError: hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f3335", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
}, },
// Test case - 15. // Test case - 15.
// Input with size less than the size of actual data inside the reader. // Input with size less than the size of actual data inside the reader.
{ {
bucket, object, uploadID, 1, "abcd", "900150983cd24fb0d6963f7d28e17f73", "", int64(len("abcd") - 1), false, "", bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "900150983cd24fb0d6963f7d28e17f73", intputDataSize: int64(len("abcd") - 1),
hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}, expectedError: hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"},
}, },
// Test case - 16-19. // Test case - 16-19.
// Validating for success cases. // Validating for success cases.
{bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", int64(len("abcd")), true, "", nil}, {bucketName: bucket, objName: object, uploadID: uploadID, PartID: 1, inputReaderData: "abcd", inputMd5: "e2fc714c4727ee9395f324cd2e7f331f", inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", intputDataSize: int64(len("abcd")), shouldPass: true},
{bucket, object, uploadID, 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", "e5e088a0b66163a0a26a5e053d2a4496dc16ab6e0e3dd1adf2d16aa84a078c9d", int64(len("efgh")), true, "", nil}, {bucketName: bucket, objName: object, uploadID: uploadID, PartID: 2, inputReaderData: "efgh", inputMd5: "1f7690ebdd9b4caf8fab49ca1757bf27", inputSHA256: "e5e088a0b66163a0a26a5e053d2a4496dc16ab6e0e3dd1adf2d16aa84a078c9d", intputDataSize: int64(len("efgh")), shouldPass: true},
{bucket, object, uploadID, 3, "ijkl", "09a0877d04abf8759f99adec02baf579", "005c19658919186b85618c5870463eec8d9b8c1a9d00208a5352891ba5bbe086", int64(len("abcd")), true, "", nil}, {bucketName: bucket, objName: object, uploadID: uploadID, PartID: 3, inputReaderData: "ijkl", inputMd5: "09a0877d04abf8759f99adec02baf579", inputSHA256: "005c19658919186b85618c5870463eec8d9b8c1a9d00208a5352891ba5bbe086", intputDataSize: int64(len("abcd")), shouldPass: true},
{bucket, object, uploadID, 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", "f1afc31479522d6cff1ed068f93998f05a8cd3b22f5c37d7f307084f62d1d270", int64(len("abcd")), true, "", nil}, {bucketName: bucket, objName: object, uploadID: uploadID, PartID: 4, inputReaderData: "mnop", inputMd5: "e132e96a5ddad6da8b07bba6f6131fef", inputSHA256: "f1afc31479522d6cff1ed068f93998f05a8cd3b22f5c37d7f307084f62d1d270", intputDataSize: int64(len("abcd")), shouldPass: true},
} }
// Validate all the test cases. // Validate all the test cases.
@ -320,13 +322,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts) res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
// bucketnames[1]. // bucketnames[1].
// objectNames[0]. // objectNames[0].
@ -340,13 +342,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times. // Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
// Used to test the listing for the case of multiple uploadID's for a given object. // Used to test the listing for the case of multiple uploadID's for a given object.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts) res, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
} }
// Bucket to test for multiple objects, each with unique UUID. // Bucket to test for multiple objects, each with unique UUID.
@ -361,14 +363,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// Initiate Multipart Upload on bucketNames[2]. // Initiate Multipart Upload on bucketNames[2].
// Used to test the listing for the case of multiple objects for a given bucket. // Used to test the listing for the case of multiple objects for a given bucket.
for i := 0; i < 6; i++ { for i := 0; i < 6; i++ {
var uploadID string res, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts)
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// uploadIds [4-9]. // uploadIds [4-9].
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
} }
// Create multipart parts. // Create multipart parts.
// Need parts to be uploaded before MultipartLists can be called and tested. // Need parts to be uploaded before MultipartLists can be called and tested.
@ -1202,7 +1203,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
} }
opts := ObjectOptions{} opts := ObjectOptions{}
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts) res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -1211,7 +1212,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
// Remove some random disk. // Remove some random disk.
removeDiskN(disks, 1) removeDiskN(disks, 1)
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
// Create multipart parts. // Create multipart parts.
// Need parts to be uploaded before MultipartLists can be called and tested. // Need parts to be uploaded before MultipartLists can be called and tested.
@ -1445,13 +1446,13 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts) res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
// Create multipart parts. // Create multipart parts.
// Need parts to be uploaded before MultipartLists can be called and tested. // Need parts to be uploaded before MultipartLists can be called and tested.
@ -1672,7 +1673,6 @@ func TestObjectCompleteMultipartUpload(t *testing.T) {
// Tests validate CompleteMultipart functionality. // Tests validate CompleteMultipart functionality.
func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t TestErrHandler) { func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t TestErrHandler) {
var err error var err error
var uploadID string
bucketNames := []string{"minio-bucket", "minio-2-bucket"} bucketNames := []string{"minio-bucket", "minio-2-bucket"}
objectNames := []string{"minio-object-1.txt"} objectNames := []string{"minio-object-1.txt"}
uploadIDs := []string{} uploadIDs := []string{}
@ -1687,13 +1687,13 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-Id": "id"}}) res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-Id": "id"}})
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
} }
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
// Parts with size greater than 5 MiB. // Parts with size greater than 5 MiB.
// Generating a 6MiB byte array. // Generating a 6MiB byte array.
validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte) validPart := bytes.Repeat([]byte("abcdef"), 1*humanize.MiByte)


@ -28,6 +28,7 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger"
) )
@ -231,6 +232,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
} }
} }
} }
mtimeStr := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceMTime)) mtimeStr := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceMTime))
mtime := UTCNow() mtime := UTCNow()
if mtimeStr != "" { if mtimeStr != "" {
@ -289,6 +291,15 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
metadata["etag"] = etag metadata["etag"] = etag
} }
wantCRC, err := hash.GetContentChecksum(r)
if err != nil {
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
Err: fmt.Errorf("invalid/unknown checksum sent: %v", err),
}
}
// In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it // In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it
// is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls // is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls
if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) { if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) {
@ -299,6 +310,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
Versioned: versioned, Versioned: versioned,
VersionSuspended: versionSuspended, VersionSuspended: versionSuspended,
MTime: mtime, MTime: mtime,
WantChecksum: wantCRC,
}, nil }, nil
} }
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) { if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
@ -307,6 +319,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
opts.Versioned = versioned opts.Versioned = versioned
opts.VersionSuspended = versionSuspended opts.VersionSuspended = versionSuspended
opts.UserDefined = metadata opts.UserDefined = metadata
opts.WantChecksum = wantCRC
return return
} }
if crypto.S3KMS.IsRequested(r.Header) { if crypto.S3KMS.IsRequested(r.Header) {
@ -325,6 +338,7 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
Versioned: versioned, Versioned: versioned,
VersionSuspended: versionSuspended, VersionSuspended: versionSuspended,
MTime: mtime, MTime: mtime,
WantChecksum: wantCRC,
}, nil }, nil
} }
// default case of passing encryption headers and UserDefined metadata to backend // default case of passing encryption headers and UserDefined metadata to backend
@ -339,6 +353,8 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
opts.ReplicationSourceLegalholdTimestamp = lholdtimestmp opts.ReplicationSourceLegalholdTimestamp = lholdtimestmp
opts.ReplicationSourceRetentionTimestamp = retaintimestmp opts.ReplicationSourceRetentionTimestamp = retaintimestmp
opts.ReplicationSourceTaggingTimestamp = taggingtimestmp opts.ReplicationSourceTaggingTimestamp = taggingtimestmp
opts.WantChecksum = wantCRC
return opts, nil return opts, nil
} }
@ -389,6 +405,14 @@ func completeMultipartOpts(ctx context.Context, r *http.Request, bucket, object
} }
} }
} }
opts.WantChecksum, err = hash.GetContentChecksum(r)
if err != nil {
return opts, InvalidArgument{
Bucket: bucket,
Object: object,
Err: fmt.Errorf("invalid/unknown checksum sent: %v", err),
}
}
opts.MTime = mtime opts.MTime = mtime
opts.UserDefined = make(map[string]string) opts.UserDefined = make(map[string]string)
return opts, nil return opts, nil
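For context (not part of the diff), a hedged sketch of the flow these option-builder changes enable: the client declares a payload checksum via the S3 x-amz-checksum-* headers, hash.GetContentChecksum parses it, and the value is handed down as ObjectOptions.WantChecksum for verification against what the server computes. The header names follow the S3 convention; the exact parsing behaviour is assumed and the value is a placeholder.

// Client request headers on PutObject / CompleteMultipartUpload:
//   x-amz-sdk-checksum-algorithm: SHA256
//   x-amz-checksum-sha256: <base64 of the SHA-256 of the payload>
//
// Server side, as wired above:
wantCRC, err := hash.GetContentChecksum(r) // assumed to yield a nil checksum when no header is sent
if err != nil {
	return opts, InvalidArgument{Bucket: bucket, Object: object, Err: err}
}
opts.WantChecksum = wantCRC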


@ -28,7 +28,7 @@ import (
"path" "path"
"testing" "testing"
humanize "github.com/dustin/go-humanize" "github.com/dustin/go-humanize"
"github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash"
) )
@ -80,125 +80,110 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
expectedMd5 string expectedMd5 string
expectedError error expectedError error
}{ }{
// Test case 1-4.
// Cases with invalid bucket name. // Cases with invalid bucket name.
{".test", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: ".test"}}, 0: {bucketName: ".test", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: ".test"}},
{"------", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "------"}}, 1: {bucketName: "------", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "------"}},
{ 2: {
"$this-is-not-valid-too", "obj", []byte(""), nil, "", 0, "", bucketName: "$this-is-not-valid-too", objName: "obj", inputData: []byte(""),
BucketNotFound{Bucket: "$this-is-not-valid-too"}, expectedError: BucketNotFound{Bucket: "$this-is-not-valid-too"},
}, },
{"a", "obj", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "a"}}, 3: {bucketName: "a", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "a"}},
// Test case - 5.
// Case with invalid object names. // Case with invalid object names.
{bucket, "", []byte(""), nil, "", 0, "", ObjectNameInvalid{Bucket: bucket, Object: ""}}, 4: {bucketName: bucket, inputData: []byte(""), expectedError: ObjectNameInvalid{Bucket: bucket, Object: ""}},
// Test case - 6.
// Valid object and bucket names but non-existent bucket. // Valid object and bucket names but non-existent bucket.
{"abc", "def", []byte(""), nil, "", 0, "", BucketNotFound{Bucket: "abc"}}, 5: {bucketName: "abc", objName: "def", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "abc"}},
// Test case - 7.
// Input to replicate Md5 mismatch. // Input to replicate Md5 mismatch.
{ 6: {
bucket, object, []byte(""), bucketName: bucket, objName: object, inputData: []byte(""),
map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"}, inputMeta: map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"},
"", 0, "", expectedError: hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"},
hash.BadDigest{ExpectedMD5: "d41d8cd98f00b204e9800998ecf8427f", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"},
}, },
// Test case - 8.
// With incorrect sha256. // With incorrect sha256.
{ 7: {
bucket, object, []byte("abcd"), bucketName: bucket, objName: object, inputData: []byte("abcd"),
map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"},
"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", int64(len("abcd")), inputSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", intputDataSize: int64(len("abcd")),
"", expectedError: hash.SHA256Mismatch{
hash.SHA256Mismatch{
ExpectedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", ExpectedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580",
CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589",
}, },
}, },
// Test case - 9.
// Input with size more than the size of actual data inside the reader. // Input with size more than the size of actual data inside the reader.
{ 8: {
bucket, object, []byte("abcd"), bucketName: bucket, objName: object, inputData: []byte("abcd"),
map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, intputDataSize: int64(len("abcd") + 1),
"", int64(len("abcd") + 1), "", expectedError: hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
hash.BadDigest{ExpectedMD5: "e2fc714c4727ee9395f324cd2e7f331e", CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f"},
}, },
// Test case - 10.
// Input with size less than the size of actual data inside the reader. // Input with size less than the size of actual data inside the reader.
{ 9: {
bucket, object, []byte("abcd"), bucketName: bucket, objName: object, inputData: []byte("abcd"),
map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, inputMeta: map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, intputDataSize: int64(len("abcd") - 1),
"", int64(len("abcd") - 1), "", expectedError: hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"},
hash.BadDigest{ExpectedMD5: "900150983cd24fb0d6963f7d28e17f73", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"},
}, },
// Test case - 11-14.
// Validating for success cases. // Validating for success cases.
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, "", int64(len("abcd")), "", nil}, 10: {bucketName: bucket, objName: object, inputData: []byte("abcd"), inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, intputDataSize: int64(len("abcd"))},
{bucket, object, []byte("efgh"), map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, "", int64(len("efgh")), "", nil}, 11: {bucketName: bucket, objName: object, inputData: []byte("efgh"), inputMeta: map[string]string{"etag": "1f7690ebdd9b4caf8fab49ca1757bf27"}, intputDataSize: int64(len("efgh"))},
{bucket, object, []byte("ijkl"), map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, "", int64(len("ijkl")), "", nil}, 12: {bucketName: bucket, objName: object, inputData: []byte("ijkl"), inputMeta: map[string]string{"etag": "09a0877d04abf8759f99adec02baf579"}, intputDataSize: int64(len("ijkl"))},
{bucket, object, []byte("mnop"), map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, "", int64(len("mnop")), "", nil}, 13: {bucketName: bucket, objName: object, inputData: []byte("mnop"), inputMeta: map[string]string{"etag": "e132e96a5ddad6da8b07bba6f6131fef"}, intputDataSize: int64(len("mnop"))},
// Test case 15-17.
// With no metadata // With no metadata
{bucket, object, data, nil, "", int64(len(data)), getMD5Hash(data), nil}, 14: {bucketName: bucket, objName: object, inputData: data, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
{bucket, object, nilBytes, nil, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, 15: {bucketName: bucket, objName: object, inputData: nilBytes, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
{bucket, object, fiveMBBytes, nil, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, 16: {bucketName: bucket, objName: object, inputData: fiveMBBytes, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
// Test case 18-20.
// With arbitrary metadata // With arbitrary metadata
{bucket, object, data, map[string]string{"answer": "42"}, "", int64(len(data)), getMD5Hash(data), nil}, 17: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
{bucket, object, nilBytes, map[string]string{"answer": "42"}, "", int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, 18: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
{bucket, object, fiveMBBytes, map[string]string{"answer": "42"}, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, 19: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: map[string]string{"answer": "42"}, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
// Test case 21-23.
// With valid md5sum and sha256. // With valid md5sum and sha256.
{bucket, object, data, md5Header(data), getSHA256Hash(data), int64(len(data)), getMD5Hash(data), nil}, 20: {bucketName: bucket, objName: object, inputData: data, inputMeta: md5Header(data), inputSHA256: getSHA256Hash(data), intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
{bucket, object, nilBytes, md5Header(nilBytes), getSHA256Hash(nilBytes), int64(len(nilBytes)), getMD5Hash(nilBytes), nil}, 21: {bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: md5Header(nilBytes), inputSHA256: getSHA256Hash(nilBytes), intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes)},
{bucket, object, fiveMBBytes, md5Header(fiveMBBytes), getSHA256Hash(fiveMBBytes), int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), nil}, 22: {bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: md5Header(fiveMBBytes), inputSHA256: getSHA256Hash(fiveMBBytes), intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes)},
// Test case 24-26.
// data with invalid md5sum in header // data with invalid md5sum in header
{ 23: {
bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data), bucketName: bucket, objName: object, inputData: data, inputMeta: invalidMD5Header, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data),
hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)}, expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(data)},
}, },
{ 24: {
bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes), bucketName: bucket, objName: object, inputData: nilBytes, inputMeta: invalidMD5Header, intputDataSize: int64(len(nilBytes)), expectedMd5: getMD5Hash(nilBytes),
hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)}, expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(nilBytes)},
}, },
{ 25: {
bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), bucketName: bucket, objName: object, inputData: fiveMBBytes, inputMeta: invalidMD5Header, intputDataSize: int64(len(fiveMBBytes)), expectedMd5: getMD5Hash(fiveMBBytes),
hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)}, expectedError: hash.BadDigest{ExpectedMD5: invalidMD5, CalculatedMD5: getMD5Hash(fiveMBBytes)},
}, },
// Test case 27-29.
// data with size different from the actual number of bytes available in the reader // data with size different from the actual number of bytes available in the reader
{bucket, object, data, nil, "", int64(len(data) - 1), getMD5Hash(data[:len(data)-1]), nil}, 26: {bucketName: bucket, objName: object, inputData: data, intputDataSize: int64(len(data) - 1), expectedMd5: getMD5Hash(data[:len(data)-1])},
{bucket, object, nilBytes, nil, "", int64(len(nilBytes) + 1), getMD5Hash(nilBytes), IncompleteBody{Bucket: bucket, Object: object}}, 27: {bucketName: bucket, objName: object, inputData: nilBytes, intputDataSize: int64(len(nilBytes) + 1), expectedMd5: getMD5Hash(nilBytes), expectedError: IncompleteBody{Bucket: bucket, Object: object}},
{bucket, object, fiveMBBytes, nil, "", 0, getMD5Hash(fiveMBBytes), nil}, 28: {bucketName: bucket, objName: object, inputData: fiveMBBytes, expectedMd5: getMD5Hash(fiveMBBytes)},
// Test case 30
// valid data with X-Amz-Meta- meta // valid data with X-Amz-Meta- meta
{bucket, object, data, map[string]string{"X-Amz-Meta-AppID": "a42"}, "", int64(len(data)), getMD5Hash(data), nil}, 29: {bucketName: bucket, objName: object, inputData: data, inputMeta: map[string]string{"X-Amz-Meta-AppID": "a42"}, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
// Test case 31
// Put an empty object with a trailing slash // Put an empty object with a trailing slash
{bucket, "emptydir/", []byte{}, nil, "", 0, getMD5Hash([]byte{}), nil}, 30: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})},
// Test case 32
// Put an object inside the empty directory // Put an object inside the empty directory
{bucket, "emptydir/" + object, data, nil, "", int64(len(data)), getMD5Hash(data), nil}, 31: {bucketName: bucket, objName: "emptydir/" + object, inputData: data, intputDataSize: int64(len(data)), expectedMd5: getMD5Hash(data)},
// Test case 33
// Put the empty object with a trailing slash again (refer to Test case 31), this needs to succeed // Put the empty object with a trailing slash again (refer to Test case 31), this needs to succeed
{bucket, "emptydir/", []byte{}, nil, "", 0, getMD5Hash([]byte{}), nil}, 32: {bucketName: bucket, objName: "emptydir/", inputData: []byte{}, expectedMd5: getMD5Hash([]byte{})},
}
// With invalid crc32.
33: {
bucketName: bucket, objName: object, inputData: []byte("abcd"),
inputMeta: map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f", "x-amz-checksum-crc32": "abcd"},
intputDataSize: int64(len("abcd")),
},
}
for i, testCase := range testCases { for i, testCase := range testCases {
in := mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256) in := mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256)
objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, in, ObjectOptions{UserDefined: testCase.inputMeta}) objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, in, ObjectOptions{UserDefined: testCase.inputMeta})
@ -403,11 +388,12 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
} }
opts := ObjectOptions{} opts := ObjectOptions{}
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts) res, err := obj.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadID := res.UploadID
// Upload part1. // Upload part1.
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte) fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)


@ -25,6 +25,7 @@ import (
"time" "time"
"github.com/minio/minio/internal/event" "github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger"
) )
@ -266,6 +267,7 @@ func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool) {
lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts()) lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts())
} }
} }
hash.AddChecksumHeader(w, objInfo.Checksum)
} }
func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toDel []ObjectToDelete) { func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toDel []ObjectToDelete) {


@ -519,6 +519,10 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
} }
} }
if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
hash.AddChecksumHeader(w, objInfo.Checksum)
}
if err = setObjectHeaders(w, objInfo, rs, opts); err != nil { if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
@ -783,6 +787,10 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
} }
} }
if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
hash.AddChecksumHeader(w, objInfo.Checksum)
}
// Set standard object headers. // Set standard object headers.
if err = setObjectHeaders(w, objInfo, rs, opts); err != nil { if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
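// Illustration (not part of the diff) of the read-side exchange added by the two hunks
// above; the checksum value is a placeholder:
//
//   GET /bucket/object HTTP/1.1
//   x-amz-checksum-mode: ENABLED
//
//   HTTP/1.1 200 OK
//   x-amz-checksum-crc32c: <base64 value>   (written by hash.AddChecksumHeader from objInfo.Checksum)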
@ -1740,7 +1748,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
if err = actualReader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
// Set compression metrics. // Set compression metrics.
var s2c io.ReadCloser var s2c io.ReadCloser
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header) wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
@ -1758,6 +1769,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
if err := hashReader.AddChecksum(r, size < 0); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
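AddChecksum is applied to both the pre-compression reader and the final hashReader; the second argument (size < 0) appears to allow the checksum to arrive as an HTTP trailer when the content length is unknown, as with streaming uploads. The verification idea itself is simple; a minimal hypothetical wrapper, assuming a header-supplied (non-trailing) checksum rather than the real internal/hash.Reader logic, might look like:

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"hash"
	"hash/crc32"
	"io"
)

// verifyingReader is an illustration only; the real logic lives in
// internal/hash.Reader.AddChecksum and also handles trailing checksums.
type verifyingReader struct {
	r    io.Reader
	h    hash.Hash
	want string // base64 of the expected raw digest
}

func (v *verifyingReader) Read(p []byte) (int, error) {
	n, err := v.r.Read(p)
	v.h.Write(p[:n])
	if err == io.EOF {
		if got := base64.StdEncoding.EncodeToString(v.h.Sum(nil)); got != v.want {
			return n, fmt.Errorf("checksum mismatch: got %s, want %s", got, v.want)
		}
	}
	return n, err
}

func main() {
	body := []byte("hello")
	h := crc32.New(crc32.IEEETable)
	h.Write(body)
	want := base64.StdEncoding.EncodeToString(h.Sum(nil))

	vr := &verifyingReader{r: bytes.NewReader(body), h: crc32.New(crc32.IEEETable), want: want}
	if _, err := io.ReadAll(vr); err != nil {
		fmt.Println("rejected:", err)
	} else {
		fmt.Println("checksum verified")
	}
}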
rawReader := hashReader rawReader := hashReader
pReader := NewPutObjReader(rawReader) pReader := NewPutObjReader(rawReader)
@ -1895,7 +1910,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
} }
setPutObjHeaders(w, objInfo, false) setPutObjHeaders(w, objInfo, false)
writeSuccessResponseHeadersOnly(w) writeSuccessResponseHeadersOnly(w)
// Notify object created event. // Notify object created event.
@ -1915,6 +1929,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
enqueueTransitionImmediate(objInfo) enqueueTransitionImmediate(objInfo)
logger.LogIf(ctx, os.Sweep()) logger.LogIf(ctx, os.Sweep())
} }
// Do not send checksums in events to avoid leaks.
hash.TransferChecksumHeader(w, r)
} }
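hash.TransferChecksumHeader is likewise not part of this diff; from its call sites (after a successful PUT and, below, after each uploaded part) it plausibly copies whatever x-amz-checksum-* header the client sent back onto the response, so SDKs can confirm which algorithm was honored. A hypothetical stand-in, not the real function:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// transferChecksumHeader is a guess at the behavior: echo the first
// client-supplied checksum header onto the response.
func transferChecksumHeader(w http.ResponseWriter, r *http.Request) {
	for _, k := range []string{
		"x-amz-checksum-crc32", "x-amz-checksum-crc32c",
		"x-amz-checksum-sha1", "x-amz-checksum-sha256",
	} {
		if v := r.Header.Get(k); v != "" {
			w.Header().Set(k, v)
			return
		}
	}
}

func main() {
	req := httptest.NewRequest(http.MethodPut, "/mybucket/myobject", nil)
	req.Header.Set("x-amz-checksum-sha1", "Kq5sNclPz7QV2+lfQIuc6R7oRu0=") // sample value
	rec := httptest.NewRecorder()
	transferChecksumHeader(rec, req)
	fmt.Println(rec.Header().Get("x-amz-checksum-sha1"))
}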
// PutObjectExtractHandler - PUT Object extract is an extended API // PutObjectExtractHandler - PUT Object extract is an extended API
@ -2051,6 +2067,10 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
if err = hreader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil { if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@ -2220,6 +2240,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
} }
w.Header()[xhttp.ETag] = []string{`"` + hex.EncodeToString(hreader.MD5Current()) + `"`} w.Header()[xhttp.ETag] = []string{`"` + hex.EncodeToString(hreader.MD5Current()) + `"`}
hash.TransferChecksumHeader(w, r)
writeSuccessResponseHeadersOnly(w) writeSuccessResponseHeadersOnly(w)
} }

View File

@ -21,10 +21,13 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/md5" "crypto/md5"
"crypto/sha1"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"hash"
"hash/crc32"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
@ -37,8 +40,9 @@ import (
"sync" "sync"
"testing" "testing"
humanize "github.com/dustin/go-humanize" "github.com/dustin/go-humanize"
"github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"
ioutilx "github.com/minio/minio/internal/ioutil" ioutilx "github.com/minio/minio/internal/ioutil"
) )
@ -1295,26 +1299,29 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// byte data for PutObject. // byte data for PutObject.
bytesData := generateBytesData(6 * humanize.KiByte) bytesData := generateBytesData(6 * humanize.KiByte)
copySourceHeader := http.Header{} copySourceHeader := map[string]string{"X-Amz-Copy-Source": "somewhere"}
copySourceHeader.Set("X-Amz-Copy-Source", "somewhere") invalidMD5Header := map[string]string{"Content-Md5": "42"}
invalidMD5Header := http.Header{} inalidStorageClassHeader := map[string]string{xhttp.AmzStorageClass: "INVALID"}
invalidMD5Header.Set("Content-Md5", "42")
inalidStorageClassHeader := http.Header{}
inalidStorageClassHeader.Set(xhttp.AmzStorageClass, "INVALID")
addCustomHeaders := func(req *http.Request, customHeaders http.Header) { addCustomHeaders := func(req *http.Request, customHeaders map[string]string) {
for k, values := range customHeaders { for k, value := range customHeaders {
for _, value := range values {
req.Header.Set(k, value) req.Header.Set(k, value)
} }
} }
}
checksumData := func(b []byte, h hash.Hash) string {
h.Reset()
_, err := h.Write(b)
if err != nil {
t.Fatal(err)
}
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
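checksumData mirrors what a client must do to populate x-amz-checksum-*: base64-encode the raw digest bytes, not their hex form. A standalone equivalent (payload and output are illustrative only):

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("hello world")

	crc := crc32.New(crc32.IEEETable)
	crc.Write(payload) // hash.Hash.Write never returns an error
	fmt.Println("x-amz-checksum-crc32:", base64.StdEncoding.EncodeToString(crc.Sum(nil)))

	sha := sha1.New()
	sha.Write(payload)
	fmt.Println("x-amz-checksum-sha1:", base64.StdEncoding.EncodeToString(sha.Sum(nil)))
}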
// test cases with inputs and expected result for GetObject. // test cases with inputs and expected result for GetObject.
testCases := []struct { testCases := []struct {
bucketName string bucketName string
objectName string objectName string
headers http.Header headers map[string]string
data []byte data []byte
dataLen int dataLen int
accessKey string accessKey string
@ -1322,10 +1329,11 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
fault Fault fault Fault
// expected output. // expected output.
expectedRespStatus int // expected response status body. expectedRespStatus int // expected response status body.
wantAPICode string
wantHeaders map[string]string
}{ }{
// Test case - 1.
// Fetching the entire object and validating its contents. // Fetching the entire object and validating its contents.
{ 0: {
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
@ -1335,9 +1343,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
expectedRespStatus: http.StatusOK, expectedRespStatus: http.StatusOK,
}, },
// Test case - 2.
// Test Case with invalid accessID. // Test Case with invalid accessID.
{ 1: {
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
@ -1346,10 +1353,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusForbidden, expectedRespStatus: http.StatusForbidden,
wantAPICode: "InvalidAccessKeyId",
}, },
// Test case - 3.
// Test Case with invalid header key X-Amz-Copy-Source. // Test Case with invalid header key X-Amz-Copy-Source.
{ 2: {
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
headers: copySourceHeader, headers: copySourceHeader,
@ -1358,10 +1365,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest, expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidArgument",
}, },
// Test case - 4.
// Test Case with invalid Content-Md5 value // Test Case with invalid Content-Md5 value
{ 3: {
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
headers: invalidMD5Header, headers: invalidMD5Header,
@ -1370,10 +1377,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest, expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidDigest",
}, },
// Test case - 5.
// Test Case with object greater than maximum allowed size. // Test Case with object greater than maximum allowed size.
{ 4: {
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
@ -1382,10 +1389,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
fault: TooBigObject, fault: TooBigObject,
expectedRespStatus: http.StatusBadRequest, expectedRespStatus: http.StatusBadRequest,
wantAPICode: "EntityTooLarge",
}, },
// Test case - 6.
// Test Case with missing content length // Test Case with missing content length
{ 5: {
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
data: bytesData, data: bytesData,
@ -1394,10 +1401,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
fault: MissingContentLength, fault: MissingContentLength,
expectedRespStatus: http.StatusLengthRequired, expectedRespStatus: http.StatusLengthRequired,
wantAPICode: "MissingContentLength",
}, },
// Test case - 7.
// Test Case with invalid header key X-Amz-Storage-Class // Test Case with invalid header key X-Amz-Storage-Class
{ 6: {
bucketName: bucketName, bucketName: bucketName,
objectName: objectName, objectName: objectName,
headers: inalidStorageClassHeader, headers: inalidStorageClassHeader,
@ -1406,6 +1413,92 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest, expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidStorageClass",
},
// Invalid crc32
7: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32": "123"},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "InvalidArgument",
},
// Wrong crc32
8: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32": "MTIzNA=="},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "XAmzContentChecksumMismatch",
},
// Correct crc32
9: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32": checksumData(bytesData, crc32.New(crc32.IEEETable))},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-crc32": checksumData(bytesData, crc32.New(crc32.IEEETable))},
},
// Correct crc32c
10: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32c": checksumData(bytesData, crc32.New(crc32.MakeTable(crc32.Castagnoli)))},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-crc32c": checksumData(bytesData, crc32.New(crc32.MakeTable(crc32.Castagnoli)))},
},
// CRC32 as CRC32C
11: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-crc32c": checksumData(bytesData, crc32.New(crc32.IEEETable))},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
wantAPICode: "XAmzContentChecksumMismatch",
},
// SHA1
12: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-sha1": checksumData(bytesData, sha1.New())},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-sha1": checksumData(bytesData, sha1.New())},
},
// SHA256
13: {
bucketName: bucketName,
objectName: objectName,
headers: map[string]string{"x-amz-checksum-sha256": checksumData(bytesData, sha256.New())},
data: bytesData,
dataLen: len(bytesData),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
wantHeaders: map[string]string{"x-amz-checksum-sha256": checksumData(bytesData, sha256.New())},
}, },
} }
// Iterating over the cases, fetching the object validating the response. // Iterating over the cases, fetching the object validating the response.
@ -1415,9 +1508,9 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
// construct HTTP request for Get Object end point. // construct HTTP request for Get Object end point.
req, err = newTestSignedRequestV4(http.MethodPut, getPutObjectURL("", testCase.bucketName, testCase.objectName), req, err = newTestSignedRequestV4(http.MethodPut, getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, nil) int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, testCase.headers)
if err != nil { if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i+1, err) t.Fatalf("Test %d: Failed to create HTTP request for Put Object: <ERROR> %v", i, err)
} }
// Add test case specific headers to the request. // Add test case specific headers to the request.
addCustomHeaders(req, testCase.headers) addCustomHeaders(req, testCase.headers)
@ -1435,22 +1528,48 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
apiRouter.ServeHTTP(rec, req) apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status. // Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus { if rec.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code) b, _ := io.ReadAll(rec.Body)
t.Fatalf("Test %d: Expected the response status to be `%d`, but instead found `%d`: %s", i, testCase.expectedRespStatus, rec.Code, string(b))
}
if testCase.expectedRespStatus != http.StatusOK {
b, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatal(err)
}
var apiErr APIErrorResponse
err = xml.Unmarshal(b, &apiErr)
if err != nil {
t.Fatal(err)
}
gotErr := apiErr.Code
wantErr := testCase.wantAPICode
if gotErr != wantErr {
t.Errorf("test %d: want api error %q, got %q", i, wantErr, gotErr)
}
if testCase.wantHeaders != nil {
for k, v := range testCase.wantHeaders {
got := rec.Header().Get(k)
if got != v {
t.Errorf("Want header %s = %s, got %#v", k, v, rec.Header())
}
}
}
} }
if testCase.expectedRespStatus == http.StatusOK { if testCase.expectedRespStatus == http.StatusOK {
buffer := new(bytes.Buffer) buffer := new(bytes.Buffer)
// Fetch the object to check whether the content is same as the one uploaded via PutObject. // Fetch the object to check whether the content is same as the one uploaded via PutObject.
gr, err := obj.GetObjectNInfo(context.Background(), testCase.bucketName, testCase.objectName, nil, nil, readLock, opts) gr, err := obj.GetObjectNInfo(context.Background(), testCase.bucketName, testCase.objectName, nil, nil, readLock, opts)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
} }
if _, err = io.Copy(buffer, gr); err != nil { if _, err = io.Copy(buffer, gr); err != nil {
gr.Close() gr.Close()
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
} }
gr.Close() gr.Close()
if !bytes.Equal(bytesData, buffer.Bytes()) { if !bytes.Equal(bytesData, buffer.Bytes()) {
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i+1, instanceType) t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i, instanceType)
} }
buffer.Reset() buffer.Reset()
} }
@ -1460,10 +1579,10 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
recV2 := httptest.NewRecorder() recV2 := httptest.NewRecorder()
// construct HTTP request for PUT Object endpoint. // construct HTTP request for PUT Object endpoint.
reqV2, err = newTestSignedRequestV2(http.MethodPut, getPutObjectURL("", testCase.bucketName, testCase.objectName), reqV2, err = newTestSignedRequestV2(http.MethodPut, getPutObjectURL("", testCase.bucketName, testCase.objectName),
int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, nil) int64(testCase.dataLen), bytes.NewReader(testCase.data), testCase.accessKey, testCase.secretKey, testCase.headers)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutObject: <ERROR> %v", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to create HTTP request for PutObject: <ERROR> %v", i, instanceType, err)
} }
// Add test case specific headers to the request. // Add test case specific headers to the request.
@ -1482,7 +1601,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// Call the ServeHTTP to execute the handler. // Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2) apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus { if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code) b, _ := io.ReadAll(rec.Body)
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`: %s", i, instanceType, testCase.expectedRespStatus, recV2.Code, string(b))
} }
if testCase.expectedRespStatus == http.StatusOK { if testCase.expectedRespStatus == http.StatusOK {
@ -1490,17 +1610,26 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// Fetch the object to check whether the content is same as the one uploaded via PutObject. // Fetch the object to check whether the content is same as the one uploaded via PutObject.
gr, err := obj.GetObjectNInfo(context.Background(), testCase.bucketName, testCase.objectName, nil, nil, readLock, opts) gr, err := obj.GetObjectNInfo(context.Background(), testCase.bucketName, testCase.objectName, nil, nil, readLock, opts)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
} }
if _, err = io.Copy(buffer, gr); err != nil { if _, err = io.Copy(buffer, gr); err != nil {
gr.Close() gr.Close()
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i, instanceType, err)
} }
gr.Close() gr.Close()
if !bytes.Equal(bytesData, buffer.Bytes()) { if !bytes.Equal(bytesData, buffer.Bytes()) {
t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i+1, instanceType) t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the uploaded object doesn't match the original one.", i, instanceType)
} }
buffer.Reset() buffer.Reset()
if testCase.wantHeaders != nil {
for k, v := range testCase.wantHeaders {
got := recV2.Header().Get(k)
if got != v {
t.Errorf("Want header %s = %s, got %#v", k, v, recV2.Header())
}
}
}
} }
} }
@ -1585,11 +1714,12 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts) res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err) t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
} }
uploadID := res.UploadID
a := 0 a := 0
b := globalMinPartSize b := globalMinPartSize
@ -1701,11 +1831,12 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts) res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err) t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
} }
uploadID := res.UploadID
// test cases with inputs and expected result for Copy Object. // test cases with inputs and expected result for Copy Object.
testCases := []struct { testCases := []struct {
@ -2664,20 +2795,18 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
// object used for the test. // object used for the test.
objectName := "test-object-new-multipart" objectName := "test-object-new-multipart"
// uploadID obtained from NewMultipart upload.
var uploadID string
// upload IDs collected. // upload IDs collected.
var uploadIDs []string var uploadIDs []string
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
// initiate new multipart uploadID. // initiate new multipart uploadID.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts) res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err) t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
} }
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
} }
// Parts with size greater than 5 MiB. // Parts with size greater than 5 MiB.
@ -2778,7 +2907,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
s3MD5 := getCompleteMultipartMD5(inputParts[3].parts) s3MD5 := getCompleteMultipartMD5(inputParts[3].parts)
// generating the response body content for the success case. // generating the response body content for the success case.
successResponse := generateCompleteMultpartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), s3MD5) successResponse := generateCompleteMultpartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), ObjectInfo{ETag: s3MD5})
encodedSuccessResponse := encodeResponse(successResponse) encodedSuccessResponse := encodeResponse(successResponse)
ctx := context.Background() ctx := context.Background()
@ -3034,20 +3163,18 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
// object used for the test. // object used for the test.
objectName := "test-object-new-multipart" objectName := "test-object-new-multipart"
// uploadID obtained from NewMultipart upload.
var uploadID string
// upload IDs collected. // upload IDs collected.
var uploadIDs []string var uploadIDs []string
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
// initiate new multipart uploadID. // initiate new multipart uploadID.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts) res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err) t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
} }
uploadIDs = append(uploadIDs, uploadID) uploadIDs = append(uploadIDs, res.UploadID)
} }
// Parts with size greater than 5 MiB. // Parts with size greater than 5 MiB.
@ -3445,161 +3572,132 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts) res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err) t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
} }
uploadID := res.UploadID
uploadIDCopy := uploadID uploadIDCopy := uploadID
// expected error types for invalid inputs to PutObjectPartHandler.
noAPIErr := APIError{}
// expected error when content length is missing in the HTTP request.
missingContent := getAPIError(ErrMissingContentLength)
// expected error when content length is too large.
entityTooLarge := getAPIError(ErrEntityTooLarge)
// expected error when the signature check fails.
badSigning := getAPIError(ErrSignatureDoesNotMatch)
// expected error MD5 sum mismatch occurs.
badChecksum := getAPIError(ErrInvalidDigest)
// expected error when the part number in the request is invalid.
invalidPart := getAPIError(ErrInvalidPart)
// expected error when maxPart is beyond the limit.
invalidMaxParts := getAPIError(ErrInvalidMaxParts)
// expected error the when the uploadID is invalid.
noSuchUploadID := getAPIError(ErrNoSuchUpload)
// expected error when InvalidAccessID is set.
invalidAccessID := getAPIError(ErrInvalidAccessKeyID)
// SignatureMismatch for various signing types // SignatureMismatch for various signing types
testCases := []struct { testCases := []struct {
objectName string objectName string
reader io.ReadSeeker content string
partNumber string partNumber string
fault Fault fault Fault
accessKey string accessKey string
secretKey string secretKey string
expectedAPIError APIError expectedAPIError APIErrorCode
}{ }{
// Test case - 1.
// Success case. // Success case.
{ 0: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "1", partNumber: "1",
fault: None, fault: None,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: noAPIErr, expectedAPIError: -1,
}, },
// Test case - 2.
// Case where part number is invalid. // Case where part number is invalid.
{ 1: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "9999999999999999999", partNumber: "9999999999999999999",
fault: None, fault: None,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: invalidPart, expectedAPIError: ErrInvalidPart,
}, },
// Test case - 3.
// Case where the part number has exceeded the max allowed parts in an upload. // Case where the part number has exceeded the max allowed parts in an upload.
{ 2: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: strconv.Itoa(globalMaxPartID + 1), partNumber: strconv.Itoa(globalMaxPartID + 1),
fault: None, fault: None,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: invalidMaxParts, expectedAPIError: ErrInvalidMaxParts,
}, },
// Test case - 4.
// Case where the content length is not set in the HTTP request. // Case where the content length is not set in the HTTP request.
{ 3: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "1", partNumber: "1",
fault: MissingContentLength, fault: MissingContentLength,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: missingContent, expectedAPIError: ErrMissingContentLength,
}, },
// Test case - 5.
// case where the object size is set to a value greater than the max allowed size. // case where the object size is set to a value greater than the max allowed size.
{ 4: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "1", partNumber: "1",
fault: TooBigObject, fault: TooBigObject,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: entityTooLarge, expectedAPIError: ErrEntityTooLarge,
}, },
// Test case - 6.
// case where a signature mismatch is introduced and the response is validated. // case where a signature mismatch is introduced and the response is validated.
{ 5: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "1", partNumber: "1",
fault: BadSignature, fault: BadSignature,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: badSigning, expectedAPIError: ErrSignatureDoesNotMatch,
}, },
// Test case - 7.
// Case where incorrect checksum is set and the error response // Case where incorrect checksum is set and the error response
// is asserted with the expected error response. // is asserted with the expected error response.
{ 6: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "1", partNumber: "1",
fault: BadMD5, fault: BadMD5,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: badChecksum, expectedAPIError: ErrInvalidDigest,
}, },
// Test case - 8.
// case where a non-existent uploadID is set. // case where a non-existent uploadID is set.
{ 7: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "1", partNumber: "1",
fault: MissingUploadID, fault: MissingUploadID,
accessKey: credentials.AccessKey, accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: noSuchUploadID, expectedAPIError: ErrNoSuchUpload,
}, },
// Test case - 9.
// case with invalid AccessID. // case with invalid AccessID.
// Forcing the signature check inside the handler to fail. // Forcing the signature check inside the handler to fail.
{ 8: {
objectName: testObject, objectName: testObject,
reader: bytes.NewReader([]byte("hello")), content: "hello",
partNumber: "1", partNumber: "1",
fault: None, fault: None,
accessKey: "Invalid-AccessID", accessKey: "Invalid-AccessID",
secretKey: credentials.SecretKey, secretKey: credentials.SecretKey,
expectedAPIError: invalidAccessID, expectedAPIError: ErrInvalidAccessKeyID,
}, },
} }
reqV2Str := "V2 Signed HTTP request" reqV2Str := "V2 Signed HTTP request"
reqV4Str := "V4 Signed HTTP request" reqV4Str := "V4 Signed HTTP request"
// collection of input HTTP request, ResponseRecorder and request type.
// Used to make a collection of V4 and V2 HTTP requests.
type inputReqRec struct { type inputReqRec struct {
req *http.Request req *http.Request
rec *httptest.ResponseRecorder rec *httptest.ResponseRecorder
@ -3608,7 +3706,9 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
for i, test := range testCases { for i, test := range testCases {
// Using sub-tests introduced in Go 1.7. // Using sub-tests introduced in Go 1.7.
t.Run(fmt.Sprintf("MinIO %s : Test case %d.", instanceType, i+1), func(t *testing.T) { t.Run(fmt.Sprintf("MinIO-%s-Test-%d.", instanceType, i), func(t *testing.T) {
// collection of input HTTP request, ResponseRecorder and request type.
// Used to make a collection of V4 and V2 HTTP requests.
var reqV4, reqV2 *http.Request var reqV4, reqV2 *http.Request
var recV4, recV2 *httptest.ResponseRecorder var recV4, recV2 *httptest.ResponseRecorder
@ -3623,7 +3723,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// constructing a v4 signed HTTP request. // constructing a v4 signed HTTP request.
reqV4, err = newTestSignedRequestV4(http.MethodPut, reqV4, err = newTestSignedRequestV4(http.MethodPut,
getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber), getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber),
0, test.reader, test.accessKey, test.secretKey, nil) int64(len(test.content)), bytes.NewReader([]byte(test.content)), test.accessKey, test.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create a signed V4 request to upload part for %s/%s: <ERROR> %v", t.Fatalf("Failed to create a signed V4 request to upload part for %s/%s: <ERROR> %v",
bucketName, test.objectName, err) bucketName, test.objectName, err)
@ -3632,10 +3732,10 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// construct HTTP request for PutObject Part Object endpoint. // construct HTTP request for PutObject Part Object endpoint.
reqV2, err = newTestSignedRequestV2(http.MethodPut, reqV2, err = newTestSignedRequestV2(http.MethodPut,
getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber), getPutObjectPartURL("", bucketName, test.objectName, uploadID, test.partNumber),
0, test.reader, test.accessKey, test.secretKey, nil) int64(len(test.content)), bytes.NewReader([]byte(test.content)), test.accessKey, test.secretKey, nil)
if err != nil { if err != nil {
t.Fatalf("Test %d %s Failed to create a V2 signed request to upload part for %s/%s: <ERROR> %v", i+1, instanceType, t.Fatalf("Test %d %s Failed to create a V2 signed request to upload part for %s/%s: <ERROR> %v", i, instanceType,
bucketName, test.objectName, err) bucketName, test.objectName, err)
} }
@ -3661,6 +3761,9 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// HTTP request type string for V4/V2 requests. // HTTP request type string for V4/V2 requests.
reqType := reqRec.reqType reqType := reqRec.reqType
// Clone so we don't retain values we do not want.
req.Header = req.Header.Clone()
// introduce faults in the request. // introduce faults in the request.
// deliberately introducing the invalid value to be able to assert the response with the expected error response. // deliberately introducing the invalid value to be able to assert the response with the expected error response.
switch test.fault { switch test.fault {
@ -3684,7 +3787,13 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
apiRouter.ServeHTTP(rec, req) apiRouter.ServeHTTP(rec, req)
// validate the error response. // validate the error response.
if test.expectedAPIError != noAPIErr { want := getAPIError(test.expectedAPIError)
if test.expectedAPIError == -1 {
want.HTTPStatusCode = 200
want.Code = "<no error>"
want.Description = "<no error>"
}
if rec.Code != http.StatusOK {
var errBytes []byte var errBytes []byte
// read the response body. // read the response body.
errBytes, err = ioutil.ReadAll(rec.Result().Body) errBytes, err = ioutil.ReadAll(rec.Result().Body)
@ -3700,14 +3809,16 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
reqType, bucketName, test.objectName, err) reqType, bucketName, test.objectName, err)
} }
// Validate whether the error has occurred for the expected reason. // Validate whether the error has occurred for the expected reason.
if test.expectedAPIError.Code != errXML.Code { if want.Code != errXML.Code {
t.Errorf("%s, Expected to fail with error \"%s\", but received \"%s\".", t.Errorf("%s, Expected to fail with error \"%s\", but received \"%s\": %q.",
reqType, test.expectedAPIError.Code, errXML.Code) reqType, want.Code, errXML.Code, errXML.Message)
} }
// Validate the HTTP response status code with the expected one. // Validate the HTTP response status code with the expected one.
if test.expectedAPIError.HTTPStatusCode != rec.Code { if want.HTTPStatusCode != rec.Code {
t.Errorf("%s, Expected the HTTP response status code to be %d, got %d.", reqType, test.expectedAPIError.HTTPStatusCode, rec.Code) t.Errorf("%s, Expected the HTTP response status code to be %d, got %d.", reqType, want.HTTPStatusCode, rec.Code)
} }
} else if want.HTTPStatusCode != http.StatusOK {
t.Errorf("got 200 ok, want %d", rec.Code)
} }
} }
}) })
@ -3849,12 +3960,12 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts) res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err) t.Fatalf("MinIO %s : <ERROR> %s", instanceType, err)
} }
uploadID := res.UploadID
uploadIDCopy := uploadID uploadIDCopy := uploadID
// create an object Part, will be used to test list object parts. // create an object Part, will be used to test list object parts.

View File

@ -155,6 +155,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano) metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus() metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
} }
// We need to preserve the encryption headers set in EncryptRequest, // We need to preserve the encryption headers set in EncryptRequest,
// so we do not want to override them, copy them instead. // so we do not want to override them, copy them instead.
for k, v := range encMetadata { for k, v := range encMetadata {
@ -174,18 +175,30 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
checksumType := hash.NewChecksumType(r.Header.Get(xhttp.AmzChecksumAlgo))
if checksumType.Is(hash.ChecksumInvalid) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequestParameter), r.URL)
return
} else if checksumType.IsSet() && !checksumType.Is(hash.ChecksumTrailing) {
opts.WantChecksum = &hash.Checksum{Type: checksumType}
}
newMultipartUpload := objectAPI.NewMultipartUpload newMultipartUpload := objectAPI.NewMultipartUpload
if api.CacheAPI() != nil { if api.CacheAPI() != nil {
newMultipartUpload = api.CacheAPI().NewMultipartUpload newMultipartUpload = api.CacheAPI().NewMultipartUpload
} }
uploadID, err := newMultipartUpload(ctx, bucket, object, opts) res, err := newMultipartUpload(ctx, bucket, object, opts)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
response := generateInitiateMultipartUploadResponse(bucket, object, uploadID) response := generateInitiateMultipartUploadResponse(bucket, object, res.UploadID)
if res.ChecksumAlgo != "" {
w.Header().Set(xhttp.AmzChecksumAlgo, res.ChecksumAlgo)
}
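NewMultipartUploadHandler now validates the requested algorithm up front and, unless trailing checksums were requested, records the expected type in opts.WantChecksum; the chosen algorithm is echoed back on success. Assuming xhttp.AmzChecksumAlgo maps to the standard x-amz-checksum-algorithm header, an unsigned, purely illustrative initiation request would look like:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical local endpoint; real requests also need Signature V4 headers.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:9000/mybucket/myobject?uploads", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("x-amz-checksum-algorithm", "CRC32") // assumed header name
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("echoed algorithm:", resp.Header.Get("x-amz-checksum-algorithm"))
}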
encodedSuccessResponse := encodeResponse(response) encodedSuccessResponse := encodeResponse(response)
// Write success response. // Write success response.
@ -350,6 +363,10 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
if err = actualReader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
// Set compression metrics. // Set compression metrics.
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header) wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
@ -367,6 +384,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
if err := hashReader.AddChecksum(r, size < 0); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
rawReader := hashReader rawReader := hashReader
pReader := NewPutObjReader(rawReader) pReader := NewPutObjReader(rawReader)
@ -476,6 +498,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
// Therefore, we have to set the ETag directly as map entry. // Therefore, we have to set the ETag directly as map entry.
w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""} w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""}
hash.TransferChecksumHeader(w, r)
writeSuccessResponseHeadersOnly(w) writeSuccessResponseHeadersOnly(w)
} }
@ -664,7 +687,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
// Get object location. // Get object location.
location := getObjectLocation(r, globalDomainNames, bucket, object) location := getObjectLocation(r, globalDomainNames, bucket, object)
// Generate complete multipart response. // Generate complete multipart response.
response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo.ETag) response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo)
var encodedSuccessResponse []byte var encodedSuccessResponse []byte
if !headerWritten { if !headerWritten {
encodedSuccessResponse = encodeResponse(response) encodedSuccessResponse = encodeResponse(response)
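The completion response is now built from the full ObjectInfo instead of just the ETag, presumably so it can carry checksum fields alongside the ETag. For multipart uploads S3 reports a composite value: the checksum of the concatenated raw part digests, suffixed with the part count. Whether this commit computes the final value exactly that way is not visible in this hunk; the sketch below only illustrates the composite scheme:

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

// compositeCRC32 illustrates an S3-style composite checksum: CRC32 over the
// concatenated raw part digests, reported as "<base64>-<partCount>".
func compositeCRC32(partChecksums []string) (string, error) {
	var concat []byte
	for _, c := range partChecksums {
		raw, err := base64.StdEncoding.DecodeString(c)
		if err != nil {
			return "", err
		}
		concat = append(concat, raw...)
	}
	h := crc32.New(crc32.IEEETable)
	h.Write(concat)
	return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(h.Sum(nil)), len(partChecksums)), nil
}

func main() {
	// Two illustrative 4-byte part digests (values are arbitrary).
	out, err := compositeCRC32([]string{"DUoRhQ==", "AAAAAA=="})
	fmt.Println(out, err)
}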

View File

@ -95,10 +95,12 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts) res, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadID := res.UploadID
// Create a byte array of 5MiB. // Create a byte array of 5MiB.
data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16) data := bytes.Repeat([]byte("0123456789abcdef"), 5*humanize.MiByte/16)
completedParts := CompleteMultipartUpload{} completedParts := CompleteMultipartUpload{}
@ -139,10 +141,11 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts) res, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", opts)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadID := res.UploadID
parts := make(map[int]string) parts := make(map[int]string)
metadata := make(map[string]string) metadata := make(map[string]string)

View File

@ -229,6 +229,10 @@ type FileInfo struct {
// This is mainly used for detecting a particular issue // This is mainly used for detecting a particular issue
// reported in https://github.com/minio/minio/pull/13803 // reported in https://github.com/minio/minio/pull/13803
DiskMTime time.Time `msg:"dmt"` DiskMTime time.Time `msg:"dmt"`
// Combined checksum when object was uploaded.
// Format is type:base64(checksum).
Checksum map[string]string `msg:"cs,allownil"`
} }
// Equals checks if fi(FileInfo) matches ofi(FileInfo) // Equals checks if fi(FileInfo) matches ofi(FileInfo)

View File

@ -602,8 +602,8 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return
} }
if zb0001 != 27 { if zb0001 != 28 {
err = msgp.ArrayError{Wanted: 27, Got: zb0001} err = msgp.ArrayError{Wanted: 28, Got: zb0001}
return return
} }
z.Volume, err = dc.ReadString() z.Volume, err = dc.ReadString()
@ -778,13 +778,51 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "DiskMTime") err = msgp.WrapError(err, "DiskMTime")
return return
} }
if dc.IsNil() {
err = dc.ReadNil()
if err != nil {
err = msgp.WrapError(err)
return
}
z.Checksum = nil
} else {
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
if z.Checksum == nil {
z.Checksum = make(map[string]string, zb0004)
} else if len(z.Checksum) > 0 {
for key := range z.Checksum {
delete(z.Checksum, key)
}
}
for zb0004 > 0 {
zb0004--
var za0004 string
var za0005 string
za0004, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
za0005, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Checksum", za0004)
return
}
z.Checksum[za0004] = za0005
}
}
return return
} }
// EncodeMsg implements msgp.Encodable // EncodeMsg implements msgp.Encodable
func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) { func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
// array header, size 27 // array header, size 28
err = en.Append(0xdc, 0x0, 0x1b) err = en.Append(0xdc, 0x0, 0x1c)
if err != nil { if err != nil {
return return
} }
@ -942,14 +980,38 @@ func (z *FileInfo) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "DiskMTime") err = msgp.WrapError(err, "DiskMTime")
return return
} }
if z.Checksum == nil { // allownil: if nil
err = en.WriteNil()
if err != nil {
return
}
} else {
err = en.WriteMapHeader(uint32(len(z.Checksum)))
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
for za0004, za0005 := range z.Checksum {
err = en.WriteString(za0004)
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
err = en.WriteString(za0005)
if err != nil {
err = msgp.WrapError(err, "Checksum", za0004)
return
}
}
}
return return
} }
// MarshalMsg implements msgp.Marshaler // MarshalMsg implements msgp.Marshaler
func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) { func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize()) o = msgp.Require(b, z.Msgsize())
// array header, size 27 // array header, size 28
o = append(o, 0xdc, 0x0, 0x1b) o = append(o, 0xdc, 0x0, 0x1c)
o = msgp.AppendString(o, z.Volume) o = msgp.AppendString(o, z.Volume)
o = msgp.AppendString(o, z.Name) o = msgp.AppendString(o, z.Name)
o = msgp.AppendString(o, z.VersionID) o = msgp.AppendString(o, z.VersionID)
@ -996,6 +1058,15 @@ func (z *FileInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.AppendBool(o, z.Fresh) o = msgp.AppendBool(o, z.Fresh)
o = msgp.AppendInt(o, z.Idx) o = msgp.AppendInt(o, z.Idx)
o = msgp.AppendTime(o, z.DiskMTime) o = msgp.AppendTime(o, z.DiskMTime)
if z.Checksum == nil { // allownil: if nil
o = msgp.AppendNil(o)
} else {
o = msgp.AppendMapHeader(o, uint32(len(z.Checksum)))
for za0004, za0005 := range z.Checksum {
o = msgp.AppendString(o, za0004)
o = msgp.AppendString(o, za0005)
}
}
return return
} }
@ -1007,8 +1078,8 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err) err = msgp.WrapError(err)
return return
} }
if zb0001 != 27 { if zb0001 != 28 {
err = msgp.ArrayError{Wanted: 27, Got: zb0001} err = msgp.ArrayError{Wanted: 28, Got: zb0001}
return return
} }
z.Volume, bts, err = msgp.ReadStringBytes(bts) z.Volume, bts, err = msgp.ReadStringBytes(bts)
@ -1183,6 +1254,40 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "DiskMTime") err = msgp.WrapError(err, "DiskMTime")
return return
} }
if msgp.IsNil(bts) {
bts = bts[1:]
z.Checksum = nil
} else {
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
if z.Checksum == nil {
z.Checksum = make(map[string]string, zb0004)
} else if len(z.Checksum) > 0 {
for key := range z.Checksum {
delete(z.Checksum, key)
}
}
for zb0004 > 0 {
var za0004 string
var za0005 string
zb0004--
za0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksum")
return
}
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksum", za0004)
return
}
z.Checksum[za0004] = za0005
}
}
o = bts o = bts
return return
} }
@ -1200,7 +1305,13 @@ func (z *FileInfo) Msgsize() (s int) {
for za0003 := range z.Parts { for za0003 := range z.Parts {
s += z.Parts[za0003].Msgsize() s += z.Parts[za0003].Msgsize()
} }
s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.TimeSize s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.TimeSize + msgp.MapHeaderSize
if z.Checksum != nil {
for za0004, za0005 := range z.Checksum {
_ = za0005
s += msgp.StringPrefixSize + len(za0004) + msgp.StringPrefixSize + len(za0005)
}
}
return return
} }

View File

@ -18,7 +18,7 @@
package cmd package cmd
const ( const (
storageRESTVersion = "v47" // Added ReadMultiple storageRESTVersion = "v48" // Added Checksums
storageRESTVersionPrefix = SlashSeparator + storageRESTVersion storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
storageRESTPrefix = minioReservedBucketPath + "/storage" storageRESTPrefix = minioReservedBucketPath + "/storage"
) )

View File

@ -29,7 +29,7 @@ import (
"net/http" "net/http"
"time" "time"
humanize "github.com/dustin/go-humanize" "github.com/dustin/go-humanize"
"github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/hash/sha256" "github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http" xhttp "github.com/minio/minio/internal/http"

View File

@ -2293,7 +2293,7 @@ func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentia
if etag == "" { if etag == "" {
t.Fatalf("Unexpected empty etag") t.Fatalf("Unexpected empty etag")
} }
cp = append(cp, CompletePart{partID, etag[1 : len(etag)-1]}) cp = append(cp, CompletePart{PartNumber: partID, ETag: etag[1 : len(etag)-1]})
} else { } else {
t.Fatalf("Missing etag header") t.Fatalf("Missing etag header")
} }

View File

@ -134,6 +134,7 @@ type ObjectPartInfo struct {
ActualSize int64 `json:"actualSize"` // Original size of the part without compression or encryption bytes. ActualSize int64 `json:"actualSize"` // Original size of the part without compression or encryption bytes.
ModTime time.Time `json:"modTime"` // Date and time at which the part was uploaded. ModTime time.Time `json:"modTime"` // Date and time at which the part was uploaded.
Index []byte `json:"index,omitempty" msg:"index,omitempty"` Index []byte `json:"index,omitempty" msg:"index,omitempty"`
Checksums map[string]string `json:"crc,omitempty" msg:"crc,omitempty"` // Content Checksums
} }
// ChecksumInfo - carries checksums of individual scattered parts per disk. // ChecksumInfo - carries checksums of individual scattered parts per disk.

View File

@ -605,6 +605,36 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "Index") err = msgp.WrapError(err, "Index")
return return
} }
case "crc":
var zb0002 uint32
zb0002, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, "Checksums")
return
}
if z.Checksums == nil {
z.Checksums = make(map[string]string, zb0002)
} else if len(z.Checksums) > 0 {
for key := range z.Checksums {
delete(z.Checksums, key)
}
}
for zb0002 > 0 {
zb0002--
var za0001 string
var za0002 string
za0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Checksums")
return
}
za0002, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Checksums", za0001)
return
}
z.Checksums[za0001] = za0002
}
default: default:
err = dc.Skip() err = dc.Skip()
if err != nil { if err != nil {
@ -619,12 +649,16 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable // EncodeMsg implements msgp.Encodable
func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) { func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
// omitempty: check for empty values // omitempty: check for empty values
zb0001Len := uint32(6) zb0001Len := uint32(7)
var zb0001Mask uint8 /* 6 bits */ var zb0001Mask uint8 /* 7 bits */
if z.Index == nil { if z.Index == nil {
zb0001Len-- zb0001Len--
zb0001Mask |= 0x20 zb0001Mask |= 0x20
} }
if z.Checksums == nil {
zb0001Len--
zb0001Mask |= 0x40
}
// variable map header, size zb0001Len // variable map header, size zb0001Len
err = en.Append(0x80 | uint8(zb0001Len)) err = en.Append(0x80 | uint8(zb0001Len))
if err != nil { if err != nil {
@ -695,6 +729,30 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
return return
} }
} }
if (zb0001Mask & 0x40) == 0 { // if not empty
// write "crc"
err = en.Append(0xa3, 0x63, 0x72, 0x63)
if err != nil {
return
}
err = en.WriteMapHeader(uint32(len(z.Checksums)))
if err != nil {
err = msgp.WrapError(err, "Checksums")
return
}
for za0001, za0002 := range z.Checksums {
err = en.WriteString(za0001)
if err != nil {
err = msgp.WrapError(err, "Checksums")
return
}
err = en.WriteString(za0002)
if err != nil {
err = msgp.WrapError(err, "Checksums", za0001)
return
}
}
}
return return
} }
@ -702,12 +760,16 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) {
func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) { func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize()) o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values // omitempty: check for empty values
zb0001Len := uint32(6) zb0001Len := uint32(7)
var zb0001Mask uint8 /* 6 bits */ var zb0001Mask uint8 /* 7 bits */
if z.Index == nil { if z.Index == nil {
zb0001Len-- zb0001Len--
zb0001Mask |= 0x20 zb0001Mask |= 0x20
} }
if z.Checksums == nil {
zb0001Len--
zb0001Mask |= 0x40
}
// variable map header, size zb0001Len // variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len)) o = append(o, 0x80|uint8(zb0001Len))
if zb0001Len == 0 { if zb0001Len == 0 {
@ -733,6 +795,15 @@ func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = append(o, 0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78) o = append(o, 0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78)
o = msgp.AppendBytes(o, z.Index) o = msgp.AppendBytes(o, z.Index)
} }
if (zb0001Mask & 0x40) == 0 { // if not empty
// string "crc"
o = append(o, 0xa3, 0x63, 0x72, 0x63)
o = msgp.AppendMapHeader(o, uint32(len(z.Checksums)))
for za0001, za0002 := range z.Checksums {
o = msgp.AppendString(o, za0001)
o = msgp.AppendString(o, za0002)
}
}
return
}
@ -790,6 +861,36 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "Index")
return
}
case "crc":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksums")
return
}
if z.Checksums == nil {
z.Checksums = make(map[string]string, zb0002)
} else if len(z.Checksums) > 0 {
for key := range z.Checksums {
delete(z.Checksums, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksums")
return
}
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Checksums", za0001)
return
}
z.Checksums[za0001] = za0002
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@ -804,7 +905,13 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ObjectPartInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 8 + msgp.TimeSize + 6 + msgp.BytesPrefixSize + len(z.Index) + 4 + msgp.MapHeaderSize
if z.Checksums != nil {
for za0001, za0002 := range z.Checksums {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
}
}
return
}
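
The generated methods above stream the new Checksums map with tinylib/msgp map primitives (WriteMapHeader/WriteString on encode, ReadMapHeader/ReadString on decode). A minimal, self-contained sketch of that read/write pattern on a plain map[string]string, independent of the generated ObjectPartInfo code; the map entry is an arbitrary placeholder:

	package main

	import (
		"bytes"
		"fmt"

		"github.com/tinylib/msgp/msgp"
	)

	func main() {
		// Keys are algorithm names, values are base64 encoded checksums (placeholder here).
		checksums := map[string]string{"CRC32C": "AQIDBA=="}

		// Encode: map header followed by key/value string pairs, as in EncodeMsg above.
		var buf bytes.Buffer
		w := msgp.NewWriter(&buf)
		w.WriteMapHeader(uint32(len(checksums)))
		for k, v := range checksums {
			w.WriteString(k)
			w.WriteString(v)
		}
		w.Flush()

		// Decode: mirror of DecodeMsg above.
		r := msgp.NewReader(&buf)
		n, err := r.ReadMapHeader()
		if err != nil {
			panic(err)
		}
		got := make(map[string]string, n)
		for i := uint32(0); i < n; i++ {
			k, _ := r.ReadString()
			v, _ := r.ReadString()
			got[k] = v
		}
		fmt.Println(got) // map[CRC32C:AQIDBA==]
	}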


@ -36,6 +36,7 @@ import (
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
@ -638,6 +639,9 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
if sc, ok := j.MetaSys[ReservedMetadataPrefixLower+TransitionTier]; ok {
fi.TransitionTier = string(sc)
}
if crcs := j.MetaSys[ReservedMetadataPrefixLower+"crc"]; len(crcs) > 0 {
fi.Checksum = hash.ReadCheckSums(crcs)
}
return fi, nil
}
@ -1536,6 +1540,16 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error {
if fi.TransitionTier != "" {
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+TransitionTier] = []byte(fi.TransitionTier)
}
if len(fi.Checksum) > 0 {
res := make([]byte, 0, len(fi.Checksum)*40)
for k, v := range fi.Checksum {
crc := hash.NewChecksumString(k, v)
if crc.Valid() {
res = crc.AppendTo(res)
}
}
ventry.ObjectV2.MetaSys[ReservedMetadataPrefixLower+"crc"] = res
}
}
if !ventry.Valid() {
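
A rough sketch of the round trip performed above, assuming it is compiled inside the MinIO module so the internal/hash package added below is importable: AddVersion packs each valid checksum into the ReservedMetadataPrefixLower+"crc" metadata entry via AppendTo, and ToFileInfo recovers the algorithm-to-value map with hash.ReadCheckSums. The input value here is an arbitrary 4-byte placeholder, not a real checksum:

	package main

	import (
		"fmt"

		"github.com/minio/minio/internal/hash"
	)

	func main() {
		// What AddVersion stores for fi.Checksum (placeholder base64 of 4 arbitrary bytes).
		in := map[string]string{"CRC32C": "AQIDBA=="}
		var packed []byte
		for alg, val := range in {
			if crc := hash.NewChecksumString(alg, val); crc != nil && crc.Valid() {
				packed = crc.AppendTo(packed)
			}
		}

		// What ToFileInfo does when reading the "crc" metadata entry back.
		fmt.Println(hash.ReadCheckSums(packed)) // map[CRC32C:AQIDBA==]
	}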

internal/hash/checksum.go (new file, 359 lines)

@ -0,0 +1,359 @@
// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package hash
import (
"crypto/sha1"
"encoding/base64"
"encoding/binary"
"hash"
"hash/crc32"
"net/http"
"strings"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http"
)
// MinIOMultipartChecksum is used as metadata on multipart uploads to indicate the checksum type.
const MinIOMultipartChecksum = "x-minio-multipart-checksum"
// ChecksumType contains information about the checksum type.
type ChecksumType uint32
const (
// ChecksumTrailing indicates the checksum will be sent in the trailing header.
// Another checksum type will be set.
ChecksumTrailing ChecksumType = 1 << iota
// ChecksumSHA256 indicates a SHA256 checksum.
ChecksumSHA256
// ChecksumSHA1 indicates a SHA-1 checksum.
ChecksumSHA1
// ChecksumCRC32 indicates a CRC32 checksum with IEEE table.
ChecksumCRC32
// ChecksumCRC32C indicates a CRC32 checksum with Castagnoli table.
ChecksumCRC32C
// ChecksumInvalid indicates an invalid checksum.
ChecksumInvalid
// ChecksumNone indicates no checksum.
ChecksumNone ChecksumType = 0
)
// Checksum is a type and base 64 encoded value.
type Checksum struct {
Type ChecksumType
Encoded string
}
// Is reports whether c contains all of the bits set in t.
func (c ChecksumType) Is(t ChecksumType) bool {
if t == ChecksumNone {
return c == ChecksumNone
}
return c&t == t
}
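
ChecksumType is a bit set, so a trailing checksum carries both the ChecksumTrailing bit and the concrete algorithm bit, and Is checks that every requested bit is present. A small sketch of that behaviour, assuming the package is importable from inside the MinIO module:

	package main

	import (
		"fmt"

		"github.com/minio/minio/internal/hash"
	)

	func main() {
		t := hash.NewChecksumType("CRC32C") // e.g. from x-amz-checksum-algorithm
		t |= hash.ChecksumTrailing          // value will arrive via X-Amz-Trailer

		fmt.Println(t.Is(hash.ChecksumCRC32C))   // true: algorithm bit is set
		fmt.Println(t.Is(hash.ChecksumTrailing)) // true: trailing bit is set
		fmt.Println(t.Key())                     // x-amz-checksum-crc32c
		fmt.Println(t.Trailing())                // true
	}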
// Key returns the header key.
// returns empty string if invalid or none.
func (c ChecksumType) Key() string {
switch {
case c.Is(ChecksumCRC32):
return xhttp.AmzChecksumCRC32
case c.Is(ChecksumCRC32C):
return xhttp.AmzChecksumCRC32C
case c.Is(ChecksumSHA1):
return xhttp.AmzChecksumSHA1
case c.Is(ChecksumSHA256):
return xhttp.AmzChecksumSHA256
}
return ""
}
// RawByteLen returns the size of the un-encoded checksum.
func (c ChecksumType) RawByteLen() int {
switch {
case c.Is(ChecksumCRC32):
return 4
case c.Is(ChecksumCRC32C):
return 4
case c.Is(ChecksumSHA1):
return sha1.Size
case c.Is(ChecksumSHA256):
return sha256.Size
}
return 0
}
// IsSet returns whether the type is valid and known.
func (c ChecksumType) IsSet() bool {
return !c.Is(ChecksumInvalid) && !c.Is(ChecksumNone)
}
// NewChecksumType returns a checksum type based on the algorithm string.
func NewChecksumType(alg string) ChecksumType {
switch strings.ToUpper(alg) {
case "CRC32":
return ChecksumCRC32
case "CRC32C":
return ChecksumCRC32C
case "SHA1":
return ChecksumSHA1
case "SHA256":
return ChecksumSHA256
case "":
return ChecksumNone
}
return ChecksumInvalid
}
// String returns the type as a string.
func (c ChecksumType) String() string {
switch {
case c.Is(ChecksumCRC32):
return "CRC32"
case c.Is(ChecksumCRC32C):
return "CRC32C"
case c.Is(ChecksumSHA1):
return "SHA1"
case c.Is(ChecksumSHA256):
return "SHA256"
case c.Is(ChecksumNone):
return ""
}
return "invalid"
}
// Hasher returns a hasher corresponding to the checksum type.
// Returns nil if no checksum.
func (c ChecksumType) Hasher() hash.Hash {
switch {
case c.Is(ChecksumCRC32):
return crc32.NewIEEE()
case c.Is(ChecksumCRC32C):
return crc32.New(crc32.MakeTable(crc32.Castagnoli))
case c.Is(ChecksumSHA1):
return sha1.New()
case c.Is(ChecksumSHA256):
return sha256.New()
}
return nil
}
// Trailing returns whether the checksum is expected as a trailing header.
func (c ChecksumType) Trailing() bool {
return c.Is(ChecksumTrailing)
}
// NewChecksumFromData returns a new checksum of the given type, computed over the provided data.
func NewChecksumFromData(t ChecksumType, data []byte) *Checksum {
if !t.IsSet() {
return nil
}
h := t.Hasher()
h.Write(data)
c := Checksum{Type: t, Encoded: base64.StdEncoding.EncodeToString(h.Sum(nil))}
if !c.Valid() {
return nil
}
return &c
}
// ReadCheckSums will read checksums from b and return them.
func ReadCheckSums(b []byte) map[string]string {
res := make(map[string]string, 1)
for len(b) > 0 {
t, n := binary.Uvarint(b)
if n < 0 {
break
}
b = b[n:]
typ := ChecksumType(t)
length := typ.RawByteLen()
if length == 0 || len(b) < length {
break
}
res[typ.String()] = base64.StdEncoding.EncodeToString(b[:length])
b = b[length:]
}
if len(res) == 0 {
res = nil
}
return res
}
// NewChecksumString returns a new checksum from specified algorithm and base64 encoded value.
func NewChecksumString(alg, value string) *Checksum {
t := NewChecksumType(alg)
if !t.IsSet() {
return nil
}
c := Checksum{Type: t, Encoded: value}
if !c.Valid() {
return nil
}
return &c
}
// AppendTo will append the checksum to b.
// ReadCheckSums reads the values back.
func (c Checksum) AppendTo(b []byte) []byte {
var tmp [binary.MaxVarintLen32]byte
n := binary.PutUvarint(tmp[:], uint64(c.Type))
crc := c.Raw()
if len(crc) != c.Type.RawByteLen() {
return b
}
b = append(b, tmp[:n]...)
b = append(b, crc...)
return b
}
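
AppendTo stores a checksum as a uvarint type tag followed by the fixed-size raw digest, and ReadCheckSums walks such a buffer back into an algorithm-to-base64 map; this is the byte format kept in the "crc" metadata entry above. A minimal round-trip sketch under the same module-internal import assumption:

	package main

	import (
		"fmt"

		"github.com/minio/minio/internal/hash"
	)

	func main() {
		payload := []byte("hello world")

		// Pack a CRC32 and a SHA1 checksum of the same payload into one buffer.
		var packed []byte
		for _, t := range []hash.ChecksumType{hash.ChecksumCRC32, hash.ChecksumSHA1} {
			if c := hash.NewChecksumFromData(t, payload); c != nil {
				packed = c.AppendTo(packed)
			}
		}

		// Unpack into algorithm name -> base64 encoded value.
		fmt.Println(hash.ReadCheckSums(packed)) // map[CRC32:... SHA1:...]
	}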
// Valid returns whether checksum is valid.
func (c Checksum) Valid() bool {
if c.Type == ChecksumInvalid {
return false
}
if len(c.Encoded) == 0 || c.Type.Is(ChecksumTrailing) {
return c.Type.Is(ChecksumNone) || c.Type.Is(ChecksumTrailing)
}
raw := c.Raw()
return c.Type.RawByteLen() == len(raw)
}
// Raw returns the Raw checksum.
func (c Checksum) Raw() []byte {
if len(c.Encoded) == 0 {
return nil
}
v, _ := base64.StdEncoding.DecodeString(c.Encoded)
return v
}
// Matches returns whether given content matches c.
func (c Checksum) Matches(content []byte) error {
if len(c.Encoded) == 0 {
return nil
}
hasher := c.Type.Hasher()
_, err := hasher.Write(content)
if err != nil {
return err
}
got := base64.StdEncoding.EncodeToString(hasher.Sum(nil))
if got != c.Encoded {
return ChecksumMismatch{
Want: c.Encoded,
Got: got,
}
}
return nil
}
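
Matches re-hashes the supplied content and compares the base64 encoding against the stored value, returning the ChecksumMismatch error added further below when they differ. A short sketch:

	package main

	import (
		"fmt"

		"github.com/minio/minio/internal/hash"
	)

	func main() {
		body := []byte("some object data")
		c := hash.NewChecksumFromData(hash.ChecksumSHA256, body)

		fmt.Println(c.Matches(body))                    // <nil>
		fmt.Println(c.Matches([]byte("tampered data"))) // Bad checksum: Want ... does not match calculated ...
	}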
// AsMap returns the checksum as a single-entry map, keyed by the algorithm name with the base64 encoded value, or nil if the checksum is not valid.
func (c *Checksum) AsMap() map[string]string {
if c == nil || !c.Valid() {
return nil
}
return map[string]string{c.Type.String(): c.Encoded}
}
// TransferChecksumHeader will transfer any checksum value that has been checked.
func TransferChecksumHeader(w http.ResponseWriter, r *http.Request) {
t, s := getContentChecksum(r)
if !t.IsSet() || t.Is(ChecksumTrailing) {
// TODO: Add trailing when we can read it.
return
}
w.Header().Set(t.Key(), s)
}
// AddChecksumHeader will set the response headers for all valid checksum values in c.
func AddChecksumHeader(w http.ResponseWriter, c map[string]string) {
for k, v := range c {
typ := NewChecksumType(k)
if !typ.IsSet() {
continue
}
crc := Checksum{Type: typ, Encoded: v}
if crc.Valid() {
w.Header().Set(typ.Key(), v)
}
}
}
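
On the response path, AddChecksumHeader writes stored checksum values back to the client, skipping anything that does not decode to the expected length. A sketch using httptest purely for illustration; the map value is an arbitrary placeholder:

	package main

	import (
		"fmt"
		"net/http/httptest"

		"github.com/minio/minio/internal/hash"
	)

	func main() {
		rec := httptest.NewRecorder()

		// Checksums previously stored for the object; value is a placeholder 4-byte digest.
		stored := map[string]string{"CRC32": "AQIDBA=="}
		hash.AddChecksumHeader(rec, stored)

		fmt.Println(rec.Header().Get("x-amz-checksum-crc32")) // AQIDBA==
	}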
// GetContentChecksum returns the content checksum from the request headers.
// Returns ErrInvalidChecksum if a checksum header is present but malformed or inconsistent.
// Returns nil, nil if no checksum is set.
func GetContentChecksum(r *http.Request) (*Checksum, error) {
t, s := getContentChecksum(r)
if t == ChecksumNone {
if s == "" {
return nil, nil
}
return nil, ErrInvalidChecksum
}
c := Checksum{Type: t, Encoded: s}
if !c.Valid() {
return nil, ErrInvalidChecksum
}
return &c, nil
}
// getContentChecksum returns the content checksum type and value from the request headers.
// Returns ChecksumInvalid if conflicting or malformed checksum headers are found.
func getContentChecksum(r *http.Request) (t ChecksumType, s string) {
t = ChecksumNone
alg := r.Header.Get(xhttp.AmzChecksumAlgo)
if alg != "" {
t |= NewChecksumType(alg)
if t.IsSet() {
hdr := t.Key()
if s = r.Header.Get(hdr); s == "" {
if strings.EqualFold(r.Header.Get(xhttp.AmzTrailer), hdr) {
t |= ChecksumTrailing
} else {
t = ChecksumInvalid
}
return ChecksumNone, ""
}
}
return t, s
}
checkType := func(c ChecksumType) {
if got := r.Header.Get(c.Key()); got != "" {
// If already set, invalid
if t != ChecksumNone {
t = ChecksumInvalid
s = ""
} else {
t = c
s = got
}
}
}
checkType(ChecksumCRC32)
checkType(ChecksumCRC32C)
checkType(ChecksumSHA1)
checkType(ChecksumSHA256)
return t, s
}
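
On the request path, GetContentChecksum accepts either x-amz-checksum-algorithm plus its matching value header, or a single bare x-amz-checksum-* header; conflicting value headers are rejected as invalid. A sketch, computing the value with the standard library rather than hard-coding it; the URL is a placeholder and no content verification happens here:

	package main

	import (
		"encoding/base64"
		"encoding/binary"
		"fmt"
		"hash/crc32"
		"net/http"

		"github.com/minio/minio/internal/hash"
	)

	func main() {
		// 4-byte big-endian CRC32 (IEEE) of the payload, base64 encoded.
		raw := make([]byte, 4)
		binary.BigEndian.PutUint32(raw, crc32.ChecksumIEEE([]byte("hello world")))
		encoded := base64.StdEncoding.EncodeToString(raw)

		req, _ := http.NewRequest(http.MethodPut, "http://localhost:9000/bucket/object", nil)
		req.Header.Set("x-amz-checksum-crc32", encoded)

		c, err := hash.GetContentChecksum(req)
		if err != nil {
			panic(err) // would be hash.ErrInvalidChecksum
		}
		fmt.Println(c.Type.String(), c.Encoded) // CRC32 <base64 value>
	}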


@ -48,3 +48,13 @@ type ErrSizeMismatch struct {
func (e ErrSizeMismatch) Error() string {
return fmt.Sprintf("Size mismatch: got %d, want %d", e.Got, e.Want)
}
} }
// ChecksumMismatch - when content checksum does not match with what was sent from client.
type ChecksumMismatch struct {
Want string
Got string
}
func (e ChecksumMismatch) Error() string {
return "Bad checksum: Want " + e.Want + " does not match calculated " + e.Got
}


@ -24,6 +24,7 @@ import (
"errors" "errors"
"hash" "hash"
"io" "io"
"net/http"
"github.com/minio/minio/internal/etag" "github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/hash/sha256" "github.com/minio/minio/internal/hash/sha256"
@ -46,6 +47,10 @@ type Reader struct {
checksum etag.ETag
contentSHA256 []byte
// Content checksum
contentHash Checksum
contentHasher hash.Hash
sha256 hash.Hash
}
@ -83,7 +88,7 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
if r.bytesRead > 0 {
return nil, errors.New("hash: already read from hash reader")
}
if len(r.checksum) != 0 && len(MD5) != 0 && !etag.Equal(r.checksum, MD5) {
return nil, BadDigest{
ExpectedMD5: r.checksum.String(),
CalculatedMD5: md5Hex,
@ -99,7 +104,7 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
return nil, ErrSizeMismatch{Want: r.size, Got: size}
}
r.checksum = MD5
r.contentSHA256 = SHA256
if r.size < 0 && size >= 0 {
r.src = etag.Wrap(io.LimitReader(r.src, size), r.src)
@ -114,33 +119,62 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
if size >= 0 {
r := io.LimitReader(src, size)
if _, ok := src.(etag.Tagger); !ok {
src = etag.NewReader(r, MD5)
} else {
src = etag.Wrap(r, src)
}
} else if _, ok := src.(etag.Tagger); !ok {
src = etag.NewReader(src, MD5)
}
var h hash.Hash
if len(SHA256) != 0 {
h = sha256.New()
}
return &Reader{
src: src,
size: size,
actualSize: actualSize,
checksum: MD5,
contentSHA256: SHA256,
sha256: h,
}, nil
}
// ErrInvalidChecksum is returned when an invalid checksum is provided in headers.
var ErrInvalidChecksum = errors.New("invalid checksum")
// AddChecksum will add checksum checks as specified in
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
// Returns ErrInvalidChecksum if a problem with the checksum is found.
func (r *Reader) AddChecksum(req *http.Request, ignoreValue bool) error {
cs, err := GetContentChecksum(req)
if err != nil {
return ErrInvalidChecksum
}
if cs == nil {
return nil
}
r.contentHash = *cs
if cs.Type.Trailing() || ignoreValue {
// Ignore until we have trailing headers.
return nil
}
r.contentHasher = cs.Type.Hasher()
if r.contentHasher == nil {
return ErrInvalidChecksum
}
return nil
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.src.Read(p)
r.bytesRead += int64(n)
if r.sha256 != nil {
r.sha256.Write(p[:n])
}
if r.contentHasher != nil {
r.contentHasher.Write(p[:n])
}
if err == io.EOF { // Verify content SHA256, if set.
if r.sha256 != nil {
@ -151,6 +185,15 @@ func (r *Reader) Read(p []byte) (int, error) {
}
}
}
if r.contentHasher != nil {
if sum := r.contentHasher.Sum(nil); !bytes.Equal(r.contentHash.Raw(), sum) {
err := ChecksumMismatch{
Want: r.contentHash.Encoded,
Got: base64.StdEncoding.EncodeToString(sum),
}
return n, err
}
}
}
if err != nil && err != io.EOF {
if v, ok := err.(etag.VerifyError); ok {
@ -223,6 +266,19 @@ func (r *Reader) SHA256HexString() string {
return hex.EncodeToString(r.contentSHA256)
}
// ContentCRCType returns the content checksum type.
func (r *Reader) ContentCRCType() ChecksumType {
return r.contentHash.Type
}
// ContentCRC returns the content crc if set.
func (r *Reader) ContentCRC() map[string]string {
if r.contentHash.Type == ChecksumNone || !r.contentHash.Valid() {
return nil
}
return map[string]string{r.contentHash.Type.String(): r.contentHash.Encoded}
}
var _ io.Closer = (*Reader)(nil) // compiler check
// Close and release resources.
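
Putting the reader changes together: the request checksum is attached with AddChecksum, folded into every Read, and verified at EOF, surfacing ChecksumMismatch if the streamed content does not match. A sketch under the same module-internal import assumption; the URL and bucket/object names are placeholders:

	package main

	import (
		"bytes"
		"encoding/base64"
		"encoding/binary"
		"fmt"
		"hash/crc32"
		"io"
		"net/http"

		"github.com/minio/minio/internal/hash"
	)

	func main() {
		body := []byte("hello world")

		// Client side: CRC32 (IEEE) of the payload, big-endian, base64 encoded.
		raw := make([]byte, 4)
		binary.BigEndian.PutUint32(raw, crc32.ChecksumIEEE(body))
		req, _ := http.NewRequest(http.MethodPut, "http://localhost:9000/bucket/object", bytes.NewReader(body))
		req.Header.Set("x-amz-checksum-crc32", base64.StdEncoding.EncodeToString(raw))

		// Server side: wrap the body and attach the declared checksum.
		r, err := hash.NewReader(req.Body, int64(len(body)), "", "", int64(len(body)))
		if err != nil {
			panic(err)
		}
		if err := r.AddChecksum(req, false); err != nil {
			panic(err) // hash.ErrInvalidChecksum on malformed headers
		}

		// Draining the reader verifies the checksum; a wrong value surfaces as ChecksumMismatch.
		if _, err := io.Copy(io.Discard, r); err != nil {
			fmt.Println("verification failed:", err)
			return
		}
		fmt.Println("verified:", r.ContentCRC()) // map[CRC32:<base64 value>]
	}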


@ -33,3 +33,6 @@ func New() hash.Hash { return fipssha256.New() }
// Sum256 returns the SHA256 checksum of the data.
func Sum256(data []byte) [fipssha256.Size]byte { return fipssha256.Sum256(data) }
// Size is the size of a SHA256 checksum in bytes.
const Size = fipssha256.Size


@ -32,3 +32,6 @@ func New() hash.Hash { return nofipssha256.New() }
// Sum256 returns the SHA256 checksum of the data.
func Sum256(data []byte) [nofipssha256.Size]byte { return nofipssha256.Sum256(data) }
// Size is the size of a SHA256 checksum in bytes.
const Size = nofipssha256.Size


@ -113,6 +113,7 @@ const (
AmzCredential = "X-Amz-Credential"
AmzSecurityToken = "X-Amz-Security-Token"
AmzDecodedContentLength = "X-Amz-Decoded-Content-Length"
AmzTrailer = "X-Amz-Trailer"
AmzMetaUnencryptedContentLength = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length"
AmzMetaUnencryptedContentMD5 = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5"
@ -144,6 +145,14 @@ const (
// Server-Status
MinIOServerStatus = "x-minio-server-status"
// Content Checksums
AmzChecksumAlgo = "x-amz-checksum-algorithm"
AmzChecksumCRC32 = "x-amz-checksum-crc32"
AmzChecksumCRC32C = "x-amz-checksum-crc32c"
AmzChecksumSHA1 = "x-amz-checksum-sha1"
AmzChecksumSHA256 = "x-amz-checksum-sha256"
AmzChecksumMode = "x-amz-checksum-mode"
// Delete special flag to force delete a bucket or a prefix
MinIOForceDelete = "x-minio-force-delete"
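
For completeness, a client-side sketch of what produces these request headers: compute the CRC over the payload, base64-encode the 4-byte big-endian digest, and send it together with the algorithm header on the PUT. Plain net/http and the standard library only; a real request against MinIO or S3 would additionally need AWS V4 signing, and the endpoint and bucket/object names are placeholders:

	package main

	import (
		"bytes"
		"encoding/base64"
		"encoding/binary"
		"fmt"
		"hash/crc32"
		"net/http"
	)

	func main() {
		payload := []byte("hello world")

		// CRC32C (Castagnoli), 4-byte big-endian digest, base64 encoded.
		sum := crc32.Checksum(payload, crc32.MakeTable(crc32.Castagnoli))
		raw := make([]byte, 4)
		binary.BigEndian.PutUint32(raw, sum)
		encoded := base64.StdEncoding.EncodeToString(raw)

		req, _ := http.NewRequest(http.MethodPut, "http://localhost:9000/bucket/object", bytes.NewReader(payload))
		req.Header.Set("x-amz-checksum-algorithm", "CRC32C")
		req.Header.Set("x-amz-checksum-crc32c", encoded)

		fmt.Println(req.Header.Get("x-amz-checksum-crc32c")) // the base64 CRC32C value
	}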