diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go
index 82c00f631..979c3924b 100644
--- a/cmd/bucket-replication.go
+++ b/cmd/bucket-replication.go
@@ -869,7 +869,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
 		defer cancel()
 	}
 	r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
-	if objInfo.Multipart {
+	if objInfo.isMultipart() {
 		if err := replicateObjectWithMultipart(ctx, c, dest.Bucket, object, r, objInfo, putOpts); err != nil {
 			replicationStatus = replication.Failed
diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go
index e36e918e1..20a22c2cd 100644
--- a/cmd/encryption-v1.go
+++ b/cmd/encryption-v1.go
@@ -68,10 +68,10 @@ const (
 )
 
-// isEncryptedMultipart returns true if the current object is
+// isMultipart returns true if the current object is
 // uploaded by the user using multipart mechanism:
 // initiate new multipart, upload part, complete upload
-func (o *ObjectInfo) isEncryptedMultipart() bool {
+func (o *ObjectInfo) isMultipart() bool {
 	if len(o.Parts) == 0 {
 		return false
 	}
@@ -427,7 +427,7 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, seqNumber uint3
 	bucket, object := oi.Bucket, oi.Name
 
 	// Single part case
-	if !oi.isEncryptedMultipart() {
+	if !oi.isMultipart() {
 		var reader io.Reader
 		var err error
 		if copySource {
@@ -589,7 +589,7 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
 	if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
 		return 0, errors.New("Cannot compute decrypted size of an unencrypted object")
 	}
-	if !o.isEncryptedMultipart() {
+	if !o.isMultipart() {
 		size, err := sio.DecryptedSize(uint64(o.Size))
 		if err != nil {
 			err = errObjectTampered // assign correct error type
@@ -732,7 +732,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
 	// Assemble slice of (decrypted) part sizes in `sizes`
 	var sizes []int64
 	var decObjSize int64 // decrypted total object size
-	if o.isEncryptedMultipart() {
+	if o.isMultipart() {
 		sizes = make([]int64, len(o.Parts))
 		for i, part := range o.Parts {
 			var partSize uint64
diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go
index c2631ccb6..964530cfd 100644
--- a/cmd/erasure-metadata.go
+++ b/cmd/erasure-metadata.go
@@ -27,8 +27,6 @@ import (
 	"time"
 
 	"github.com/minio/minio/internal/bucket/replication"
-	"github.com/minio/minio/internal/crypto"
-	"github.com/minio/minio/internal/etag"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/minio/internal/sync/errgroup"
@@ -143,13 +141,6 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
 	// Extract etag from metadata.
 	objInfo.ETag = extractETag(fi.Metadata)
 
-	// Verify if Etag is parseable, if yes
-	// then check if its multipart etag.
-	et, e := etag.Parse(objInfo.ETag)
-	if e == nil {
-		objInfo.Multipart = et.IsMultipart()
-	}
-
 	// Add user tags to the object info
 	tags := fi.Metadata[xhttp.AmzObjectTagging]
 	if len(tags) != 0 {
@@ -176,14 +167,6 @@ func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
 	// Tags have also been extracted, we remove that as well.
 	objInfo.UserDefined = cleanMetadata(fi.Metadata)
 
-	// Set multipart for encryption properly if
-	// not set already.
-	if !objInfo.Multipart {
-		if _, ok := crypto.IsEncrypted(objInfo.UserDefined); ok {
-			objInfo.Multipart = crypto.IsMultiPart(objInfo.UserDefined)
-		}
-	}
-
 	// All the parts per object.
 	objInfo.Parts = fi.Parts
diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go
index e591e1997..aeea297ce 100644
--- a/cmd/erasure-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -783,9 +783,6 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 		return oi, toObjectErr(err, bucket, object, uploadID)
 	}
 
-	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5 := getCompleteMultipartMD5(parts)
-
 	uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
 
 	storageDisks := er.getDisks()
@@ -882,9 +879,9 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	}
 
 	// Save successfully calculated md5sum.
-	fi.Metadata["etag"] = s3MD5
-	if opts.UserDefined["etag"] != "" { // preserve ETag if set
-		fi.Metadata["etag"] = opts.UserDefined["etag"]
+	fi.Metadata["etag"] = opts.UserDefined["etag"]
+	if fi.Metadata["etag"] == "" {
+		fi.Metadata["etag"] = getCompleteMultipartMD5(parts)
 	}
 
 	// Save the consolidated actual size.
diff --git a/cmd/fs-v1-metadata.go b/cmd/fs-v1-metadata.go
index 60cd5deb2..5db3c2587 100644
--- a/cmd/fs-v1-metadata.go
+++ b/cmd/fs-v1-metadata.go
@@ -29,8 +29,6 @@ import (
 	"time"
 
 	jsoniter "github.com/json-iterator/go"
-	"github.com/minio/minio/internal/crypto"
-	"github.com/minio/minio/internal/etag"
 	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/lock"
 	"github.com/minio/minio/internal/logger"
@@ -170,11 +168,6 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
 	objInfo.ETag = extractETag(m.Meta)
 
-	et, e := etag.Parse(objInfo.ETag)
-	if e == nil {
-		objInfo.Multipart = et.IsMultipart()
-	}
-
 	objInfo.ContentType = m.Meta["content-type"]
 	objInfo.ContentEncoding = m.Meta["content-encoding"]
 	if storageClass, ok := m.Meta[xhttp.AmzStorageClass]; ok {
@@ -184,6 +177,7 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
 	}
 	var (
 		t time.Time
+		e error
 	)
 	if exp, ok := m.Meta["expires"]; ok {
 		if t, e = time.Parse(http.TimeFormat, exp); e == nil {
@@ -200,14 +194,6 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
 	// Tags have also been extracted, we remove that as well.
 	objInfo.UserDefined = cleanMetadata(m.Meta)
 
-	// Set multipart for encryption properly if
-	// not set already.
-	if !objInfo.Multipart {
-		if _, ok := crypto.IsEncrypted(objInfo.UserDefined); ok {
-			objInfo.Multipart = crypto.IsMultiPart(objInfo.UserDefined)
-		}
-	}
-
 	// All the parts per object.
 	objInfo.Parts = m.Parts
diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go
index 73382b66e..b3417014a 100644
--- a/cmd/fs-v1-multipart.go
+++ b/cmd/fs-v1-multipart.go
@@ -564,9 +564,6 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 		return oi, toObjectErr(err, bucket, object)
 	}
 
-	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5 := getCompleteMultipartMD5(parts)
-
 	// ensure that part ETag is canonicalized to strip off extraneous quotes
 	for i := range parts {
 		parts[i].ETag = canonicalizeETag(parts[i].ETag)
@@ -755,7 +752,12 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	if fsMeta.Meta == nil {
 		fsMeta.Meta = make(map[string]string)
 	}
-	fsMeta.Meta["etag"] = s3MD5
+
+	fsMeta.Meta["etag"] = opts.UserDefined["etag"]
+	if fsMeta.Meta["etag"] == "" {
+		fsMeta.Meta["etag"] = getCompleteMultipartMD5(parts)
+	}
+
 	// Save consolidated actual size.
 	fsMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)
 	if _, err = fsMeta.WriteTo(metaFile); err != nil {
diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go
index 03779211e..8aef0c8b9 100644
--- a/cmd/object-api-datatypes.go
+++ b/cmd/object-api-datatypes.go
@@ -165,8 +165,6 @@ type ObjectInfo struct {
 	Legacy bool // indicates object on disk is in legacy data format
 
-	Multipart bool // indicates if object is multipart object.
-
 	// backendType indicates which backend filled this structure
 	backendType BackendType
 
@@ -187,7 +185,6 @@ func (o ObjectInfo) Clone() (cinfo ObjectInfo) {
 		Size:          o.Size,
 		IsDir:         o.IsDir,
 		ETag:          o.ETag,
-		Multipart:     o.Multipart,
 		InnerETag:     o.InnerETag,
 		VersionID:     o.VersionID,
 		IsLatest:      o.IsLatest,
diff --git a/cmd/object-api-options.go b/cmd/object-api-options.go
index 26cf230fd..7162f0c39 100644
--- a/cmd/object-api-options.go
+++ b/cmd/object-api-options.go
@@ -357,5 +357,6 @@ func completeMultipartOpts(ctx context.Context, r *http.Request, bucket, object
 		}
 	}
 	opts.MTime = mtime
+	opts.UserDefined = make(map[string]string)
 	return opts, nil
 }
diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go
index 961be2f7b..0f9ae1a84 100644
--- a/cmd/object-api-utils.go
+++ b/cmd/object-api-utils.go
@@ -290,10 +290,10 @@ func cleanMetadataKeys(metadata map[string]string, keyNames ...string) map[strin
 
 // Extracts etag value from the metadata.
 func extractETag(metadata map[string]string) string {
-	// md5Sum tag is kept for backward compatibility.
-	etag, ok := metadata["md5Sum"]
+	etag, ok := metadata["etag"]
 	if !ok {
-		etag = metadata["etag"]
+		// md5Sum tag is kept for backward compatibility.
+		etag = metadata["md5Sum"]
 	}
 	// Success.
 	return etag
diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go
index d720f1a57..0c7710e3b 100644
--- a/cmd/object-handlers.go
+++ b/cmd/object-handlers.go
@@ -3046,8 +3046,10 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 
 	// Complete parts.
 	completeParts := make([]CompletePart, 0, len(complMultipartUpload.Parts))
+	originalCompleteParts := make([]CompletePart, 0, len(complMultipartUpload.Parts))
 	for _, part := range complMultipartUpload.Parts {
 		part.ETag = canonicalizeETag(part.ETag)
+		originalCompleteParts = append(originalCompleteParts, part)
 		if isEncrypted {
 			// ETag is stored in the backend in encrypted form. Validate client sent ETag with
 			// decrypted ETag.
@@ -3063,6 +3065,9 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 		completeParts = append(completeParts, part)
 	}
 
+	// Calculate s3 compatible md5sum for complete multipart.
+	s3MD5 := getCompleteMultipartMD5(originalCompleteParts)
+
 	completeMultiPartUpload := objectAPI.CompleteMultipartUpload
 
 	// This code is specifically to handle the requirements for slow
@@ -3104,6 +3109,11 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 		return
 	}
 
+	// preserve ETag if set, or set from parts.
+	if _, ok := opts.UserDefined["etag"]; !ok {
+		opts.UserDefined["etag"] = s3MD5
+	}
+
 	w = &whiteSpaceWriter{ResponseWriter: w, Flusher: w.(http.Flusher)}
 	completeDoneCh := sendWhiteSpace(w)
 	objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, completeParts, opts)
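
For context, below is a minimal standalone sketch of the ETag fallback this patch wires through the handler and both backends. It assumes getCompleteMultipartMD5 follows the usual S3 multipart convention (MD5 over the concatenated binary part MD5s, suffixed with "-<part count>"); the completedPart and multipartETag names are illustrative only, not MinIO APIs.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// completedPart is a stand-in for the CompletePart entries the handler
// collects from the client before any encryption-related ETag rewriting.
type completedPart struct {
	PartNumber int
	ETag       string
}

// multipartETag mimics the S3 convention getCompleteMultipartMD5 is assumed
// to follow: MD5 over the concatenated binary part MD5s, plus "-<N parts>".
func multipartETag(parts []completedPart) string {
	var all []byte
	for _, p := range parts {
		// Part ETags are hex MD5s; strip quotes and any "-N" suffix
		// before decoding.
		e := strings.Trim(p.ETag, "\"")
		if i := strings.IndexByte(e, '-'); i >= 0 {
			e = e[:i]
		}
		b, err := hex.DecodeString(e)
		if err != nil {
			// Fall back to the raw bytes if a part ETag is not a
			// plain MD5 (e.g. an encrypted ETag).
			b = []byte(e)
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(parts))
}

func main() {
	parts := []completedPart{
		{PartNumber: 1, ETag: "5d41402abc4b2a76b9719d911017c592"},
		{PartNumber: 2, ETag: "7d793037a0760186574b0282f2f435e7"},
	}

	// Mirrors the handler change: compute the S3-style ETag once from the
	// parts the client sent, then use it only when no ETag was already
	// provided via opts.UserDefined["etag"].
	userDefined := map[string]string{}
	if _, ok := userDefined["etag"]; !ok {
		userDefined["etag"] = multipartETag(parts)
	}
	fmt.Println(userDefined["etag"])
}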