// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"bufio"
	"context"
	"io"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/internal/amztime"
	sse "github.com/minio/minio/internal/bucket/encryption"
	objectlock "github.com/minio/minio/internal/bucket/object/lock"
	"github.com/minio/minio/internal/bucket/replication"
	"github.com/minio/minio/internal/config/cache"
	"github.com/minio/minio/internal/config/dns"
	"github.com/minio/minio/internal/config/storageclass"
	"github.com/minio/minio/internal/crypto"
	"github.com/minio/minio/internal/etag"
	"github.com/minio/minio/internal/event"
	"github.com/minio/minio/internal/fips"
	"github.com/minio/minio/internal/handlers"
	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/mux"
	"github.com/minio/pkg/v2/policy"
	"github.com/minio/sio"
)

// Multipart objectAPIHandlers

// NewMultipartUploadHandler - New multipart upload.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,
// the handler should ensure to remove these keys before sending them to the object layer.
// Currently these keys are:
//   - X-Amz-Server-Side-Encryption-Customer-Key
//   - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "NewMultipartUpload")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Check if bucket encryption is enabled
	sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
	sseConfig.Apply(r.Header, sse.ApplyOptions{
		AutoEncrypt: globalAutoEncryption,
	})

	// Validate storage class metadata if present
	if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
		if !storageclass.IsValid(sc) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL)
			return
		}
	}

	encMetadata := map[string]string{}

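	// Validate the requested encryption parameters up front: copy-source
	// SSE-C headers make no sense when creating an upload, and SSE-C
	// cannot be combined with SSE-S3, SSE-KMS, or bucket replication.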
	if crypto.Requested(r.Header) {
		if crypto.SSECopy.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
			return
		}

		if crypto.SSEC.IsRequested(r.Header) && crypto.S3.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
			return
		}

		if crypto.SSEC.IsRequested(r.Header) && crypto.S3KMS.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, toAPIError(ctx, crypto.ErrIncompatibleEncryptionMethod), r.URL)
			return
		}

		if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) {
			writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL)
			return
		}

		if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		// Set this for multipart only operations, we need to differentiate during
		// decryption if the file was actually multipart or not.
		encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
	}

	// Extract metadata that needs to be saved.
	metadata, err := extractMetadataFromReq(ctx, r)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
		if _, err := tags.ParseObjectTags(objTags); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		metadata[xhttp.AmzObjectTagging] = objTags
	}
	if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
		metadata[ReservedMetadataPrefixLower+ReplicaStatus] = replication.Replica.String()
		metadata[ReservedMetadataPrefixLower+ReplicaTimestamp] = UTCNow().Format(time.RFC3339Nano)
	}
	retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, policy.PutObjectRetentionAction)
	holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, policy.PutObjectLegalHoldAction)

	getObjectInfo := objectAPI.GetObjectInfo

	retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
	if s3Err == ErrNone && retentionMode.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
		metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = amztime.ISO8601Format(retentionDate.UTC())
	}
	if s3Err == ErrNone && legalHold.Status.Valid() {
		metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
	}
	if s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}
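	// If the bucket replication configuration applies to this object,
	// mark the new upload replication-pending.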
	if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(metadata, "", "", replication.ObjectReplicationType, ObjectOptions{})); dsc.ReplicateAny() {
		metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
		metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
	}

	// We need to preserve the encryption headers set in EncryptRequest,
	// so we do not want to override them, copy them instead.
	for k, v := range encMetadata {
		metadata[k] = v
	}

	// Ensure that metadata does not contain sensitive information
	crypto.RemoveSensitiveEntries(metadata)

	if isCompressible(r.Header, object) {
		// Storing the compression metadata.
		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
	}

	opts, err := putOptsFromReq(ctx, r, bucket, object, metadata)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if opts.PreserveETag != "" ||
		r.Header.Get(xhttp.IfMatch) != "" ||
		r.Header.Get(xhttp.IfNoneMatch) != "" {
		opts.CheckPrecondFn = func(oi ObjectInfo) bool {
			if _, err := DecryptObjectInfo(&oi, r); err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return true
			}
			return checkPreconditionsPUT(ctx, w, r, oi, opts)
		}
	}

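	// Validate any checksum algorithm declared for this upload. Trailing
	// checksums are excluded from WantChecksum below since they arrive
	// with each part rather than being fixed at initiation.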
	checksumType := hash.NewChecksumType(r.Header.Get(xhttp.AmzChecksumAlgo))
	if checksumType.Is(hash.ChecksumInvalid) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequestParameter), r.URL)
		return
	} else if checksumType.IsSet() && !checksumType.Is(hash.ChecksumTrailing) {
		opts.WantChecksum = &hash.Checksum{Type: checksumType}
	}

	newMultipartUpload := objectAPI.NewMultipartUpload

	res, err := newMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	response := generateInitiateMultipartUploadResponse(bucket, object, res.UploadID)
	if res.ChecksumAlgo != "" {
		w.Header().Set(xhttp.AmzChecksumAlgo, res.ChecksumAlgo)
	}
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CopyObjectPart")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	vars := mux.Vars(r)
	dstBucket := vars["bucket"]
	dstObject, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Read escaped copy source path to check for parameters.
	cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
	var vid string
	if u, err := url.Parse(cpSrcPath); err == nil {
		vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
		// Note that url.Parse does the unescaping
		cpSrcPath = u.Path
	}

	srcBucket, srcObject := path2BucketObject(cpSrcPath)
	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
		return
	}

	if vid != "" && vid != nullVersionID {
		_, err := uuid.Parse(vid)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{
				Bucket:    srcBucket,
				Object:    srcObject,
				VersionID: vid,
			}), r.URL)
			return
		}
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	uploadID := r.Form.Get(xhttp.UploadID)
	partIDString := r.Form.Get(xhttp.PartNumber)

	partID, err := strconv.Atoi(partIDString)
	if err != nil || partID <= 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL)
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
		return
	}

	var srcOpts, dstOpts ObjectOptions
	srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	srcOpts.VersionID = vid

	// convert copy src and dst encryption options for GET/PUT calls
	getOpts := ObjectOptions{VersionID: srcOpts.VersionID}
	if srcOpts.ServerSideEncryption != nil {
		getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	getObjectNInfo := objectAPI.GetObjectNInfo

	// Get request range.
	var rs *HTTPRangeSpec
	var parseRangeErr error
	if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
		rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
	} else {
		// This check is to see if client specified a header but the value
		// is empty for 'x-amz-copy-source-range'
		_, ok := r.Header[xhttp.AmzCopySourceRange]
		if ok {
			parseRangeErr = errInvalidRange
		}
	}

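	// Precondition check run while fetching the copy source; a range
	// parse error is surfaced here as a precondition-style failure so the
	// GET aborts before any data is copied.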
	checkCopyPartPrecondFn := func(o ObjectInfo) bool {
		if _, err := DecryptObjectInfo(&o, r); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return true
		}
		if checkCopyObjectPartPreconditions(ctx, w, r, o) {
			return true
		}
		if parseRangeErr != nil {
			writeCopyPartErr(ctx, w, parseRangeErr, r.URL)
			// Range header mismatch is pre-condition like failure
			// so return true to indicate Range precondition failed.
			return true
		}
		return false
	}
	getOpts.CheckPrecondFn = checkCopyPartPrecondFn
	gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, getOpts)
	if err != nil {
		if isErrPreconditionFailed(err) {
			return
		}
		if globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject) && gr != nil {
			// Versioning is enabled and the object may have been deleted,
			// leaving a delete-marker; AWS S3 sets these headers in that
			// case (for unclear reasons), so mirror that behavior.
			if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
				w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
				w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
			}
		}
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	defer gr.Close()
	srcInfo := gr.ObjInfo

	actualPartSize, err := srcInfo.GetActualSize()
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if err := enforceBucketQuotaHard(ctx, dstBucket, actualPartSize); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Special care for CopyObjectPart
	if partRangeErr := checkCopyPartRangeWithSize(rs, actualPartSize); partRangeErr != nil {
		writeCopyPartErr(ctx, w, partRangeErr, r.URL)
		return
	}

	// Get the object offset & length
	startOffset, length, err := rs.GetOffsetLength(actualPartSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// maximum copy size for multipart objects in a single operation
	if isMaxObjectSize(length) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
		return
	}

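	// In a federated deployment the destination bucket may be served by a
	// different cluster; in that case stream the part to the remote
	// instance instead of copying locally.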
	if isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) {
		var dstRecords []dns.SrvRecord
		dstRecords, err = globalDNSConfig.Get(dstBucket)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		// Send PutObject request to appropriate instance (in federated deployment)
		core, rerr := getRemoteInstanceClient(r, getHostFromSrv(dstRecords))
		if rerr != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL)
			return
		}

		popts := minio.PutObjectPartOptions{
			SSE: dstOpts.ServerSideEncryption,
		}

		partInfo, err := core.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, gr, length, popts)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
		encodedSuccessResponse := encodeResponse(response)

		// Write success response.
		writeSuccessResponseXML(w, encodedSuccessResponse)
		return
	}

	actualPartSize = length
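	// Wrap the source stream so an ETag can be computed over the plain
	// (pre-compression, pre-encryption) bytes while they are copied.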
	var reader io.Reader = etag.NewReader(ctx, gr, nil, nil)

	mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)

	// Read compression metadata preserved in the init multipart for the decision.
	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
	// Compress only if the compression is enabled during initial multipart.
	var idxCb func() []byte
	if isCompressed {
		wantEncryption := crypto.Requested(r.Header) || isEncrypted
		s2c, cb := newS2CompressReader(reader, actualPartSize, wantEncryption)
		idxCb = cb
		defer s2c.Close()
		reader = etag.Wrap(s2c, reader)
		length = -1
	}

	srcInfo.Reader, err = hash.NewReader(ctx, reader, length, "", "", actualPartSize)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, mi.UserDefined)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	dstOpts.IndexCB = idxCb

	rawReader := srcInfo.Reader
	pReader := NewPutObjReader(rawReader)

	var objectEncryptionKey crypto.ObjectKey
	if isEncrypted {
		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
			return
		}
		if crypto.S3.IsEncrypted(mi.UserDefined) && crypto.SSEC.IsRequested(r.Header) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
			return
		}
		var key []byte
		if crypto.SSEC.IsRequested(r.Header) {
			key, err = ParseSSECustomerRequest(r)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
		key, err = decryptObjectMeta(key, dstBucket, dstObject, mi.UserDefined)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		copy(objectEncryptionKey[:], key)

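		// Derive a unique per-part key from the object encryption key and
		// the part number; parts can then be decrypted independently of
		// one another.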
		partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
		encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()})
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		reader = etag.Wrap(encReader, reader)

		wantSize := int64(-1)
		if length >= 0 {
			info := ObjectInfo{Size: length}
			wantSize = info.EncryptedSize()
		}

		srcInfo.Reader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualPartSize)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		pReader, err = pReader.WithEncryption(srcInfo.Reader, &objectEncryptionKey)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		if dstOpts.IndexCB != nil {
			dstOpts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, dstOpts.IndexCB)
		}
	}

	srcInfo.PutObjReader = pReader
	copyObjectPart := objectAPI.CopyObjectPart

	// Copy source object to destination, if source and destination
	// object is same then only metadata is updated.
	partInfo, err := copyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
		startOffset, length, srcInfo, srcOpts, dstOpts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if isEncrypted {
		sseS3 := crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(mi.UserDefined)
		partInfo.ETag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, sseS3)
	}

	response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}

// PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PutObjectPart")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// X-Amz-Copy-Source shouldn't be set for this call.
	if _, ok := r.Header[xhttp.AmzCopySource]; ok {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
		return
	}

	clientETag, err := etag.FromContentMD5(r.Header)
	if err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
		return
	}

	// If Content-Length is unknown/missing, deny the request.
	size := r.ContentLength

	rAuthType := getRequestAuthType(r)
	// For auth type streaming signature, we need to gather a different content length.
	switch rAuthType {
	// Check signature types that must have content length
	case authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer:
		if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
			if sizeStr[0] == "" {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
				return
			}
			size, err = strconv.ParseInt(sizeStr[0], 10, 64)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
	}

	if size == -1 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
		return
	}

	uploadID := r.Form.Get(xhttp.UploadID)
	partIDString := r.Form.Get(xhttp.PartNumber)

	partID, err := strconv.Atoi(partIDString)
	if err != nil || partID <= 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL)
		return
	}

	// maximum size for multipart objects in a single operation
	if isMaxObjectSize(size) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
		return
	}

	// check partID with maximum part ID for multipart objects
	if isMaxPartID(partID) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
		return
	}

	var (
		md5hex              = clientETag.String()
		sha256hex           = ""
		reader    io.Reader = r.Body
		s3Error   APIErrorCode
	)
	if s3Error = isPutActionAllowed(ctx, rAuthType, bucket, object, r, policy.PutObjectAction); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

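	// Wire up the body reader that matches the request's auth type:
	// streaming signatures need a chunked reader, while V2/V4 signed
	// requests are verified up front.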
	switch rAuthType {
	case authTypeStreamingSigned, authTypeStreamingSignedTrailer:
		// Initialize stream signature verifier.
		reader, s3Error = newSignV4ChunkedReader(r, rAuthType == authTypeStreamingSignedTrailer)
		if s3Error != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
			return
		}
	case authTypeStreamingUnsignedTrailer:
		// Initialize the chunked reader for unsigned uploads with trailers.
		reader, s3Error = newUnsignedV4ChunkedReader(r, true)
		if s3Error != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
			return
		}
	case authTypeSignedV2, authTypePresignedV2:
		if s3Error = isReqAuthenticatedV2(r); s3Error != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
			return
		}
	case authTypePresigned, authTypeSigned:
		if s3Error = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Error != ErrNone {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
			return
		}

		if !skipContentSha256Cksum(r) {
			sha256hex = getContentSha256Cksum(r, serviceS3)
		}
	}

	if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	actualSize := size

	// get encryption options
	var opts ObjectOptions
	if crypto.SSEC.IsRequested(r.Header) {
		opts, err = getOpts(ctx, r, bucket, object)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}

	mi, err := objectAPI.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// Read compression metadata preserved in the init multipart for the decision.
	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
	var idxCb func() []byte
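	// Compression, when enabled at initiation, is applied before any
	// encryption; the compressed size is unknown up front, so size
	// tracking switches to -1 below.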
	if isCompressed {
		actualReader, err := hash.NewReader(ctx, reader, size, md5hex, sha256hex, actualSize)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		if err = actualReader.AddChecksum(r, false); err != nil {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
			return
		}

		// Set compression metrics.
		wantEncryption := crypto.Requested(r.Header)
		s2c, cb := newS2CompressReader(actualReader, actualSize, wantEncryption)
		idxCb = cb
		defer s2c.Close()
		reader = etag.Wrap(s2c, actualReader)
		size = -1   // Since compressed size is un-predictable.
		md5hex = "" // Do not try to verify the content.
		sha256hex = ""
	}

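	// Wrap whichever reader we ended up with in a hash.Reader so the
	// content MD5/SHA256, when known, are verified as the part is read.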
	hashReader, err := hash.NewReaderWithOpts(ctx, reader, hash.Options{
		Size:       size,
		MD5Hex:     md5hex,
		SHA256Hex:  sha256hex,
		ActualSize: actualSize,
		DisableMD5: false,
		ForceMD5:   nil,
	})
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	if err := hashReader.AddChecksum(r, size < 0); err != nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
		return
	}

	pReader := NewPutObjReader(hashReader)

	_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
	var objectEncryptionKey crypto.ObjectKey
	if isEncrypted {
		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
			return
		}

		opts, err = putOptsFromReq(ctx, r, bucket, object, mi.UserDefined)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		var key []byte
		if crypto.SSEC.IsRequested(r.Header) {
			key, err = ParseSSECustomerRequest(r)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}

		// Calculating object encryption key
		key, err = decryptObjectMeta(key, bucket, object, mi.UserDefined)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		copy(objectEncryptionKey[:], key)

		partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
		in := io.Reader(hashReader)
		if size > encryptBufferThreshold {
			// The encryption reads in blocks of 64KB.
			// We add a buffer on bigger files to reduce the number of syscalls upstream.
			in = bufio.NewReaderSize(hashReader, encryptBufferSize)
		}
		reader, err = sio.EncryptReader(in, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()})
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		wantSize := int64(-1)
		if size >= 0 {
			info := ObjectInfo{Size: size}
			wantSize = info.EncryptedSize()
		}
		// do not try to verify encrypted content
		hashReader, err = hash.NewReader(ctx, etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
		if err := hashReader.AddChecksum(r, true); err != nil {
			writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
			return
		}

		pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
		if err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		if idxCb != nil {
			idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb)
		}
		opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
	}
	opts.IndexCB = idxCb

	putObjectPart := objectAPI.PutObjectPart

	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts)
	if err != nil {
		// Verify if the underlying error is signature mismatch.
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

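	// Reflect the applicable SSE response headers and normalize the
	// stored part ETag before returning it: decrypt it for SSE-S3, and
	// otherwise keep only the trailing 32 hex digits when the stored
	// value carries additional material.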
	etag := partInfo.ETag
	if kind, encrypted := crypto.IsEncrypted(mi.UserDefined); encrypted {
		switch kind {
		case crypto.S3KMS:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
			w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, mi.KMSKeyID())
			if kmsCtx, ok := mi.UserDefined[crypto.MetaContext]; ok {
				w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
			}
			if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
				etag = etag[len(etag)-32:]
			}
		case crypto.S3:
			w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
			etag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: etag})
		case crypto.SSEC:
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
			w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))

			if len(etag) >= 32 && strings.Count(etag, "-") != 1 {
				etag = etag[len(etag)-32:]
			}
		}
	}

	// We must not use the http.Header().Set method here because some (broken)
	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
	// Therefore, we have to set the ETag directly as map entry.
	w.Header()[xhttp.ETag] = []string{"\"" + etag + "\""}
	hash.TransferChecksumHeader(w, r)

	writeSuccessResponseHeadersOnly(w)
}

// CompleteMultipartUploadHandler - Complete multipart upload.
func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "CompleteMultipartUpload")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Get upload id.
	uploadID, _, _, _, s3Error := getObjectResources(r.Form)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	// Content-Length is required and should be non-zero
	if r.ContentLength <= 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingPart), r.URL)
		return
	}

	complMultipartUpload := &CompleteMultipartUpload{}
	if err = xmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	if len(complMultipartUpload.Parts) == 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingPart), r.URL)
		return
	}

	if !sort.SliceIsSorted(complMultipartUpload.Parts, func(i, j int) bool {
		return complMultipartUpload.Parts[i].PartNumber < complMultipartUpload.Parts[j].PartNumber
	}) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartOrder), r.URL)
		return
	}

	// Reject retention or governance headers if set, CompleteMultipartUpload spec
	// does not use these headers, and should not be passed down to checkPutObjectLockAllowed
	if objectlock.IsObjectLockRequested(r.Header) || objectlock.IsObjectLockGovernanceBypassSet(r.Header) {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}

	if _, _, _, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, objectAPI.GetObjectInfo, ErrNone, ErrNone); s3Err != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
		return
	}

	completeMultiPartUpload := objectAPI.CompleteMultipartUpload

	versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
	suspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
	os := newObjSweeper(bucket, object).WithVersioning(versioned, suspended)
	if !globalTierConfigMgr.Empty() {
		// Get appropriate object info to identify the remote object to delete
		goiOpts := os.GetOpts()
		if goi, gerr := objectAPI.GetObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
			os.SetTransitionState(goi.TransitionedObject)
		}
	}

	opts, err := completeMultipartOpts(ctx, r, bucket, object)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	opts.Versioned = versioned
	opts.VersionSuspended = suspended

	// First, we compute the ETag of the multipart object.
	// The ETag of a multi-part object is always:
	// ETag := MD5(ETag_p1, ETag_p2, ...)+"-N" (N being the number of parts)
	//
	// This is independent of encryption. An encrypted multipart
	// object also has an ETag that is the MD5 of its part ETags.
	// The fact that in case of encryption the ETag of a part is
	// not the MD5 of the part content does not change that.
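	//
	// For example (illustrative values only): a two-part upload with part
	// ETags e1 and e2 completes with ETag = hex(MD5(bin(e1) || bin(e2))) + "-2".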
	var completeETags []etag.ETag
	for _, part := range complMultipartUpload.Parts {
		ETag, err := etag.Parse(part.ETag)
		if err != nil {
			continue
		}
		completeETags = append(completeETags, ETag)
	}
	multipartETag := etag.Multipart(completeETags...)
	opts.UserDefined["etag"] = multipartETag.String()

	objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, complMultipartUpload.Parts, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	opts.EncryptFn, err = objInfo.metadataEncryptFn(r.Header)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}
	if r.Header.Get(xMinIOExtract) == "true" && HasSuffix(object, archiveExt) {
		opts := ObjectOptions{VersionID: objInfo.VersionID, MTime: objInfo.ModTime}
		if _, err := updateObjectMetadataWithZipInfo(ctx, objectAPI, bucket, object, opts); err != nil {
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}

	setPutObjHeaders(w, objInfo, false)
	if dsc := mustReplicate(ctx, bucket, object, objInfo.getMustReplicateOptions(replication.ObjectReplicationType, opts)); dsc.ReplicateAny() {
		scheduleReplication(ctx, objInfo, objectAPI, dsc, replication.ObjectReplicationType)
	}
	if _, ok := r.Header[xhttp.MinIOSourceReplicationRequest]; ok {
		actualSize, _ := objInfo.GetActualSize()
		defer globalReplicationStats.UpdateReplicaStat(bucket, actualSize)
	}

	// Get object location.
	location := getObjectLocation(r, globalDomainNames, bucket, object)
	// Generate complete multipart response.
	response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)

	// Notify object created event.
	evt := eventArgs{
		EventName:    event.ObjectCreatedCompleteMultipartUpload,
		BucketName:   bucket,
		Object:       objInfo,
		ReqParams:    extractReqParams(r),
		RespElements: extractRespElements(w),
		UserAgent:    r.UserAgent(),
		Host:         handlers.GetSourceIP(r),
	}
	sendEvent(evt)

	asize, err := objInfo.GetActualSize()
	if err != nil {
		asize = objInfo.Size
	}

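	// Best-effort: record the completed object's metadata in the cache so
	// later reads can be validated against it.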
	defer globalCacheConfig.Set(&cache.ObjectInfo{
		Key:          objInfo.Name,
		Bucket:       objInfo.Bucket,
		ETag:         objInfo.ETag,
		ModTime:      objInfo.ModTime,
		Expires:      objInfo.ExpiresStr(),
		CacheControl: objInfo.CacheControl,
		Size:         asize,
		Metadata:     cleanReservedKeys(objInfo.UserDefined),
	})

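	// Alert on objects that have accumulated an excessive number of
	// versions: emit an event and an internal audit log entry.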
	if objInfo.NumVersions > int(scannerExcessObjectVersions.Load()) {
		evt.EventName = event.ObjectManyVersions
		sendEvent(evt)

		auditLogInternal(context.Background(), AuditLogOptions{
			Event:     "scanner:manyversions",
			APIName:   "CompleteMultipartUpload",
			Bucket:    objInfo.Bucket,
			Object:    objInfo.Name,
			VersionID: objInfo.VersionID,
			Status:    http.StatusText(http.StatusOK),
		})
	}

	// Remove the transitioned object whose object version is being overwritten.
	if !globalTierConfigMgr.Empty() {
		// Schedule object for immediate transition if eligible.
		enqueueTransitionImmediate(objInfo, lcEventSrc_s3CompleteMultipartUpload)
		os.Sweep()
	}
}

// AbortMultipartUploadHandler - Abort multipart upload
func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "AbortMultipartUpload")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}
	abortMultipartUpload := objectAPI.AbortMultipartUpload

	if s3Error := checkRequestAuthType(ctx, r, policy.AbortMultipartUploadAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	uploadID, _, _, _, s3Error := getObjectResources(r.Form)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}
	opts := ObjectOptions{}
	if err := abortMultipartUpload(ctx, bucket, object, uploadID, opts); err != nil {
		switch err.(type) {
		case InvalidUploadID:
			// Do not return an error for a non-existent upload-id
		default:
			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}
	}

	writeSuccessNoContent(w)
}

// ListObjectPartsHandler - List object parts
func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListObjectParts")

	defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := unescapePath(vars["object"])
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListMultipartUploadPartsAction, bucket, object); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}

	uploadID, partNumberMarker, maxParts, encodingType, s3Error := getObjectResources(r.Form)
	if s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
		return
	}
	if partNumberMarker < 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartNumberMarker), r.URL)
		return
	}
	if maxParts < 0 {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
		return
	}

	opts := ObjectOptions{}
	listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
		return
	}

	// We have to adjust the size of encrypted parts since encrypted parts
	// are slightly larger due to encryption overhead.
	// Further, we have to adjust the ETags of parts when using SSE-S3.
	// To be compatible with AWS S3, SSE-S3 encrypted parts return the
	// plaintext ETag, i.e. the content MD5 of that particular part.
	// This is not the case for SSE-C and SSE-KMS objects.
	if kind, ok := crypto.IsEncrypted(listPartsInfo.UserDefined); ok {
		var objectEncryptionKey []byte
		if kind == crypto.S3 {
			objectEncryptionKey, err = decryptObjectMeta(nil, bucket, object, listPartsInfo.UserDefined)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
				return
			}
		}
		for i, p := range listPartsInfo.Parts {
			listPartsInfo.Parts[i].ETag = tryDecryptETag(objectEncryptionKey, p.ETag, kind == crypto.S3)
			listPartsInfo.Parts[i].Size = p.ActualSize
		}
	}

	response := generateListPartsResponse(listPartsInfo, encodingType)
	encodedSuccessResponse := encodeResponse(response)

	// Write success response.
	writeSuccessResponseXML(w, encodedSuccessResponse)
}