minio/cmd/object-handlers.go

// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/hex"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/klauspost/compress/gzhttp"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
sse "github.com/minio/minio/internal/bucket/encryption"
"github.com/minio/minio/internal/bucket/lifecycle"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/etag"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/handlers"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/s3select"
"github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
xnet "github.com/minio/pkg/net"
"github.com/minio/sio"
)
// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request.
var supportedHeadGetReqParams = map[string]string{
"response-expires": xhttp.Expires,
"response-content-type": xhttp.ContentType,
"response-cache-control": xhttp.CacheControl,
"response-content-encoding": xhttp.ContentEncoding,
"response-content-language": xhttp.ContentLanguage,
"response-content-disposition": xhttp.ContentDisposition,
}
const (
compressionAlgorithmV1 = "golang/snappy/LZ77"
compressionAlgorithmV2 = "klauspost/compress/s2"
// When an upload exceeds encryptBufferThreshold ...
encryptBufferThreshold = 1 << 20
// add an input buffer of this size.
encryptBufferSize = 1 << 20
// minCompressibleSize is the minimum size at which we enable compression.
minCompressibleSize = 4096
)
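// Illustrative sketch (not part of the original file): how the thresholds above
// are typically consulted before enabling compression. isCompressible is the
// package helper used later in this file; this simplified gate is an assumption
// for illustration, not the exact production code path.
func exampleShouldCompress(h http.Header, object string, size int64) bool {
	// Compress only payloads above minCompressibleSize whose content type or
	// extension is considered compressible.
	return isCompressible(h, object) && size > minCompressibleSize
}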
// setHeadGetRespHeaders - set any requested parameters as response headers.
func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
for k, v := range reqParams {
if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok {
w.Header()[header] = v
}
}
}
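// Illustrative sketch (not part of the original file): how a presigned GET's
// response-* query parameters map onto response headers via the table above.
// The parameter values are assumptions for the example only.
func exampleSetHeadGetRespHeaders() {
	w := httptest.NewRecorder()
	reqParams := url.Values{}
	reqParams.Set("response-content-type", "application/json")
	reqParams.Set("response-cache-control", "no-cache")
	setHeadGetRespHeaders(w, reqParams)
	// w.Header() now carries Content-Type: application/json and
	// Cache-Control: no-cache for the eventual GET/HEAD response.
}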
// SelectObjectContentHandler - GET Object?select
// ----------
// This implementation of the GET operation retrieves object content based
// on an SQL expression. In the request, along with the SQL expression, you must
// also specify a data serialization format (JSON or CSV) for the object.
// An illustrative request body is sketched after this handler.
func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SelectObject")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// Fetch object stat info.
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
// Check the auth type so we can return the correct S3 compatible error (NoSuchKey vs AccessDenied).
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
// If the object you request does not exist,
// the error Amazon S3 returns depends on
// whether you also have the s3:ListBucket
// permission.
// * If you have the s3:ListBucket permission
// on the bucket, Amazon S3 will return an
// HTTP status code 404 ("no such key")
// error.
// * If you don't have the s3:ListBucket
// permission, Amazon S3 will return an HTTP
// status code 403 ("access denied") error.
if globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
}) {
_, err = getObjectInfo(ctx, bucket, object, opts)
if toAPIError(ctx, err).Code == "NoSuchKey" {
s3Error = ErrNoSuchKey
}
}
}
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Get request range.
rangeHeader := r.Header.Get(xhttp.Range)
if rangeHeader != "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrUnsupportedRangeHeader), r.URL)
return
}
if r.ContentLength <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL)
return
}
// Take read lock on object, here so subsequent lower-level
// calls do not need to.
lock := objectAPI.NewNSLock(bucket, object)
lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
ctx = lkctx.Context()
defer lock.RUnlock(lkctx.Cancel)
getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil {
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
gopts := opts
gopts.NoLock = true // We already have a lock, we can live with it.
objInfo, err := getObjectInfo(ctx, bucket, object, gopts)
if err != nil {
if globalBucketVersioningSys.PrefixEnabled(bucket, object) {
// Versioning is enabled, so the object may have been deleted and replaced by a
// delete-marker; if present, set the headers (AWS S3 sets these headers as well).
if objInfo.VersionID != "" && objInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
}
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// filter object lock metadata if permission does not permit
getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)
// filter object lock metadata if permission does not permit
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
actualSize, err := objInfo.GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectRSC := s3select.NewObjectReadSeekCloser(
func(offset int64) (io.ReadCloser, error) {
rs := &HTTPRangeSpec{
IsSuffixLength: false,
Start: offset,
End: -1,
}
return getObjectNInfo(ctx, bucket, object, rs, r.Header, noLock, opts)
},
actualSize,
)
s3Select, err := s3select.NewS3Select(r.Body)
if err != nil {
if serr, ok := err.(s3select.SelectError); ok {
encodedErrorResponse := encodeResponse(APIErrorResponse{
Code: serr.ErrorCode(),
Message: serr.ErrorMessage(),
BucketName: bucket,
Key: object,
Resource: r.URL.Path,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
})
writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
} else {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
defer s3Select.Close()
if err = s3Select.Open(objectRSC); err != nil {
if serr, ok := err.(s3select.SelectError); ok {
encodedErrorResponse := encodeResponse(APIErrorResponse{
Code: serr.ErrorCode(),
Message: serr.ErrorMessage(),
BucketName: bucket,
Key: object,
Resource: r.URL.Path,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
})
writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
} else {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
// Validate the SSE-C Key set in the header.
if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
}
}
s3Select.Evaluate(w)
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectAccessedGet,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
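// Illustrative request body (not part of the original file) for the handler
// above, following the AWS S3 SelectObjectContent request shape. The body is
// parsed by s3select.NewS3Select; the expression and serialization values
// below are assumptions for the example only:
//
//	<SelectObjectContentRequest>
//	    <Expression>SELECT s.name FROM S3Object s WHERE s.age > 25</Expression>
//	    <ExpressionType>SQL</ExpressionType>
//	    <InputSerialization>
//	        <CSV><FileHeaderInfo>USE</FileHeaderInfo></CSV>
//	    </InputSerialization>
//	    <OutputSerialization>
//	        <JSON/>
//	    </OutputSerialization>
//	</SelectObjectContentRequest>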
func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI ObjectLayer, bucket, object string, w http.ResponseWriter, r *http.Request) {
if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Check the auth type so we can return the correct S3 compatible error (NoSuchKey vs AccessDenied).
if s3Error := authenticateRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
// If the object you request does not exist,
// the error Amazon S3 returns depends on
// whether you also have the s3:ListBucket
// permission.
// * If you have the s3:ListBucket permission
// on the bucket, Amazon S3 will return an
// HTTP status code 404 ("no such key")
// error.
// * If you don't have the s3:ListBucket
// permission, Amazon S3 will return an HTTP
// status code 403 ("access denied") error.
if globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
}) {
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
_, err = getObjectInfo(ctx, bucket, object, opts)
if toAPIError(ctx, err).Code == "NoSuchKey" {
s3Error = ErrNoSuchKey
}
}
}
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil {
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
// Get request range.
var rs *HTTPRangeSpec
var rangeErr error
rangeHeader := r.Header.Get(xhttp.Range)
if rangeHeader != "" {
// Both 'Range' and 'partNumber' cannot be specified at the same time
if opts.PartNumber > 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber), r.URL)
return
}
rs, rangeErr = parseRequestRangeSpec(rangeHeader)
// Handle only errInvalidRange. Ignore other
// parse errors and treat the request as a
// regular GET, like Amazon S3.
if rangeErr == errInvalidRange {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRange), r.URL)
return
}
if rangeErr != nil {
logger.LogIf(ctx, rangeErr, logger.Application)
}
}
// Validate pre-conditions if any.
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
}
return checkPreconditions(ctx, w, r, oi, opts)
}
var proxy proxyResult
gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
if err != nil {
var (
reader *GetObjectReader
perr error
)
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
// proxy to replication target if active-active replication is in place.
reader, proxy, perr = proxyGetToReplicationTarget(ctx, bucket, object, rs, r.Header, opts, proxytgts)
if perr != nil {
proxyGetErr := ErrorRespToObjectError(perr, bucket, object)
if !isErrObjectNotFound(proxyGetErr) && !isErrVersionNotFound(proxyGetErr) &&
!isErrPreconditionFailed(proxyGetErr) && !isErrInvalidRange(proxyGetErr) {
logger.LogIf(ctx, fmt.Errorf("Replication proxy failed for %s/%s(%s) - %w", bucket, object, opts.VersionID, perr))
}
}
if reader != nil && proxy.Proxy && perr == nil {
gr = reader
}
}
if reader == nil || !proxy.Proxy {
// Validate whether the request was indeed authorized; if it wasn't, we need to return "ErrAccessDenied"
// instead of any namespace-related error.
if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
if isErrPreconditionFailed(err) {
return
}
if proxy.Err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, proxy.Err), r.URL)
return
}
if globalBucketVersioningSys.PrefixEnabled(bucket, object) && gr != nil {
if !gr.ObjInfo.VersionPurgeStatus.Empty() {
// Shows the replication status of a permanent delete of a version
w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(gr.ObjInfo.VersionPurgeStatus)}
}
if !gr.ObjInfo.ReplicationStatus.Empty() && gr.ObjInfo.DeleteMarker {
w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(gr.ObjInfo.ReplicationStatus)}
}
// Versioning is enabled, so the object may have been deleted and replaced by a
// delete-marker; if present, set the headers (AWS S3 sets these headers as well).
if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
}
QueueReplicationHeal(ctx, bucket, gr.ObjInfo)
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
defer gr.Close()
objInfo := gr.ObjInfo
if objInfo.UserTags != "" {
r.Header.Set(xhttp.AmzObjectTagging, objInfo.UserTags)
}
if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
if !proxy.Proxy { // apply lifecycle rules only for local requests
// Automatically remove the object/version if an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
evt := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo)
var success bool
switch evt.Action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, evt.Action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would be still allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, evt.Action == lifecycle.DeleteRestoredVersionAction)
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
}
}
QueueReplicationHeal(ctx, bucket, gr.ObjInfo)
}
// filter object lock metadata if permission does not permit
getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)
// filter object lock metadata if permission does not permit
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)
// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
}
}
if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
hash.AddChecksumHeader(w, objInfo.decryptChecksums())
}
if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Set Parts Count Header
if opts.PartNumber > 0 && len(objInfo.Parts) > 0 {
setPartsCountHeaders(w, objInfo)
}
setHeadGetRespHeaders(w, r.Form)
statusCodeWritten := false
httpWriter := xioutil.WriteOnClose(w)
if rs != nil || opts.PartNumber > 0 {
statusCodeWritten = true
w.WriteHeader(http.StatusPartialContent)
}
// Write object content to response body
if _, err = xioutil.Copy(httpWriter, gr); err != nil {
if !httpWriter.HasWritten() && !statusCodeWritten {
// write error response only if no data or headers have been written to the client yet
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client %w", err))
}
return
}
if err = httpWriter.Close(); err != nil {
if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers have been written to the client yet
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
logger.LogIf(ctx, fmt.Errorf("Unable to write all the data to client %w", err))
}
return
}
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectAccessedGet,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
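// Illustrative sketch (not part of the original file): the Range handling in
// getObjectHandler above only rejects the request on errInvalidRange; any other
// parse failure is logged and the request proceeds as a regular GET, mirroring
// Amazon S3. The helper below is a simplified restatement of that logic.
func exampleParseRange(rangeHeader string) (*HTTPRangeSpec, bool) {
	if rangeHeader == "" {
		return nil, true // no Range header: serve the whole object
	}
	rs, err := parseRequestRangeSpec(rangeHeader)
	if err == errInvalidRange {
		return nil, false // caller replies with ErrInvalidRange
	}
	if err != nil {
		return nil, true // malformed but tolerated: treat as a full-object GET
	}
	return rs, true
}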
// GetObjectHandler - GET Object
// ----------
// This implementation of the GET operation retrieves object. To use GET,
// you must have READ access to the object.
func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObject")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !globalAPIConfig.shouldGzipObjects() {
w.Header().Set(gzhttp.HeaderNoCompression, "true")
}
if r.Header.Get(xMinIOExtract) == "true" && strings.Contains(object, archivePattern) {
api.getObjectInArchiveFileHandler(ctx, objectAPI, bucket, object, w, r)
} else {
api.getObjectHandler(ctx, objectAPI, bucket, object, w, r)
}
}
func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI ObjectLayer, bucket, object string, w http.ResponseWriter, r *http.Request) {
if crypto.S3.IsRequested(r.Header) || crypto.S3KMS.IsRequested(r.Header) { // If SSE-S3 or SSE-KMS present -> AWS fails with undefined error
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrBadRequest))
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
// Check the auth type so we can return the correct S3 compatible error (NoSuchKey vs AccessDenied).
if s3Error := authenticateRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone {
if getRequestAuthType(r) == authTypeAnonymous {
// As per "Permission" section in
// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
// If the object you request does not exist,
// the error Amazon S3 returns depends on
// whether you also have the s3:ListBucket
// permission.
// * If you have the s3:ListBucket permission
// on the bucket, Amazon S3 will return an
// HTTP status code 404 ("no such key")
// error.
// * If you don't have the s3:ListBucket
// permission, Amazon S3 will return an HTTP
// status code 403 ("access denied") error.
if globalPolicySys.IsAllowed(policy.Args{
Action: policy.ListBucketAction,
BucketName: bucket,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
}) {
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
_, err = getObjectInfo(ctx, bucket, object, opts)
if toAPIError(ctx, err).Code == "NoSuchKey" {
s3Error = ErrNoSuchKey
}
}
}
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error))
return
}
// Get request range.
var rs *HTTPRangeSpec
rangeHeader := r.Header.Get(xhttp.Range)
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
var proxy proxyResult
if err != nil {
// proxy HEAD to replication target if active-active replication is configured on the bucket
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
if rangeHeader != "" {
rs, _ = parseRequestRangeSpec(rangeHeader)
}
var oi ObjectInfo
oi, proxy = proxyHeadToReplicationTarget(ctx, bucket, object, rs, opts, proxytgts)
if proxy.Proxy {
objInfo = oi
}
if proxy.Err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, proxy.Err), r.URL)
return
}
}
}
if objInfo.UserTags != "" {
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, objInfo.UserTags)
}
if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
if err != nil && !proxy.Proxy {
if globalBucketVersioningSys.PrefixEnabled(bucket, object) {
switch {
case !objInfo.VersionPurgeStatus.Empty():
w.Header()[xhttp.MinIODeleteReplicationStatus] = []string{string(objInfo.VersionPurgeStatus)}
case !objInfo.ReplicationStatus.Empty() && objInfo.DeleteMarker:
w.Header()[xhttp.MinIODeleteMarkerReplicationStatus] = []string{string(objInfo.ReplicationStatus)}
}
// Versioning is enabled, so the object may have been deleted and replaced by a
// delete-marker; if present, set the headers (AWS S3 sets these headers as well).
if objInfo.VersionID != "" && objInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
}
QueueReplicationHeal(ctx, bucket, objInfo)
// Do an additional verification of whether the object exists when it is a
// delete-marker and the request comes from replication.
if opts.CheckDMReplicationReady {
topts := opts
topts.VersionID = ""
goi, gerr := getObjectInfo(ctx, bucket, object, topts)
if gerr == nil || goi.VersionID != "" { // object layer returned more info because object is deleted
w.Header().Set(xhttp.MinIOTargetReplicationReady, "true")
}
}
}
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
if !proxy.Proxy { // apply lifecycle rules only locally not for proxied requests
// Automatically remove the object/version if an expiry lifecycle rule can be applied
if lc, err := globalLifecycleSys.Get(bucket); err == nil {
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
evt := evalActionFromLifecycle(ctx, *lc, rcfg, objInfo)
var success bool
switch evt.Action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
success = applyExpiryRule(objInfo, false, evt.Action == lifecycle.DeleteVersionAction)
case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
// Restored object delete would be still allowed to proceed as success
// since transition behavior is slightly different.
applyExpiryRule(objInfo, true, evt.Action == lifecycle.DeleteRestoredVersionAction)
}
if success {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey))
return
}
}
QueueReplicationHeal(ctx, bucket, objInfo)
}
// filter object lock metadata if permission does not permit
getRetPerms := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object)
legalHoldPerms := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object)
// filter object lock metadata if permission does not permit
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)
if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
}
// Validate pre-conditions if any.
if checkPreconditions(ctx, w, r, objInfo, opts) {
return
}
if rangeHeader != "" {
// Both 'Range' and 'partNumber' cannot be specified at the same time
if opts.PartNumber > 0 {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRangePartNumber))
return
}
if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
// Handle only errInvalidRange. Ignore other
// parse errors and treat the request as a
// regular GET, like Amazon S3.
if err == errInvalidRange {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
return
}
logger.LogIf(ctx, err)
}
}
// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
// Validate the SSE-C Key set in the header.
if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
}
}
if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
hash.AddChecksumHeader(w, objInfo.decryptChecksums())
}
// Set standard object headers.
if err = setObjectHeaders(w, objInfo, rs, opts); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
// Set Parts Count Header
if opts.PartNumber > 0 && len(objInfo.Parts) > 0 {
setPartsCountHeaders(w, objInfo)
}
// Set any additional requested response headers.
setHeadGetRespHeaders(w, r.Form)
// Successful response.
if rs != nil || opts.PartNumber > 0 {
w.WriteHeader(http.StatusPartialContent)
} else {
w.WriteHeader(http.StatusOK)
}
// Notify object accessed via a HEAD request.
sendEvent(eventArgs{
EventName: event.ObjectAccessedHead,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// HeadObjectHandler - HEAD Object
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HeadObject")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrServerNotInitialized))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if r.Header.Get(xMinIOExtract) == "true" && strings.Contains(object, archivePattern) {
api.headObjectInArchiveFileHandler(ctx, objectAPI, bucket, object, w, r)
} else {
api.headObjectHandler(ctx, objectAPI, bucket, object, w, r)
}
}
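// Illustrative note (not part of the original file): both GetObjectHandler and
// HeadObjectHandler above route to the *InArchiveFileHandler variants when the
// client asks for an entry inside a zip object. The request shape below is an
// assumption for the example only:
//
//	HEAD /bucket/archive.zip/docs/readme.txt
//	x-minio-extract: true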
// Extract metadata relevant for a CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta map[string]string) (map[string]string, error) {
// Make a copy of the supplied metadata to avoid
// changing the original.
defaultMeta := make(map[string]string, len(userMeta))
for k, v := range userMeta {
// skip tier metadata when copying metadata from source object
switch k {
case metaTierName, metaTierStatus, metaTierObjName, metaTierVersionID:
continue
}
defaultMeta[k] = v
}
// remove SSE Headers from source info
crypto.RemoveSSEHeaders(defaultMeta)
// Storage class is special: it can be replaced regardless of the
// metadata directive; if set, it should be preserved and carried
// over into the destination metadata.
sc := r.Header.Get(xhttp.AmzStorageClass)
if sc == "" {
sc = r.Form.Get(xhttp.AmzStorageClass)
}
// if x-amz-metadata-directive says REPLACE then
// we extract metadata from the input headers.
if isDirectiveReplace(r.Header.Get(xhttp.AmzMetadataDirective)) {
emetadata, err := extractMetadata(ctx, r)
if err != nil {
return nil, err
}
if sc != "" {
emetadata[xhttp.AmzStorageClass] = sc
}
return emetadata, nil
}
if sc != "" {
defaultMeta[xhttp.AmzStorageClass] = sc
}
// if x-amz-metadata-directive says COPY then we
// return the default metadata.
if isDirectiveCopy(r.Header.Get(xhttp.AmzMetadataDirective)) {
return defaultMeta, nil
}
// Copy is the default behavior if no x-amz-metadata-directive is set.
return defaultMeta, nil
}
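// Illustrative sketch (not part of the original file): how the metadata
// directive influences getCpObjMetadataFromHeader above. The header values and
// metadata keys are assumptions for the example only.
func exampleMetadataDirective(ctx context.Context, srcMeta map[string]string) {
	// With REPLACE, metadata comes from the request headers and the source
	// object's metadata is ignored (apart from the storage-class handling
	// described above).
	req := httptest.NewRequest(http.MethodPut, "/bucket/object", nil)
	req.Header.Set(xhttp.AmzMetadataDirective, "REPLACE")
	req.Header.Set("X-Amz-Meta-Owner", "analytics")
	if meta, err := getCpObjMetadataFromHeader(ctx, req, srcMeta); err == nil {
		_ = meta // metadata taken from the request headers, not from srcMeta
	}
}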
// getRemoteInstanceTransport contains a singleton roundtripper.
var (
getRemoteInstanceTransport *http.Transport
getRemoteInstanceTransportOnce sync.Once
)
// Returns a minio-go Client configured to access remote host described by destDNSRecord
// Applicable only in a federated deployment
var getRemoteInstanceClient = func(r *http.Request, host string) (*miniogo.Core, error) {
cred := getReqAccessCred(r, globalSite.Region)
// In a federated deployment, all the instances share config files
// and hence are expected to have the same credentials.
return miniogo.NewCore(host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, ""),
Secure: globalIsTLS,
Transport: getRemoteInstanceTransport,
})
}
// Check if the destination bucket is on a remote site; this code only gets executed
// when federation is enabled, i.e. when globalDNSConfig is non-nil.
//
// This function is similar to isRemoteCallRequired but is specific to the CopyObject API:
// if destination and source are the same, we do not need to check whether the destination
// bucket exists locally.
func isRemoteCopyRequired(ctx context.Context, srcBucket, dstBucket string, objAPI ObjectLayer) bool {
if srcBucket == dstBucket {
return false
}
return isRemoteCallRequired(ctx, dstBucket, objAPI)
}
// Check if the bucket is on a remote site; this code only gets executed when federation is enabled.
func isRemoteCallRequired(ctx context.Context, bucket string, objAPI ObjectLayer) bool {
if globalDNSConfig == nil {
return false
}
if globalBucketFederation {
_, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
return err == toObjectErr(errVolumeNotFound, bucket)
}
return false
}
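// Illustrative note (not part of the original file): the copy-specific rule
// above in practice; the bucket names are assumptions for the example only.
//
//	isRemoteCopyRequired(ctx, "photos", "photos", objAPI) // false: same bucket, no federation lookup
//	isRemoteCopyRequired(ctx, "photos", "backup", objAPI) // defers to isRemoteCallRequired for "backup"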
// CopyObjectHandler - Copy Object
// ----------
// This implementation of the PUT operation adds an object to a bucket
// while reading the object from another source.
// Notice: The S3 client can send secret keys in headers for encryption-related jobs;
// the handler should ensure these keys are removed before they are sent to the object layer.
// Currently these keys are:
// - X-Amz-Server-Side-Encryption-Customer-Key
// - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "CopyObject")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Read escaped copy source path to check for parameters.
cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
var vid string
if u, err := url.Parse(cpSrcPath); err == nil {
vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
// Note that url.Parse does the unescaping
cpSrcPath = u.Path
}
srcBucket, srcObject := path2BucketObject(cpSrcPath)
// If source object is empty or bucket is empty, reply back invalid copy source.
if srcObject == "" || srcBucket == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
return
}
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{
Bucket: srcBucket,
Object: srcObject,
VersionID: vid,
}), r.URL)
return
}
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Check if metadata directive is valid.
if !isDirectiveValid(r.Header.Get(xhttp.AmzMetadataDirective)) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMetadataDirective), r.URL)
return
}
// check if tag directive is valid
if !isDirectiveValid(r.Header.Get(xhttp.AmzTagDirective)) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidTagDirective), r.URL)
return
}
// Validate storage class metadata if present
dstSc := r.Header.Get(xhttp.AmzStorageClass)
if dstSc != "" && !storageclass.IsValid(dstSc) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL)
return
}
// Check if bucket encryption is enabled
sseConfig, _ := globalBucketSSEConfigSys.Get(dstBucket)
sseConfig.Apply(r.Header, sse.ApplyOptions{
AutoEncrypt: globalAutoEncryption,
})
var srcOpts, dstOpts ObjectOptions
srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
srcOpts.VersionID = vid
// convert copy src encryption options for GET calls
getOpts := ObjectOptions{
VersionID: srcOpts.VersionID,
Versioned: srcOpts.Versioned,
VersionSuspended: srcOpts.VersionSuspended,
}
getSSE := encrypt.SSE(srcOpts.ServerSideEncryption)
if getSSE != srcOpts.ServerSideEncryption {
getOpts.ServerSideEncryption = getSSE
}
dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil {
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
checkCopyPrecondFn := func(o ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
}
return checkCopyObjectPreconditions(ctx, w, r, o)
}
getOpts.CheckPrecondFn = checkCopyPrecondFn
// FIXME: a possible race exists between a parallel
// GetObject v/s CopyObject with metadata updates, ideally
// we should be holding write lock here but it is not
// possible due to other constraints such as knowing
// the type of source content etc.
lock := noLock
if !cpSrcDstSame {
lock = readLock
}
var rs *HTTPRangeSpec
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts)
if err != nil {
if isErrPreconditionFailed(err) {
return
}
if globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject) && gr != nil {
// Versioning is enabled, so the object may have been deleted and replaced by a
// delete-marker; if present, set the headers (AWS S3 sets these headers as well).
if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
}
}
// Update context bucket & object names for correct S3 XML error response
reqInfo := logger.GetReqInfo(ctx)
reqInfo.BucketName = srcBucket
reqInfo.ObjectName = srcObject
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
defer gr.Close()
srcInfo := gr.ObjInfo
// maximum Upload size for object in a single CopyObject operation.
if isMaxObjectSize(srcInfo.Size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
// We have to copy metadata only if source and destination are the same;
// this changes for encryption, as can be observed below.
if cpSrcDstSame {
srcInfo.metadataOnly = true
}
var chStorageClass bool
if dstSc != "" && dstSc != srcInfo.StorageClass {
chStorageClass = true
srcInfo.metadataOnly = false
} // No changes in storage class are expected, so it's a metadata-only operation.
var reader io.Reader = gr
// Determine the actual (uncompressed, decrypted) size of the source object.
actualSize, err := srcInfo.GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
length := actualSize
if !cpSrcDstSame {
if err := enforceBucketQuotaHard(ctx, dstBucket, actualSize); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
// Check if either the source is encrypted or the destination will be encrypted.
objectEncryption := crypto.Requested(r.Header)
objectEncryption = objectEncryption || crypto.IsSourceEncrypted(srcInfo.UserDefined)
var compressMetadata map[string]string
// Do not compress when the copy is forwarded to a remote federated instance
// (resolved via etcd DNS); pass the decompressed stream to such calls instead.
isDstCompressed := objectAPI.IsCompressionSupported() &&
isCompressible(r.Header, dstObject) &&
length > minCompressibleSize &&
!isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) && !cpSrcDstSame && !objectEncryption
if isDstCompressed {
compressMetadata = make(map[string]string, 2)
// Preserving the compression metadata.
compressMetadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
compressMetadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)
reader = etag.NewReader(reader, nil)
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(reader, actualSize, wantEncryption)
dstOpts.IndexCB = cb
defer s2c.Close()
reader = etag.Wrap(s2c, reader)
length = -1
} else {
delete(srcInfo.UserDefined, ReservedMetadataPrefix+"compression")
delete(srcInfo.UserDefined, ReservedMetadataPrefix+"actual-size")
reader = gr
}
srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader := NewPutObjReader(srcInfo.Reader)
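// pReader wraps the plaintext hash reader; when the target ends up encrypted it
// is extended below via WithEncryption so the object layer receives both the
// plaintext hash reader and the encrypted stream.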
// Handle encryption
encMetadata := make(map[string]string)
if objectAPI.IsEncryptionSupported() {
// Encryption parameters not applicable for this object.
if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); !ok && crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}
// Encryption parameters not present for this object.
if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidSSECustomerAlgorithm), r.URL)
return
}
var oldKey, newKey []byte
var newKeyID string
var kmsCtx kms.Context
var objEncKey crypto.ObjectKey
sseCopyKMS := crypto.S3KMS.IsEncrypted(srcInfo.UserDefined)
sseCopyS3 := crypto.S3.IsEncrypted(srcInfo.UserDefined)
sseCopyC := crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header)
sseC := crypto.SSEC.IsRequested(r.Header)
sseS3 := crypto.S3.IsRequested(r.Header)
sseKMS := crypto.S3KMS.IsRequested(r.Header)
isSourceEncrypted := sseCopyC || sseCopyS3 || sseCopyKMS
isTargetEncrypted := sseC || sseS3 || sseKMS
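// isSourceEncrypted and isTargetEncrypted determine below whether the object
// data must be rewritten (decrypted and/or re-encrypted) or whether a
// metadata-only update suffices.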
if sseC {
newKey, err = ParseSSECustomerRequest(r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
if crypto.S3KMS.IsRequested(r.Header) {
newKeyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(r.Header)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
// If src == dst, the object is encrypted with SSE-C, both the SSE-C
// copy-source key and a new SSE-C key are supplied, and the storage
// class is not changing, then perform a key rotation in place.
if cpSrcDstSame && (sseCopyC && sseC) && !chStorageClass {
oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
for k, v := range srcInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
encMetadata[k] = v
}
}
if err = rotateKey(ctx, oldKey, newKeyID, newKey, srcBucket, srcObject, encMetadata, kmsCtx); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Since we are rotating the keys, make sure to update the metadata.
srcInfo.metadataOnly = true
srcInfo.keyRotation = true
} else {
if isSourceEncrypted || isTargetEncrypted {
// We are not merely copying metadata; at this point we are
// creating a new object, even if source and destination are
// the same object.
if !srcInfo.keyRotation {
srcInfo.metadataOnly = false
}
}
// Calculate the size of the target object
var targetSize int64
switch {
case isDstCompressed:
targetSize = -1
case !isSourceEncrypted && !isTargetEncrypted:
targetSize, _ = srcInfo.GetActualSize()
case isSourceEncrypted && isTargetEncrypted:
objInfo := ObjectInfo{Size: actualSize}
targetSize = objInfo.EncryptedSize()
case !isSourceEncrypted && isTargetEncrypted:
targetSize = srcInfo.EncryptedSize()
case isSourceEncrypted && !isTargetEncrypted:
targetSize, _ = srcInfo.DecryptedSize()
}
if isTargetEncrypted {
var encReader io.Reader
kind, _ := crypto.IsRequested(r.Header)
encReader, objEncKey, err = newEncryptReader(ctx, srcInfo.Reader, kind, newKeyID, newKey, dstBucket, dstObject, encMetadata, kmsCtx)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
reader = etag.Wrap(encReader, srcInfo.Reader)
}
if isSourceEncrypted {
// Remove all source encrypted related metadata to
// avoid copying them in target object.
crypto.RemoveInternalEntries(srcInfo.UserDefined)
}
// do not try to verify encrypted content
srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if isTargetEncrypted {
pReader, err = pReader.WithEncryption(srcInfo.Reader, &objEncKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if dstOpts.IndexCB != nil {
dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB)
}
}
}
}
srcInfo.PutObjReader = pReader
srcInfo.UserDefined, err = getCpObjMetadataFromHeader(ctx, r, srcInfo.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objTags := srcInfo.UserTags
// If x-amz-tagging-directive header is REPLACE, get passed tags.
if isDirectiveReplace(r.Header.Get(xhttp.AmzTagDirective)) {
objTags = r.Header.Get(xhttp.AmzObjectTagging)
if _, err := tags.ParseObjectTags(objTags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
if objTags != "" {
lastTaggingTimestamp := srcInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]
if dstOpts.ReplicationRequest {
srcTimestamp := dstOpts.ReplicationSourceTaggingTimestamp
if !srcTimestamp.IsZero() {
ondiskTimestamp, err := time.Parse(time.RFC3339Nano, lastTaggingTimestamp)
// Update tagging metadata only if the replica timestamp is newer than what is on disk.
if err != nil || ondiskTimestamp.Before(srcTimestamp) {
srcInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp] = srcTimestamp.UTC().Format(time.RFC3339Nano)
srcInfo.UserDefined[xhttp.AmzObjectTagging] = objTags
}
}
} else {
srcInfo.UserDefined[xhttp.AmzObjectTagging] = objTags
srcInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp] = UTCNow().Format(time.RFC3339Nano)
}
}
srcInfo.UserDefined = filterReplicationStatusMetadata(srcInfo.UserDefined)
srcInfo.UserDefined = objectlock.FilterObjectLockMetadata(srcInfo.UserDefined, true, true)
retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), dstBucket, dstObject, r, iampolicy.PutObjectRetentionAction)
holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), dstBucket, dstObject, r, iampolicy.PutObjectLegalHoldAction)
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
// apply default bucket configuration/governance headers for dest side.
retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, dstBucket, dstObject, getObjectInfo, retPerms, holdPerms)
if s3Err == ErrNone && retentionMode.Valid() {
lastretentionTimestamp := srcInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]
if dstOpts.ReplicationRequest {
srcTimestamp := dstOpts.ReplicationSourceRetentionTimestamp
if !srcTimestamp.IsZero() {
ondiskTimestamp, err := time.Parse(time.RFC3339Nano, lastretentionTimestamp)
// Update retention metadata only if the replica timestamp is newer than what is on disk.
if err != nil || ondiskTimestamp.Before(srcTimestamp) {
srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
srcInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp] = srcTimestamp.UTC().Format(time.RFC3339Nano)
}
}
} else {
srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
srcInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp] = UTCNow().Format(time.RFC3339Nano)
}
}
if s3Err == ErrNone && legalHold.Status.Valid() {
lastLegalHoldTimestamp := srcInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]
if dstOpts.ReplicationRequest {
srcTimestamp := dstOpts.ReplicationSourceLegalholdTimestamp
if !srcTimestamp.IsZero() {
ondiskTimestamp, err := time.Parse(time.RFC3339Nano, lastLegalHoldTimestamp)
// Update legal hold metadata only if the replica timestamp is newer than what is on disk.
if err != nil || ondiskTimestamp.Before(srcTimestamp) {
srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
srcInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp] = srcTimestamp.UTC().Format(time.RFC3339Nano)
}
}
} else {
srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
}
}
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if rs := r.Header.Get(xhttp.AmzBucketReplicationStatus); rs != "" {
srcInfo.UserDefined[ReservedMetadataPrefixLower+ReplicaStatus] = replication.Replica.String()
srcInfo.UserDefined[ReservedMetadataPrefixLower+ReplicaTimestamp] = UTCNow().Format(time.RFC3339Nano)
srcInfo.UserDefined[xhttp.AmzBucketReplicationStatus] = rs
}
if dsc := mustReplicate(ctx, dstBucket, dstObject, getMustReplicateOptions(srcInfo, replication.UnsetReplicationType, dstOpts)); dsc.ReplicateAny() {
srcInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
srcInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
}
// Store the preserved compression metadata.
for k, v := range compressMetadata {
srcInfo.UserDefined[k] = v
}
// We need to preserve the encryption headers set in EncryptRequest,
// so do not override them; copy them over instead.
for k, v := range encMetadata {
srcInfo.UserDefined[k] = v
}
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(srcInfo.UserDefined)
// If the source is a legacy object, a metadata-only copy is not possible; we have to overwrite the content.
if srcInfo.Legacy {
srcInfo.metadataOnly = false
}
// Reject the request when neither x-amz-metadata-directive nor x-amz-tagging-directive
// is set to REPLACE, source and destination are the same object, and the copy is
// metadata-only (i.e. the content is not being overwritten). When encryption is
// requested, an explicit "REPLACE" directive is not required; this allows key rotation.
if !isDirectiveReplace(r.Header.Get(xhttp.AmzMetadataDirective)) && !isDirectiveReplace(r.Header.Get(xhttp.AmzTagDirective)) &&
srcInfo.metadataOnly && srcOpts.VersionID == "" && !objectEncryption {
// If x-amz-metadata-directive is not set to REPLACE then we need
// to error out if source and destination are same.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyDest), r.URL)
return
}
remoteCallRequired := isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI)
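// In a federated deployment the destination bucket may be served by another
// MinIO cluster (resolved via etcd DNS); in that case the copy is performed by
// streaming the source object to the remote instance with a regular PutObject
// instead of a local CopyObject.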
var objInfo ObjectInfo
var os *objSweeper
if remoteCallRequired {
var dstRecords []dns.SrvRecord
dstRecords, err = globalDNSConfig.Get(dstBucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Send PutObject request to appropriate instance (in federated deployment)
core, rerr := getRemoteInstanceClient(r, getHostFromSrv(dstRecords))
if rerr != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL)
return
}
tag, err := tags.ParseObjectTags(objTags)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Remove the metadata for remote calls.
delete(srcInfo.UserDefined, ReservedMetadataPrefix+"compression")
delete(srcInfo.UserDefined, ReservedMetadataPrefix+"actual-size")
opts := miniogo.PutObjectOptions{
UserMetadata: srcInfo.UserDefined,
ServerSideEncryption: dstOpts.ServerSideEncryption,
UserTags: tag.ToMap(),
}
remoteObjInfo, rerr := core.PutObject(ctx, dstBucket, dstObject, srcInfo.Reader,
srcInfo.Size, "", "", opts)
if rerr != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL)
return
}
objInfo.ETag = remoteObjInfo.ETag
objInfo.ModTime = remoteObjInfo.LastModified
} else {
os = newObjSweeper(dstBucket, dstObject).WithVersioning(dstOpts.Versioned, dstOpts.VersionSuspended)
// Get appropriate object info to identify the remote object to delete
if !srcInfo.metadataOnly {
goiOpts := os.GetOpts()
if !globalTierConfigMgr.Empty() {
if goi, gerr := getObjectInfo(ctx, dstBucket, dstObject, goiOpts); gerr == nil {
os.SetTransitionState(goi.TransitionedObject)
}
}
}
copyObjectFn := objectAPI.CopyObject
if api.CacheAPI() != nil {
copyObjectFn = api.CacheAPI().CopyObject
}
// Copy the source object to the destination; if source and destination
// are the same object, only the metadata is updated.
objInfo, err = copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
origETag := objInfo.ETag
objInfo.ETag = getDecryptedETag(r.Header, objInfo, false)
response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
encodedSuccessResponse := encodeResponse(response)
if dsc := mustReplicate(ctx, dstBucket, dstObject, getMustReplicateOptions(objInfo, replication.UnsetReplicationType, dstOpts)); dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.ObjectReplicationType)
}
setPutObjHeaders(w, objInfo, false)
// We must not use http.Header().Set here because some (broken) clients
// expect the x-amz-copy-source-version-id header key to be literally
// "x-amz-copy-source-version-id", not in canonicalized form; preserve it as-is.
if srcOpts.VersionID != "" {
w.Header()[strings.ToLower(xhttp.AmzCopySourceVersionID)] = []string{srcOpts.VersionID}
}
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
// Notify object created event.
sendEvent(eventArgs{
EventName: event.ObjectCreatedCopy,
BucketName: dstBucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
if !remoteCallRequired && !globalTierConfigMgr.Empty() {
// Schedule object for immediate transition if eligible.
objInfo.ETag = origETag
enqueueTransitionImmediate(objInfo)
// Remove the transitioned object whose object version is being overwritten.
logger.LogIf(ctx, os.Sweep())
}
}
// PutObjectHandler - PUT Object
// ----------
// This implementation of the PUT operation adds an object to a bucket.
// Note: the S3 client may send secret keys in headers for encryption-related requests;
// the handler must remove these keys before passing the metadata to the object layer.
// Currently these keys are:
// - X-Amz-Server-Side-Encryption-Customer-Key
// - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key
func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObject")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// X-Amz-Copy-Source shouldn't be set for this call.
if _, ok := r.Header[xhttp.AmzCopySource]; ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
return
}
// Validate storage class metadata if present
if sc := r.Header.Get(xhttp.AmzStorageClass); sc != "" {
if !storageclass.IsValid(sc) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL)
return
}
}
clientETag, err := etag.FromContentMD5(r.Header)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
return
}
// if Content-Length is unknown/missing, deny the request
size := r.ContentLength
rAuthType := getRequestAuthType(r)
if rAuthType == authTypeStreamingSigned {
if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
if sizeStr[0] == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
size, err = strconv.ParseInt(sizeStr[0], 10, 64)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
}
if size == -1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
// maximum Upload size for objects in a single operation
if isMaxObjectSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
metadata, err := extractMetadata(ctx, r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
if !objectAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if _, err := tags.ParseObjectTags(objTags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
metadata[xhttp.AmzObjectTagging] = objTags
}
var (
md5hex = clientETag.String()
sha256hex = ""
reader io.Reader = r.Body
s3Err APIErrorCode
putObject = objectAPI.PutObject
)
// Check if put is allowed
if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
switch rAuthType {
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Err = newSignV4ChunkedReader(r)
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
case authTypeSignedV2, authTypePresignedV2:
s3Err = isReqAuthenticatedV2(r)
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
case authTypePresigned, authTypeSigned:
if s3Err = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if !skipContentSha256Cksum(r) {
sha256hex = getContentSha256Cksum(r, serviceS3)
}
}
if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.ReplicateObjectAction); s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
metadata[ReservedMetadataPrefixLower+ReplicaStatus] = replication.Replica.String()
metadata[ReservedMetadataPrefixLower+ReplicaTimestamp] = UTCNow().Format(time.RFC3339Nano)
defer globalReplicationStats.UpdateReplicaStat(bucket, size)
}
// Check if bucket encryption is enabled
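// Apply injects SSE headers into the request when the bucket has a default
// encryption configuration or auto-encryption is enabled, so the object gets
// encrypted even if the client did not ask for it.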
sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
sseConfig.Apply(r.Header, sse.ApplyOptions{
AutoEncrypt: globalAutoEncryption,
})
actualSize := size
var idxCb func() []byte
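// idxCb, when set by the compression reader below, returns the S2 compression
// index after the input has been fully compressed; it is forwarded to the
// object layer through opts.IndexCB and, when SSE applies, encrypted via
// compressionIndexEncrypter further down.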
if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > minCompressibleSize {
// Storing the compression metadata.
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = actualReader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
// Set compression metrics.
var s2c io.ReadCloser
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
s2c, idxCb = newS2CompressReader(actualReader, actualSize, wantEncryption)
defer s2c.Close()
reader = etag.Wrap(s2c, actualReader)
size = -1 // Since the compressed size is unpredictable.
md5hex = "" // Do not try to verify the content.
sha256hex = ""
}
hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := hashReader.AddChecksum(r, size < 0); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader)
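// As in CopyObject, pReader starts out wrapping the plaintext hash reader and
// is extended with the encrypted reader via WithEncryption when SSE applies.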
var opts ObjectOptions
opts, err = putOpts(ctx, r, bucket, object, metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
opts.IndexCB = idxCb
if !opts.MTime.IsZero() && opts.PreserveETag != "" {
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
}
return checkPreconditionsPUT(ctx, w, r, oi, opts)
}
}
if api.CacheAPI() != nil {
putObject = api.CacheAPI().PutObject
}
retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
retentionMode, retentionDate, legalHold, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
if s3Err == ErrNone && retentionMode.Valid() {
metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
}
if s3Err == ErrNone && legalHold.Status.Valid() {
metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
}
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType, opts)); dsc.ReplicateAny() {
metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
}
var objectEncryptionKey crypto.ObjectKey
if objectAPI.IsEncryptionSupported() {
if crypto.Requested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}
reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
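// The encrypted payload is larger than the plaintext; EncryptedSize accounts
// for the per-package overhead added by the server-side encryption format.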
wantSize := int64(-1)
if size >= 0 {
info := ObjectInfo{Size: size}
wantSize = info.EncryptedSize()
}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if opts.IndexCB != nil {
opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
}
opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
}
}
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(metadata)
os := newObjSweeper(bucket, object).WithVersioning(opts.Versioned, opts.VersionSuspended)
if !globalTierConfigMgr.Empty() {
// Get appropriate object info to identify the remote object to delete
goiOpts := os.GetOpts()
if goi, gerr := getObjectInfo(ctx, bucket, object, goiOpts); gerr == nil {
os.SetTransitionState(goi.TransitionedObject)
}
}
// Create the object.
objInfo, err := putObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if r.Header.Get(xMinIOExtract) == "true" && strings.HasSuffix(object, archiveExt) {
opts := ObjectOptions{VersionID: objInfo.VersionID, MTime: objInfo.ModTime}
if _, err := updateObjectMetadataWithZipInfo(ctx, objectAPI, bucket, object, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
origETag := objInfo.ETag
if kind, encrypted := crypto.IsEncrypted(objInfo.UserDefined); encrypted {
switch kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
objInfo.ETag, _ = DecryptETag(objectEncryptionKey, ObjectInfo{ETag: objInfo.ETag})
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
if len(objInfo.ETag) >= 32 && strings.Count(objInfo.ETag, "-") != 1 {
objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:]
}
case crypto.SSEC:
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
if len(objInfo.ETag) >= 32 && strings.Count(objInfo.ETag, "-") != 1 {
objInfo.ETag = objInfo.ETag[len(objInfo.ETag)-32:]
}
}
}
if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType, opts)); dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.ObjectReplicationType)
}
setPutObjHeaders(w, objInfo, false)
writeSuccessResponseHeadersOnly(w)
// Notify object created event.
sendEvent(eventArgs{
EventName: event.ObjectCreatedPut,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
// Remove the transitioned object whose object version is being overwritten.
if !globalTierConfigMgr.Empty() {
// Schedule object for immediate transition if eligible.
objInfo.ETag = origETag
enqueueTransitionImmediate(objInfo)
logger.LogIf(ctx, os.Sweep())
}
// Do not send checksums in events to avoid leaks.
hash.TransferChecksumHeader(w, r)
}
// PutObjectExtractHandler - PUT Object extract is an extended API,
// modeled on the AWS Snowball feature, that auto-extracts a compressed
// (tar) stream: entries are extracted under the same prefix the archive
// is uploaded to, and the folder structure is recreated accordingly.
func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectExtract")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// X-Amz-Copy-Source shouldn't be set for this call.
if _, ok := r.Header[xhttp.AmzCopySource]; ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
return
}
// Validate storage class metadata if present
sc := r.Header.Get(xhttp.AmzStorageClass)
if sc != "" {
if !storageclass.IsValid(sc) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidStorageClass), r.URL)
return
}
}
clientETag, err := etag.FromContentMD5(r.Header)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL)
return
}
// if Content-Length is unknown/missing, deny the request
size := r.ContentLength
rAuthType := getRequestAuthType(r)
if rAuthType == authTypeStreamingSigned {
if sizeStr, ok := r.Header[xhttp.AmzDecodedContentLength]; ok {
if sizeStr[0] == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
size, err = strconv.ParseInt(sizeStr[0], 10, 64)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
}
if size == -1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
// maximum Upload size for objects in a single operation
if isMaxObjectSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
var (
md5hex = clientETag.String()
sha256hex = ""
reader io.Reader = r.Body
s3Err APIErrorCode
putObject = objectAPI.PutObject
)
// Check if put is allowed
if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
switch rAuthType {
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Err = newSignV4ChunkedReader(r)
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
case authTypeSignedV2, authTypePresignedV2:
s3Err = isReqAuthenticatedV2(r)
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
case authTypePresigned, authTypeSigned:
if s3Err = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if !skipContentSha256Cksum(r) {
sha256hex = getContentSha256Cksum(r, serviceS3)
}
}
hreader, err := hash.NewReader(reader, size, md5hex, sha256hex, size)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = hreader.AddChecksum(r, false); err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL)
return
}
if err := enforceBucketQuotaHard(ctx, bucket, size); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Check if bucket encryption is enabled
sseConfig, _ := globalBucketSSEConfigSys.Get(bucket)
sseConfig.Apply(r.Header, sse.ApplyOptions{
AutoEncrypt: globalAutoEncryption,
})
retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction)
holdPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectLegalHoldAction)
if api.CacheAPI() != nil {
putObject = api.CacheAPI().PutObject
}
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
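// putObjectTar is invoked by untar() below once for every entry in the uploaded
// tar stream; each entry is stored as an individual object (optionally under the
// Snowball prefix), going through the same compression, object-lock and
// encryption handling as a regular PutObject.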
putObjectTar := func(reader io.Reader, info os.FileInfo, object string) error {
size := info.Size()
metadata := map[string]string{
xhttp.AmzStorageClass: sc,
}
actualSize := size
var idxCb func() []byte
if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > minCompressibleSize {
// Storing the compression metadata.
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
actualReader, err := hash.NewReader(reader, size, "", "", actualSize)
if err != nil {
return err
}
// Set compression metrics.
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(actualReader, actualSize, wantEncryption)
defer s2c.Close()
idxCb = cb
reader = etag.Wrap(s2c, actualReader)
size = -1 // Since the compressed size is unpredictable.
}
hashReader, err := hash.NewReader(reader, size, "", "", actualSize)
if err != nil {
return err
}
rawReader := hashReader
pReader := NewPutObjReader(rawReader)
if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.ReplicateObjectAction); s3Err != ErrNone {
return err
}
metadata[ReservedMetadataPrefixLower+ReplicaStatus] = replication.Replica.String()
metadata[ReservedMetadataPrefixLower+ReplicaTimestamp] = UTCNow().Format(time.RFC3339Nano)
}
// get encryption options
opts, err := putOpts(ctx, r, bucket, object, metadata)
if err != nil {
return err
}
opts.MTime = info.ModTime()
opts.IndexCB = idxCb
retentionMode, retentionDate, legalHold, s3err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms)
if s3err == ErrNone && retentionMode.Valid() {
metadata[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode)
metadata[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat)
}
if s3err == ErrNone && legalHold.Status.Valid() {
metadata[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status)
}
if s3err != ErrNone {
s3Err = s3err
return ObjectLocked{}
}
if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType, opts)); dsc.ReplicateAny() {
metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
}
var objectEncryptionKey crypto.ObjectKey
if objectAPI.IsEncryptionSupported() {
if crypto.Requested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
return errInvalidEncryptionParameters
}
reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
if err != nil {
return err
}
wantSize := int64(-1)
if size >= 0 {
info := ObjectInfo{Size: size}
wantSize = info.EncryptedSize()
}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
if err != nil {
return err
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
return err
}
}
if opts.IndexCB != nil {
opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
}
}
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(metadata)
// Create the object.
objInfo, err := putObject(ctx, bucket, object, pReader, opts)
if err != nil {
return err
}
if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(ObjectInfo{
UserDefined: metadata,
}, replication.ObjectReplicationType, opts)); dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.ObjectReplicationType)
}
return nil
}
var opts untarOptions
opts.ignoreDirs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreDirs), "true")
opts.ignoreErrs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreErrors), "true")
opts.prefixAll = r.Header.Get(xhttp.MinIOSnowballPrefix)
if opts.prefixAll != "" {
opts.prefixAll = trimLeadingSlash(pathJoin(opts.prefixAll, slashSeparator))
}
if err = untar(ctx, hreader, putObjectTar, opts); err != nil {
apiErr := errorCodes.ToAPIErr(s3Err)
// If no S3 error code was recorded, convert the error and downgrade
// generic internal errors to BadRequest.
if s3Err == ErrNone {
apiErr = toAPIError(ctx, err)
if apiErr.Code == "InternalError" {
// Convert generic internal errors to bad requests.
apiErr = APIError{
Code: "BadRequest",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
}
}
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
w.Header()[xhttp.ETag] = []string{`"` + hex.EncodeToString(hreader.MD5Current()) + `"`}
hash.TransferChecksumHeader(w, r)
writeSuccessResponseHeadersOnly(w)
}
// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "CopyObjectPart")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if crypto.S3KMS.IsRequested(r.Header) { // SSE-KMS is not supported
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, dstBucket, dstObject); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
// Read escaped copy source path to check for parameters.
cpSrcPath := r.Header.Get(xhttp.AmzCopySource)
var vid string
if u, err := url.Parse(cpSrcPath); err == nil {
vid = strings.TrimSpace(u.Query().Get(xhttp.VersionID))
// Note that url.Parse does the unescaping
cpSrcPath = u.Path
}
srcBucket, srcObject := path2BucketObject(cpSrcPath)
// If source object is empty or bucket is empty, reply back invalid copy source.
if srcObject == "" || srcBucket == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopySource), r.URL)
return
}
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, VersionNotFound{
Bucket: srcBucket,
Object: srcObject,
VersionID: vid,
}), r.URL)
return
}
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, srcBucket, srcObject); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
uploadID := r.Form.Get(xhttp.UploadID)
partIDString := r.Form.Get(xhttp.PartNumber)
partID, err := strconv.Atoi(partIDString)
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPart), r.URL)
return
}
// Check that partID does not exceed the maximum part ID allowed for multipart objects.
if isMaxPartID(partID) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidMaxParts), r.URL)
return
}
var srcOpts, dstOpts ObjectOptions
srcOpts, err = copySrcOpts(ctx, r, srcBucket, srcObject)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
srcOpts.VersionID = vid
// convert copy src and dst encryption options for GET/PUT calls
getOpts := ObjectOptions{VersionID: srcOpts.VersionID}
if srcOpts.ServerSideEncryption != nil {
getOpts.ServerSideEncryption = encrypt.SSE(srcOpts.ServerSideEncryption)
}
dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, nil)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
getObjectNInfo := objectAPI.GetObjectNInfo
if api.CacheAPI() != nil {
getObjectNInfo = api.CacheAPI().GetObjectNInfo
}
// Get request range.
var rs *HTTPRangeSpec
var parseRangeErr error
if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
} else {
// This check is to see if the client specified the
// 'x-amz-copy-source-range' header but left its value empty.
_, ok := r.Header[xhttp.AmzCopySourceRange]
if ok {
parseRangeErr = errInvalidRange
}
}
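// A bad 'x-amz-copy-source-range' is not reported immediately; it is
// surfaced from the precondition callback below, after the object info has
// been decrypted and the copy preconditions have been checked.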
checkCopyPartPrecondFn := func(o ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
}
if checkCopyObjectPartPreconditions(ctx, w, r, o) {
return true
}
if parseRangeErr != nil {
logger.LogIf(ctx, parseRangeErr)
writeCopyPartErr(ctx, w, parseRangeErr, r.URL)
// A bad range header is treated like a precondition failure,
// so return true to indicate the Range precondition failed.
return true
}
return false
}
getOpts.CheckPrecondFn = checkCopyPartPrecondFn
gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts)
if err != nil {
if isErrPreconditionFailed(err) {
return
}
if globalBucketVersioningSys.PrefixEnabled(srcBucket, srcObject) && gr != nil {
// Versioning is enabled, so the object may have been deleted and replaced
// by a delete marker; if so, set the version headers as AWS S3 does.
if gr.ObjInfo.VersionID != "" && gr.ObjInfo.DeleteMarker {
w.Header()[xhttp.AmzVersionID] = []string{gr.ObjInfo.VersionID}
w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(gr.ObjInfo.DeleteMarker)}
}
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
defer gr.Close()
srcInfo := gr.ObjInfo
actualPartSize, err := srcInfo.GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err := enforceBucketQuotaHard(ctx, dstBucket, actualPartSize); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Special care for CopyObjectPart
if partRangeErr := checkCopyPartRangeWithSize(rs, actualPartSize); partRangeErr != nil {
writeCopyPartErr(ctx, w, partRangeErr, r.URL)
return
}
// Get the object offset & length
startOffset, length, err := rs.GetOffsetLength(actualPartSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// maximum copy size for multipart objects in a single operation
if isMaxAllowedPartSize(length) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
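// In a federated deployment the destination bucket may be served by a
// different cluster; resolve it via DNS and forward the part upload there.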
if isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) {
var dstRecords []dns.SrvRecord
dstRecords, err = globalDNSConfig.Get(dstBucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Send the PutObjectPart request to the appropriate instance (in a federated deployment).
core, rerr := getRemoteInstanceClient(r, getHostFromSrv(dstRecords))
if rerr != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, rerr), r.URL)
return
}
partInfo, err := core.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, gr, length, "", "", dstOpts.ServerSideEncryption)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
return
}
actualPartSize = length
var reader io.Reader = etag.NewReader(gr, nil)
mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Read compression metadata preserved in the init multipart for the decision.
_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
// Compress only if the compression is enabled during initial multipart.
var idxCb func() []byte
if isCompressed {
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(reader, actualPartSize, wantEncryption)
idxCb = cb
defer s2c.Close()
reader = etag.Wrap(s2c, reader)
length = -1
}
srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
dstOpts, err = copyDstOpts(ctx, r, dstBucket, dstObject, mi.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
dstOpts.IndexCB = idxCb
rawReader := srcInfo.Reader
pReader := NewPutObjReader(rawReader)
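// If the target multipart upload is encrypted, recover the object key from
// the upload metadata, derive a per-part key and encrypt this part with it.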
_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
var objectEncryptionKey crypto.ObjectKey
if objectAPI.IsEncryptionSupported() && isEncrypted {
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
return
}
if crypto.S3.IsEncrypted(mi.UserDefined) && crypto.SSEC.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
return
}
var key []byte
if crypto.SSEC.IsRequested(r.Header) {
key, err = ParseSSECustomerRequest(r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
key, err = decryptObjectMeta(key, dstBucket, dstObject, mi.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
copy(objectEncryptionKey[:], key)
partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
reader = etag.Wrap(encReader, reader)
wantSize := int64(-1)
if length >= 0 {
info := ObjectInfo{Size: length}
wantSize = info.EncryptedSize()
}
srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader, err = pReader.WithEncryption(srcInfo.Reader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if dstOpts.IndexCB != nil {
dstOpts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, dstOpts.IndexCB)
}
}
srcInfo.PutObjReader = pReader
copyObjectPart := objectAPI.CopyObjectPart
if api.CacheAPI() != nil {
copyObjectPart = api.CacheAPI().CopyObjectPart
}
// Copy the source object to the destination; if the source and destination
// objects are the same, only the metadata is updated.
partInfo, err := copyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,
startOffset, length, srcInfo, srcOpts, dstOpts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if isEncrypted {
partInfo.ETag = tryDecryptETag(objectEncryptionKey[:], partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
}
response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
encodedSuccessResponse := encodeResponse(response)
// Write success response.
writeSuccessResponseXML(w, encodedSuccessResponse)
}
// Delete objectAPIHandlers
// DeleteObjectHandler - delete an object
func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteObject")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
if globalDNSConfig != nil {
_, err := globalDNSConfig.Get(bucket)
if err != nil && err != dns.ErrNotImplemented {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
opts, err := delOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
var (
goi ObjectInfo
gerr error
)
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
os := newObjSweeper(bucket, object).WithVersion(opts.VersionID).WithVersioning(opts.Versioned, opts.VersionSuspended)
// Mutations of objects on versioning-suspended buckets
// affect their null version. Through the opts below we select
// the null version's remote object to delete if it has been
// transitioned.
goiOpts := os.GetOpts()
goi, gerr = getObjectInfo(ctx, bucket, object, goiOpts)
if gerr == nil {
os.SetTransitionState(goi.TransitionedObject)
}
dsc := checkReplicateDelete(ctx, bucket, ObjectToDelete{
ObjectV: ObjectV{
ObjectName: object,
VersionID: opts.VersionID,
},
}, goi, opts, gerr)
if dsc.ReplicateAny() {
opts.SetDeleteReplicationState(dsc, opts.VersionID)
}
vID := opts.VersionID
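// Replica delete requests must carry the s3:ReplicateDelete permission and
// are tagged with replica status before the delete is applied.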
if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() {
// check if replica has permission to be deleted.
if apiErrCode := checkRequestAuthType(ctx, r, policy.ReplicateDeleteAction, bucket, object); apiErrCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL)
return
}
opts.SetReplicaStatus(replication.Replica)
if opts.VersionPurgeStatus().Empty() {
// opts.VersionID holds the delete-marker version ID to replicate, which is not yet present on disk.
vID = ""
}
}
apiErr := ErrNone
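// In object-lock enabled buckets, deleting a specific version is subject to
// retention checks and may only proceed with a permitted governance bypass.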
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
if opts.DeletePrefix {
writeErrorResponse(ctx, w, toAPIError(ctx, errors.New("force-delete is forbidden in a locked-enabled bucket")), r.URL)
return
}
if vID != "" {
apiErr = enforceRetentionBypassForDelete(ctx, r, bucket, ObjectToDelete{
ObjectV: ObjectV{
ObjectName: object,
VersionID: vID,
},
}, goi, gerr)
if apiErr != ErrNone && apiErr != ErrNoSuchKey {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErr), r.URL)
return
}
}
}
if apiErr == ErrNoSuchKey {
writeSuccessNoContent(w)
return
}
deleteObject := objectAPI.DeleteObject
if api.CacheAPI() != nil {
deleteObject = api.CacheAPI().DeleteObject
}
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
objInfo, err := deleteObject(ctx, bucket, object, opts)
if err != nil {
switch err.(type) {
case BucketNotFound:
// Handle the case where the bucket does not exist specially.
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
if objInfo.Name == "" {
writeSuccessNoContent(w)
return
}
setPutObjHeaders(w, objInfo, true)
writeSuccessNoContent(w)
eventName := event.ObjectRemovedDelete
if objInfo.DeleteMarker {
eventName = event.ObjectRemovedDeleteMarkerCreated
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: eventName,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
if dsc.ReplicateAny() {
dmVersionID := ""
versionID := ""
if objInfo.DeleteMarker {
dmVersionID = objInfo.VersionID
} else {
versionID = objInfo.VersionID
}
dobj := DeletedObjectReplicationInfo{
DeletedObject: DeletedObject{
ObjectName: object,
VersionID: versionID,
DeleteMarkerVersionID: dmVersionID,
DeleteMarkerMTime: DeleteMarkerMTime{objInfo.ModTime},
DeleteMarker: objInfo.DeleteMarker,
ReplicationState: objInfo.getReplicationState(dsc.String(), opts.VersionID, false),
},
Bucket: bucket,
EventType: ReplicateIncomingDelete,
}
scheduleReplicationDelete(ctx, dobj, objectAPI)
}
// Remove the transitioned object whose object version is being overwritten.
if !globalTierConfigMgr.Empty() {
os.Sweep()
}
}
// PutObjectLegalHoldHandler - set legal hold configuration on an object.
func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectLegalHold")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
// Check permissions to perform this legal hold operation
if s3Err := checkRequestAuthType(ctx, r, policy.PutObjectLegalHoldAction, bucket, object); s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !hasContentMD5(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
return
}
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL)
return
}
legalHold, err := objectlock.ParseObjectLegalHold(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
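// EvalMetadataFn updates the legal-hold entries (and replication markers)
// on the existing version in place, without rewriting the object data.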
popts := ObjectOptions{
MTime: opts.MTime,
VersionID: opts.VersionID,
EvalMetadataFn: func(oi ObjectInfo) error {
oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = strings.ToUpper(string(legalHold.Status))
oi.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp] = UTCNow().Format(time.RFC3339Nano)
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
}
return nil
},
}
objInfo, err := objectAPI.PutObjectMetadata(ctx, bucket, object, popts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.MetadataReplicationType)
}
writeSuccessResponseHeadersOnly(w)
// Notify object event.
sendEvent(eventArgs{
EventName: event.ObjectCreatedPutLegalHold,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// GetObjectLegalHoldHandler - get legal hold configuration of an object.
func (api objectAPIHandlers) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectLegalHold")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectLegalHoldAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
legalHold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
if legalHold.IsEmpty() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchObjectLockConfiguration), r.URL)
return
}
writeSuccessResponseXML(w, encodeResponse(legalHold))
// Notify object legal hold accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectAccessedGetLegalHold,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// PutObjectRetentionHandler - set retention configuration on an object.
func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectRetention")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateSignature(getRequestAuthType(r), r)
if s3Err != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if !hasContentMD5(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL)
return
}
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL)
return
}
objRetention, err := objectlock.ParseObjectRetention(r.Body)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
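// Changing retention on an existing version is subject to a governance
// bypass check before the mode and retain-until date are rewritten.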
popts := ObjectOptions{
MTime: opts.MTime,
VersionID: opts.VersionID,
EvalMetadataFn: func(oi ObjectInfo) error {
if err := enforceRetentionBypassForPut(ctx, r, oi, objRetention, cred, owner); err != nil {
return err
}
if objRetention.Mode.Valid() {
oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(objRetention.Mode)
oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = objRetention.RetainUntilDate.UTC().Format(iso8601TimeFormat)
} else {
oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = ""
oi.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = ""
}
oi.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp] = UTCNow().Format(time.RFC3339Nano)
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
oi.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
}
return nil
},
}
objInfo, err := objectAPI.PutObjectMetadata(ctx, bucket, object, popts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(objInfo, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objectAPI, dsc, replication.MetadataReplicationType)
}
writeSuccessNoContent(w)
// Notify object event.
sendEvent(eventArgs{
EventName: event.ObjectCreatedPutRetention,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// GetObjectRetentionHandler - get retention configuration of an object.
func (api objectAPIHandlers) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectRetention")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectRetentionAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); !rcfg.LockEnabled {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketObjectLockConfiguration), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
retention := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
if !retention.Mode.Valid() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchObjectLockConfiguration), r.URL)
return
}
writeSuccessResponseXML(w, encodeResponse(retention))
// Notify object retention accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectAccessedGetRetention,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// ObjectTagSet key value tags
type ObjectTagSet struct {
Tags []tags.Tag `xml:"Tag"`
}
type objectTagging struct {
XMLName xml.Name `xml:"Tagging"`
TagSet *ObjectTagSet `xml:"TagSet"`
}
// GetObjectTaggingHandler - GET object tagging
func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectTagging")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !objAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Allow getObjectTagging if policy action is set.
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Get object tags
ot, err := objAPI.GetObjectTags(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if opts.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{opts.VersionID}
}
otags := &objectTagging{
TagSet: &ObjectTagSet{},
}
var list []tags.Tag
for k, v := range ot.ToMap() {
list = append(list, tags.Tag{
Key: k,
Value: v,
})
}
otags.TagSet.Tags = list
writeSuccessResponseXML(w, encodeResponse(otags))
}
// PutObjectTaggingHandler - PUT object tagging
func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectTagging")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !objAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Tags XML will not be bigger than 1MiB in size; fail if it is bigger.
tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, 1<<20))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, tags.String())
// Allow putObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objInfo, err := objAPI.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
tagsStr := tags.String()
oi := objInfo.Clone()
oi.UserTags = tagsStr
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
opts.UserDefined = make(map[string]string)
opts.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
opts.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
opts.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp] = UTCNow().Format(time.RFC3339Nano)
}
// Put object tags
objInfo, err = objAPI.PutObjectTags(ctx, bucket, object, tagsStr, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if dsc.ReplicateAny() {
scheduleReplication(ctx, objInfo.Clone(), objAPI, dsc, replication.MetadataReplicationType)
}
if objInfo.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}
writeSuccessResponseHeadersOnly(w)
sendEvent(eventArgs{
EventName: event.ObjectCreatedPutTagging,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// DeleteObjectTaggingHandler - DELETE object tagging
func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteObjectTagging")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !objAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
oi, err := objAPI.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if userTags := oi.UserTags; userTags != "" {
// Set this such that authorization policies can be applied on the object tags.
r.Header.Set(xhttp.AmzObjectTagging, oi.UserTags)
}
// Allow deleteObjectTagging if policy action is set
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(oi, replication.MetadataReplicationType, opts))
if dsc.ReplicateAny() {
opts.UserDefined = make(map[string]string)
opts.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano)
opts.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
}
oi, err = objAPI.DeleteObjectTags(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if dsc.ReplicateAny() {
scheduleReplication(ctx, oi.Clone(), objAPI, dsc, replication.MetadataReplicationType)
}
if oi.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{oi.VersionID}
}
writeSuccessNoContent(w)
sendEvent(eventArgs{
EventName: event.ObjectCreatedDeleteTagging,
BucketName: bucket,
Object: oi,
ReqParams: extractReqParams(r),
RespElements: extractRespElements(w),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}
// PostRestoreObjectHandler - POST restore object handler.
// ----------
func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PostRestoreObject")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Fetch object stat info.
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
// Check for auth type to return S3 compatible error.
if s3Error := checkRequestAuthType(ctx, r, policy.RestoreObjectAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return
}
if r.ContentLength <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL)
return
}
opts, err := postRestoreOpts(ctx, r, bucket, object)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if objInfo.TransitionedObject.Status != lifecycle.TransitionComplete {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL)
return
}
rreq, err := parseRestoreRequest(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
// validate the request
if err := rreq.validate(ctx, objectAPI); err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, r.URL)
return
}
statusCode := http.StatusOK
alreadyRestored := false
if err == nil {
if objInfo.RestoreOngoing && rreq.Type != SelectRestoreRequest {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrObjectRestoreAlreadyInProgress), r.URL)
return
}
if !objInfo.RestoreOngoing && !objInfo.RestoreExpires.IsZero() {
statusCode = http.StatusAccepted
alreadyRestored = true
}
}
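// A restore either refreshes the expiry of an already restored copy
// (202 Accepted) or starts a new restore from the remote tier (200 OK).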
// set or upgrade restore expiry
restoreExpiry := lifecycle.ExpectedExpiryTime(time.Now(), rreq.Days)
metadata := cloneMSS(objInfo.UserDefined)
// update self with restore metadata
if rreq.Type != SelectRestoreRequest {
objInfo.metadataOnly = true // Perform only metadata updates.
metadata[xhttp.AmzRestoreExpiryDays] = strconv.Itoa(rreq.Days)
metadata[xhttp.AmzRestoreRequestDate] = time.Now().UTC().Format(http.TimeFormat)
if alreadyRestored {
metadata[xhttp.AmzRestore] = completedRestoreObj(restoreExpiry).String()
} else {
metadata[xhttp.AmzRestore] = ongoingRestoreObj().String()
}
objInfo.UserDefined = metadata
if _, err := objectAPI.CopyObject(GlobalContext, bucket, object, bucket, object, objInfo, ObjectOptions{
VersionID: objInfo.VersionID,
}, ObjectOptions{
VersionID: objInfo.VersionID,
}); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update replication metadata for %s: %s", objInfo.VersionID, err))
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL)
return
}
// for previously restored object, just update the restore expiry
if alreadyRestored {
return
}
}
restoreObject := mustGetUUID()
if rreq.OutputLocation.S3.BucketName != "" {
w.Header()[xhttp.AmzRestoreOutputPath] = []string{pathJoin(rreq.OutputLocation.S3.BucketName, rreq.OutputLocation.S3.Prefix, restoreObject)}
}
w.WriteHeader(statusCode)
// Notify object restore started via a POST request.
sendEvent(eventArgs{
EventName: event.ObjectRestorePostInitiated,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
// now process the restore in background
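// SELECT-type restores stream query results through s3select against the
// transitioned object; other restore types rehydrate the object from the
// remote tier.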
go func() {
rctx := GlobalContext
if !rreq.SelectParameters.IsEmpty() {
actualSize, err := objInfo.GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
objectRSC := s3select.NewObjectReadSeekCloser(
func(offset int64) (io.ReadCloser, error) {
rs := &HTTPRangeSpec{
IsSuffixLength: false,
Start: offset,
End: -1,
}
return getTransitionedObjectReader(rctx, bucket, object, rs, r.Header,
objInfo, ObjectOptions{VersionID: objInfo.VersionID})
},
actualSize,
)
if err = rreq.SelectParameters.Open(objectRSC); err != nil {
if serr, ok := err.(s3select.SelectError); ok {
encodedErrorResponse := encodeResponse(APIErrorResponse{
Code: serr.ErrorCode(),
Message: serr.ErrorMessage(),
BucketName: bucket,
Key: object,
Resource: r.URL.Path,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
})
writeResponse(w, serr.HTTPStatusCode(), encodedErrorResponse, mimeXML)
} else {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
}
return
}
nr := httptest.NewRecorder()
rw := logger.NewResponseWriter(nr)
rw.LogErrBody = true
rw.LogAllBody = true
rreq.SelectParameters.Evaluate(rw)
rreq.SelectParameters.Close()
return
}
opts := ObjectOptions{
Transition: TransitionOptions{
RestoreRequest: rreq,
RestoreExpiry: restoreExpiry,
},
VersionID: objInfo.VersionID,
}
if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil {
logger.LogIf(ctx, err)
return
}
// Notify object restore completed via a POST request.
sendEvent(eventArgs{
EventName: event.ObjectRestorePostCompleted,
BucketName: bucket,
Object: objInfo,
ReqParams: extractReqParams(r),
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
}()
}