mirror of https://github.com/minio/minio.git
remove unnecessary code checking for supported features (#16423)
parent 3db658e51e
commit b4ef5ff294
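Note on the change: every ObjectLayer backend remaining in the tree answers true to the capability probes (IsEncryptionSupported, IsNotificationSupported, IsListenSupported, IsCompressionSupported, IsTaggingSupported), so the guards deleted below could never fire, and the probe methods themselves could be dropped from the interface. A minimal Go sketch of the pattern being removed — hypothetical names, not the exact MinIO sources:

package main

import "fmt"

// capabilityLayer stands in for the probe methods dropped from the
// ObjectLayer interface in this commit (names simplified).
type capabilityLayer interface {
	IsEncryptionSupported() bool
}

type erasureBackend struct{}

// Every surviving backend returned true, making the guard below dead code.
func (erasureBackend) IsEncryptionSupported() bool { return true }

// Before: handlers guarded each request with a capability probe.
func putBucketEncryptionBefore(api capabilityLayer) error {
	if !api.IsEncryptionSupported() { // unreachable for all backends
		return fmt.Errorf("NotImplemented")
	}
	return nil // proceed with the real handler logic
}

// After: the guard is gone; the handler assumes the capability.
func putBucketEncryptionAfter() error {
	return nil // proceed directly
}

func main() {
	fmt.Println(putBucketEncryptionBefore(erasureBackend{})) // <nil>
	fmt.Println(putBucketEncryptionAfter())                  // <nil>
}

Once the probes are gone, expressions like objectAPI.IsCompressionSupported() && isCompressible(...) collapse to isCompressible(...), which is exactly what the hunks below do.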
@@ -51,11 +51,6 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
return
}

if !objAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]

@@ -890,11 +890,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}

if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

bucket := mux.Vars(r)["bucket"]

// Require Content-Length to be set in the request

@@ -1066,51 +1061,50 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
if objectAPI.IsEncryptionSupported() {
if crypto.Requested(formValues) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}
var (
reader io.Reader
keyID string
key []byte
kmsCtx kms.Context
)
kind, _ := crypto.IsRequested(formValues)
switch kind {
case crypto.SSEC:
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
case crypto.S3KMS:
keyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(formValues)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
reader, objectEncryptionKey, err = newEncryptReader(ctx, hashReader, kind, keyID, key, bucket, object, metadata, kmsCtx)

if crypto.Requested(formValues) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}
var (
reader io.Reader
keyID string
key []byte
kmsCtx kms.Context
)
kind, _ := crypto.IsRequested(formValues)
switch kind {
case crypto.SSEC:
key, err = ParseSSECustomerHeader(formValues)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
case crypto.S3KMS:
keyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(formValues)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
reader, objectEncryptionKey, err = newEncryptReader(ctx, hashReader, kind, keyID, key, bucket, object, metadata, kmsCtx)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
info := ObjectInfo{Size: fileSize}
// do not try to verify encrypted content
hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}

objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
@@ -50,11 +50,6 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
return
}

if !objAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)
return

@@ -119,11 +114,6 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
return
}

if !objectAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)
bucketName := vars["bucket"]
@@ -297,16 +297,9 @@ func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) er
return err
}
case config.CompressionSubSys:
compCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
if _, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]); err != nil {
return err
}

if objAPI != nil {
if compCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
}
}
case config.HealSubSys:
if _, err := heal.LookupConfig(s[config.HealSubSys][config.Default]); err != nil {
return err

@@ -548,10 +541,6 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
if err != nil {
return fmt.Errorf("Unable to setup Compression: %w", err)
}
// Validate if the object layer supports compression.
if cmpCfg.Enabled && !objAPI.IsCompressionSupported() {
return fmt.Errorf("Backend does not support compression")
}
globalCompressConfigMu.Lock()
globalCompressConfig = cmpCfg
globalCompressConfigMu.Unlock()
@@ -1,100 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
"context"
"errors"

"github.com/minio/minio/internal/sync/errgroup"
)

// list all errors that can be ignore in a bucket operation.
var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

// list all errors that can be ignored in a bucket metadata operation.
var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

// markDelete creates a vol entry in .minio.sys/buckets/.deleted until site replication
// syncs the delete to peers
func (er erasureObjects) markDelete(ctx context.Context, bucket, prefix string) error {
storageDisks := er.getDisks()
g := errgroup.WithNErrs(len(storageDisks))
// Make a volume entry on all underlying storage disks.
for index := range storageDisks {
index := index
if storageDisks[index] == nil {
continue
}
g.Go(func() error {
if err := storageDisks[index].MakeVol(ctx, pathJoin(bucket, prefix)); err != nil {
if errors.Is(err, errVolumeExists) {
return nil
}
return err
}
return nil
}, index)
}
err := reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, er.defaultWQuorum())
return toObjectErr(err, bucket)
}

// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
// syncs the delete to peers OR on a new MakeBucket call.
func (er erasureObjects) purgeDelete(ctx context.Context, bucket, prefix string) error {
storageDisks := er.getDisks()
g := errgroup.WithNErrs(len(storageDisks))
// Make a volume entry on all underlying storage disks.
for index := range storageDisks {
index := index
g.Go(func() error {
if storageDisks[index] != nil {
return storageDisks[index].DeleteVol(ctx, pathJoin(bucket, prefix), true)
}
return errDiskNotFound
}, index)
}
err := reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, er.defaultWQuorum())
return toObjectErr(err, bucket)
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (er erasureObjects) IsNotificationSupported() bool {
return true
}

// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (er erasureObjects) IsListenSupported() bool {
return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (er erasureObjects) IsEncryptionSupported() bool {
return true
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (er erasureObjects) IsCompressionSupported() bool {
return true
}

// IsTaggingSupported indicates whether erasureObjects implements tagging support.
func (er erasureObjects) IsTaggingSupported() bool {
return true
}
@@ -1612,30 +1612,6 @@ func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string, o
return bucketInfo, nil
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (z *erasureServerPools) IsNotificationSupported() bool {
return true
}

// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (z *erasureServerPools) IsListenSupported() bool {
return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (z *erasureServerPools) IsEncryptionSupported() bool {
return true
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (z *erasureServerPools) IsCompressionSupported() bool {
return true
}

func (z *erasureServerPools) IsTaggingSupported() bool {
return true
}

// DeleteBucket - deletes a bucket on all serverPools simultaneously,
// even if one of the serverPools fail to delete buckets, we proceed to
// undo a successful operation.

@@ -1666,7 +1642,7 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op
// If site replication is configured, hold on to deleted bucket state until sites sync
switch opts.SRDeleteOp {
case MarkDelete:
z.markDelete(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
z.s3Peer.MakeBucket(context.Background(), pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket), MakeBucketOptions{})
}
}

@@ -1697,27 +1673,6 @@ func (z *erasureServerPools) deleteAll(ctx context.Context, bucket, prefix strin
}
}

// markDelete will create a directory of deleted bucket in .minio.sys/buckets/.deleted across all disks
// in situations where the deleted bucket needs to be held on to until all sites are in sync for
// site replication
func (z *erasureServerPools) markDelete(ctx context.Context, bucket, prefix string) {
for _, servers := range z.serverPools {
for _, set := range servers.sets {
set.markDelete(ctx, bucket, prefix)
}
}
}

// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
// syncs the delete to peers.
func (z *erasureServerPools) purgeDelete(ctx context.Context, bucket, prefix string) {
for _, servers := range z.serverPools {
for _, set := range servers.sets {
set.purgeDelete(ctx, bucket, prefix)
}
}
}

// List all buckets from one of the serverPools, we are not doing merge
// sort here just for simplification. As per design it is assumed
// that all buckets are present on all serverPools.
@@ -718,30 +718,6 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
return s.sets[s.getHashedSetIndex(input)]
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (s *erasureSets) IsNotificationSupported() bool {
return s.getHashedSet("").IsNotificationSupported()
}

// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (s *erasureSets) IsListenSupported() bool {
return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (s *erasureSets) IsEncryptionSupported() bool {
return s.getHashedSet("").IsEncryptionSupported()
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (s *erasureSets) IsCompressionSupported() bool {
return s.getHashedSet("").IsCompressionSupported()
}

func (s *erasureSets) IsTaggingSupported() bool {
return true
}

// listDeletedBuckets lists deleted buckets from all disks.
func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets map[string]VolInfo, readQuorum int) error {
g := errgroup.WithNErrs(len(storageDisks))
@@ -36,6 +36,12 @@ import (
"github.com/minio/pkg/console"
)

// list all errors that can be ignore in a bucket operation.
var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

// list all errors that can be ignored in a bucket metadata operation.
var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

// OfflineDisk represents an unavailable disk.
var OfflineDisk StorageAPI // zero value is nil
@@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@@ -41,16 +41,6 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r
return
}

if !objAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

if !objAPI.IsListenSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)
bucketName := vars["bucket"]
@@ -239,12 +239,6 @@ type ObjectLayer interface {
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)

// Supported operations check
IsNotificationSupported() bool
IsListenSupported() bool
IsEncryptionSupported() bool
IsTaggingSupported() bool
IsCompressionSupported() bool
SetDriveCounts() []int // list of erasure stripe size for each pool in order.

// Healing operations.
@@ -116,11 +116,6 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
return
}

if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])

@@ -224,11 +219,9 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
// filter object lock metadata if permission does not permit
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

actualSize, err := objInfo.GetActualSize()

@@ -288,25 +281,23 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
}

// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
// Validate the SSE-C Key set in the header.
if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
// Validate the SSE-C Key set in the header.
if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
}

s3Select.Evaluate(w)
@@ -328,10 +319,6 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}

opts, err := getOpts(ctx, r, bucket, object)
if err != nil {

@@ -408,11 +395,9 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj

// Validate pre-conditions if any.
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}

return checkPreconditions(ctx, w, r, oi, opts)

@@ -519,20 +504,18 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
}

if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
@@ -632,10 +615,6 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrBadRequest))
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}

getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {

@@ -780,11 +759,9 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
// filter object lock metadata if permission does not permit
objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

if objectAPI.IsEncryptionSupported() {
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}

// Validate pre-conditions if any.

@@ -813,25 +790,23 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
}

// Set encryption response headers
if objectAPI.IsEncryptionSupported() {
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
// Validate the SSE-C Key set in the header.
if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind {
case crypto.S3:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
case crypto.S3KMS:
w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
w.Header().Set(xhttp.AmzServerSideEncryptionKmsID, objInfo.KMSKeyID())
if kmsCtx, ok := objInfo.UserDefined[crypto.MetaContext]; ok {
w.Header().Set(xhttp.AmzServerSideEncryptionKmsContext, kmsCtx)
}
case crypto.SSEC:
// Validate the SSE-C Key set in the header.
if _, err = crypto.SSEC.UnsealObjectKey(r.Header, objInfo.UserDefined, bucket, object); err != nil {
writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
return
}
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm))
w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5))
}

if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" {
@@ -1016,13 +991,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return
}

if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}

vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject, err := unescapePath(vars["object"])

@@ -1128,11 +1096,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
}

checkCopyPrecondFn := func(o ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
return checkCopyObjectPreconditions(ctx, w, r, o)
}

@@ -1214,8 +1180,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
var compressMetadata map[string]string
// No need to compress for remote etcd calls
// Pass the decompressed stream to such calls.
isDstCompressed := objectAPI.IsCompressionSupported() &&
isCompressible(r.Header, dstObject) &&
isDstCompressed := isCompressible(r.Header, dstObject) &&
length > minCompressibleSize &&
!isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) && !cpSrcDstSame && !objectEncryption
if isDstCompressed {

@@ -1225,7 +1190,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
compressMetadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)

reader = etag.NewReader(reader, nil)
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
wantEncryption := crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(reader, actualSize, wantEncryption)
dstOpts.IndexCB = cb
defer s2c.Close()

@@ -1247,133 +1212,131 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re

// Handle encryption
encMetadata := make(map[string]string)
if objectAPI.IsEncryptionSupported() {
// Encryption parameters not applicable for this object.
if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); !ok && crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
// Encryption parameters not applicable for this object.
if _, ok := crypto.IsEncrypted(srcInfo.UserDefined); !ok && crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}
// Encryption parameters not present for this object.
if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidSSECustomerAlgorithm), r.URL)
return
}

var oldKey, newKey []byte
var newKeyID string
var kmsCtx kms.Context
var objEncKey crypto.ObjectKey
sseCopyKMS := crypto.S3KMS.IsEncrypted(srcInfo.UserDefined)
sseCopyS3 := crypto.S3.IsEncrypted(srcInfo.UserDefined)
sseCopyC := crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header)
sseC := crypto.SSEC.IsRequested(r.Header)
sseS3 := crypto.S3.IsRequested(r.Header)
sseKMS := crypto.S3KMS.IsRequested(r.Header)

isSourceEncrypted := sseCopyC || sseCopyS3 || sseCopyKMS
isTargetEncrypted := sseC || sseS3 || sseKMS

if sseC {
newKey, err = ParseSSECustomerRequest(r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Encryption parameters not present for this object.
if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidSSECustomerAlgorithm), r.URL)
}
if crypto.S3KMS.IsRequested(r.Header) {
newKeyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(r.Header)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}

// If src == dst and either
// - the object is encrypted using SSE-C and two different SSE-C keys are present
// - the object is encrypted using SSE-S3 and the SSE-S3 header is present
// - the object storage class is not changing
// then execute a key rotation.
if cpSrcDstSame && (sseCopyC && sseC) && !chStorageClass {
oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

var oldKey, newKey []byte
var newKeyID string
var kmsCtx kms.Context
var objEncKey crypto.ObjectKey
sseCopyKMS := crypto.S3KMS.IsEncrypted(srcInfo.UserDefined)
sseCopyS3 := crypto.S3.IsEncrypted(srcInfo.UserDefined)
sseCopyC := crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header)
sseC := crypto.SSEC.IsRequested(r.Header)
sseS3 := crypto.S3.IsRequested(r.Header)
sseKMS := crypto.S3KMS.IsRequested(r.Header)

isSourceEncrypted := sseCopyC || sseCopyS3 || sseCopyKMS
isTargetEncrypted := sseC || sseS3 || sseKMS

if sseC {
newKey, err = ParseSSECustomerRequest(r)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
}
if crypto.S3KMS.IsRequested(r.Header) {
newKeyID, kmsCtx, err = crypto.S3KMS.ParseHTTP(r.Header)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
for k, v := range srcInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
encMetadata[k] = v
}
}

// If src == dst and either
// - the object is encrypted using SSE-C and two different SSE-C keys are present
// - the object is encrypted using SSE-S3 and the SSE-S3 header is present
// - the object storage class is not changing
// then execute a key rotation.
if cpSrcDstSame && (sseCopyC && sseC) && !chStorageClass {
oldKey, err = ParseSSECopyCustomerRequest(r.Header, srcInfo.UserDefined)
if err = rotateKey(ctx, oldKey, newKeyID, newKey, srcBucket, srcObject, encMetadata, kmsCtx); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Since we are rotating the keys, make sure to update the metadata.
srcInfo.metadataOnly = true
srcInfo.keyRotation = true
} else {
if isSourceEncrypted || isTargetEncrypted {
// We are not only copying just metadata instead
// we are creating a new object at this point, even
// if source and destination are same objects.
if !srcInfo.keyRotation {
srcInfo.metadataOnly = false
}
}

// Calculate the size of the target object
var targetSize int64

switch {
case isDstCompressed:
targetSize = -1
case !isSourceEncrypted && !isTargetEncrypted:
targetSize, _ = srcInfo.GetActualSize()
case isSourceEncrypted && isTargetEncrypted:
objInfo := ObjectInfo{Size: actualSize}
targetSize = objInfo.EncryptedSize()
case !isSourceEncrypted && isTargetEncrypted:
targetSize = srcInfo.EncryptedSize()
case isSourceEncrypted && !isTargetEncrypted:
targetSize, _ = srcInfo.DecryptedSize()
}

if isTargetEncrypted {
var encReader io.Reader
kind, _ := crypto.IsRequested(r.Header)
encReader, objEncKey, err = newEncryptReader(ctx, srcInfo.Reader, kind, newKeyID, newKey, dstBucket, dstObject, encMetadata, kmsCtx)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
reader = etag.Wrap(encReader, srcInfo.Reader)
}

for k, v := range srcInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
encMetadata[k] = v
}
}
if isSourceEncrypted {
// Remove all source encrypted related metadata to
// avoid copying them in target object.
crypto.RemoveInternalEntries(srcInfo.UserDefined)
}

if err = rotateKey(ctx, oldKey, newKeyID, newKey, srcBucket, srcObject, encMetadata, kmsCtx); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// do not try to verify encrypted content
srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

// Since we are rotating the keys, make sure to update the metadata.
srcInfo.metadataOnly = true
srcInfo.keyRotation = true
} else {
if isSourceEncrypted || isTargetEncrypted {
// We are not only copying just metadata instead
// we are creating a new object at this point, even
// if source and destination are same objects.
if !srcInfo.keyRotation {
srcInfo.metadataOnly = false
}
}

// Calculate the size of the target object
var targetSize int64

switch {
case isDstCompressed:
targetSize = -1
case !isSourceEncrypted && !isTargetEncrypted:
targetSize, _ = srcInfo.GetActualSize()
case isSourceEncrypted && isTargetEncrypted:
objInfo := ObjectInfo{Size: actualSize}
targetSize = objInfo.EncryptedSize()
case !isSourceEncrypted && isTargetEncrypted:
targetSize = srcInfo.EncryptedSize()
case isSourceEncrypted && !isTargetEncrypted:
targetSize, _ = srcInfo.DecryptedSize()
}

if isTargetEncrypted {
var encReader io.Reader
kind, _ := crypto.IsRequested(r.Header)
encReader, objEncKey, err = newEncryptReader(ctx, srcInfo.Reader, kind, newKeyID, newKey, dstBucket, dstObject, encMetadata, kmsCtx)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
reader = etag.Wrap(encReader, srcInfo.Reader)
}

if isSourceEncrypted {
// Remove all source encrypted related metadata to
// avoid copying them in target object.
crypto.RemoveInternalEntries(srcInfo.UserDefined)
}

// do not try to verify encrypted content
srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize)
if isTargetEncrypted {
pReader, err = pReader.WithEncryption(srcInfo.Reader, &objEncKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

if isTargetEncrypted {
pReader, err = pReader.WithEncryption(srcInfo.Reader, &objEncKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if dstOpts.IndexCB != nil {
dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB)
}
if dstOpts.IndexCB != nil {
dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB)
}
}
}
@@ -1630,13 +1593,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}

if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}

vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])

@@ -1699,11 +1655,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}

if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
if !objectAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

if _, err := tags.ParseObjectTags(objTags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

@@ -1773,7 +1724,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req

actualSize := size
var idxCb func() []byte
if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > minCompressibleSize {
if isCompressible(r.Header, object) && size > minCompressibleSize {
// Storing the compression metadata.
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

@@ -1789,7 +1740,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
// Set compression metrics.
var s2c io.ReadCloser
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
wantEncryption := crypto.Requested(r.Header)
s2c, idxCb = newS2CompressReader(actualReader, actualSize, wantEncryption)
defer s2c.Close()

@@ -1822,11 +1773,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req

if !opts.MTime.IsZero() && opts.PreserveETag != "" {
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
return checkPreconditionsPUT(ctx, w, r, oi, opts)
}

@@ -1863,41 +1812,39 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus()
}
var objectEncryptionKey crypto.ObjectKey
if objectAPI.IsEncryptionSupported() {
if crypto.Requested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}

reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

wantSize := int64(-1)
if size >= 0 {
info := ObjectInfo{Size: size}
wantSize = info.EncryptedSize()
}

// do not try to verify encrypted content
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if opts.IndexCB != nil {
opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
}
opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
if crypto.Requested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL)
return
}

reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

wantSize := int64(-1)
if size >= 0 {
info := ObjectInfo{Size: size}
wantSize = info.EncryptedSize()
}

// do not try to verify encrypted content
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if opts.IndexCB != nil {
opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
}
opts.EncryptFn = metadataEncrypter(objectEncryptionKey)
}

// Ensure that metadata does not contain sensitive information
@@ -2001,13 +1948,6 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return
}

if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}

vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])

@@ -2145,7 +2085,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h

actualSize := size
var idxCb func() []byte
if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > minCompressibleSize {
if isCompressible(r.Header, object) && size > minCompressibleSize {
// Storing the compression metadata.
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

@@ -2156,7 +2096,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
}

// Set compression metrics.
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
wantEncryption := crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(actualReader, actualSize, wantEncryption)
defer s2c.Close()
idxCb = cb

@@ -2212,37 +2152,35 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
}

var objectEncryptionKey crypto.ObjectKey
if objectAPI.IsEncryptionSupported() {
if crypto.Requested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
return errInvalidEncryptionParameters
}

reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
if err != nil {
return err
}

wantSize := int64(-1)
if size >= 0 {
info := ObjectInfo{Size: size}
wantSize = info.EncryptedSize()
}

// do not try to verify encrypted content
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
if err != nil {
return err
}

pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
return err
}
if crypto.Requested(r.Header) && !HasSuffix(object, SlashSeparator) { // handle SSE requests
if crypto.SSECopy.IsRequested(r.Header) {
return errInvalidEncryptionParameters
}
if opts.IndexCB != nil {
opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)

reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
if err != nil {
return err
}

wantSize := int64(-1)
if size >= 0 {
info := ObjectInfo{Size: size}
wantSize = info.EncryptedSize()
}

// do not try to verify encrypted content
hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
if err != nil {
return err
}

pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey)
if err != nil {
return err
}
}
if opts.IndexCB != nil {
opts.IndexCB = compressionIndexEncrypter(objectEncryptionKey, opts.IndexCB)
}

// Ensure that metadata does not contain sensitive information

@@ -2261,6 +2199,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
}
return nil
}

var opts untarOptions
opts.ignoreDirs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreDirs), "true")
opts.ignoreErrs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreErrors), "true")
@@ -2827,11 +2766,6 @@ func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *h
return
}

if !objAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

// Allow getObjectTagging if policy action is set.
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectTaggingAction, bucket, object); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL)

@@ -2889,10 +2823,6 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !objAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

// Tags XML will not be bigger than 1MiB in size, fail if its bigger.
tags, err := tags.ParseObjectXML(io.LimitReader(r.Body, 1<<20))

@@ -2971,10 +2901,6 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !objAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)
bucket := vars["bucket"]
@@ -71,13 +71,6 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
return
}

if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}

vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])

@@ -107,16 +100,14 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r

encMetadata := map[string]string{}

if objectAPI.IsEncryptionSupported() {
if crypto.Requested(r.Header) {
if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Set this for multipart only operations, we need to differentiate during
// decryption if the file was actually multipart or not.
encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
if crypto.Requested(r.Header) {
if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Set this for multipart only operations, we need to differentiate during
// decryption if the file was actually multipart or not.
encMetadata[ReservedMetadataPrefix+"Encrypted-Multipart"] = ""
}

// Extract metadata that needs to be saved.

@@ -127,11 +118,6 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
}

if objTags := r.Header.Get(xhttp.AmzObjectTagging); objTags != "" {
if !objectAPI.IsTaggingSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

if _, err := tags.ParseObjectTags(objTags); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

@@ -176,7 +162,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
// Ensure that metadata does not contain sensitive information
crypto.RemoveSensitiveEntries(metadata)

if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) {
if isCompressible(r.Header, object) {
// Storing the compression metadata.
metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
}

@@ -189,11 +175,9 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r

if !opts.MTime.IsZero() && opts.PreserveETag != "" {
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
return checkPreconditionsPUT(ctx, w, r, oi, opts)
}
@@ -245,11 +229,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return
}

if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}

vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject, err := unescapePath(vars["object"])

@@ -351,11 +330,9 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
}

checkCopyPartPrecondFn := func(o ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if _, err := DecryptObjectInfo(&o, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if checkCopyObjectPartPreconditions(ctx, w, r, o) {
return true

@@ -462,7 +439,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
// Compress only if the compression is enabled during initial multipart.
var idxCb func() []byte
if isCompressed {
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
wantEncryption := crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(reader, actualPartSize, wantEncryption)
idxCb = cb
defer s2c.Close()

@@ -488,7 +465,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt

_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
var objectEncryptionKey crypto.ObjectKey
if objectAPI.IsEncryptionSupported() && isEncrypted {
if isEncrypted {
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
return
@@ -578,13 +555,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
}

if crypto.Requested(r.Header) {
if !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
}

vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := unescapePath(vars["object"])

@@ -711,7 +681,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]

var idxCb func() []byte
if objectAPI.IsCompressionSupported() && isCompressed {
if isCompressed {
actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)

@@ -723,7 +693,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
}

// Set compression metrics.
wantEncryption := objectAPI.IsEncryptionSupported() && crypto.Requested(r.Header)
wantEncryption := crypto.Requested(r.Header)
s2c, cb := newS2CompressReader(actualReader, actualSize, wantEncryption)
idxCb = cb
defer s2c.Close()

@@ -747,7 +717,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http

_, isEncrypted := crypto.IsEncrypted(mi.UserDefined)
var objectEncryptionKey crypto.ObjectKey
if objectAPI.IsEncryptionSupported() && isEncrypted {
if isEncrypted {
if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL)
return

@@ -1157,7 +1127,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
// Due to AWS S3, SSE-S3 encrypted parts return the plaintext ETag
// being the content MD5 of that particular part. This is not the
// case for SSE-C and SSE-KMS objects.
if kind, ok := crypto.IsEncrypted(listPartsInfo.UserDefined); ok && objectAPI.IsEncryptionSupported() {
if kind, ok := crypto.IsEncrypted(listPartsInfo.UserDefined); ok {
var objectEncryptionKey []byte
if kind == crypto.S3 {
objectEncryptionKey, err = decryptObjectMeta(nil, bucket, object, listPartsInfo.UserDefined)
@@ -67,10 +67,6 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}

zipPath, object, err := splitZipExtensionPath(object)
if err != nil {

@@ -135,11 +131,9 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,

// Validate pre-conditions if any.
opts.CheckPrecondFn = func(oi ObjectInfo) bool {
if objectAPI.IsEncryptionSupported() {
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}
if _, err := DecryptObjectInfo(&oi, r); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return true
}

return checkPreconditions(ctx, w, r, oi, opts)

@@ -381,10 +375,6 @@ func (api objectAPIHandlers) headObjectInArchiveFileHandler(ctx context.Context,
writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrBadRequest))
return
}
if crypto.Requested(r.Header) && !objectAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrBadRequest), r.URL)
return
}

zipPath, object, err := splitZipExtensionPath(object)
if err != nil {
@@ -4206,7 +4206,7 @@ func (c *SiteReplicationSys) purgeDeletedBucket(ctx context.Context, objAPI Obje
if !ok {
return
}
z.purgeDelete(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
z.s3Peer.DeleteBucket(context.Background(), pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket), DeleteBucketOptions{})
}

// healBucket creates/deletes the bucket according to latest state across clusters participating in site replication.