Mirror of https://github.com/minio/minio.git (synced 2025-05-22 01:53:55 -04:00)

parent d84fc58cac
commit 14b1c9f8e4
@@ -916,7 +916,7 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
 // case of incomplete read.
 pipeCloser := func() { pr.Close() }

-gr, gerr := fn(pr, h, opts.CheckCopyPrecondFn, pipeCloser)
+gr, gerr := fn(pr, h, opts.CheckPrecondFn, pipeCloser)
 if gerr != nil {
 return gr, numHits, gerr
 }
@@ -613,7 +613,7 @@ func getDecryptedETag(headers http.Header, objInfo ObjectInfo, copySource bool)
 // As per AWS S3 Spec, ETag for SSE-C encrypted objects need not be MD5Sum of the data.
 // Since server side copy with same source and dest just replaces the ETag, we save
 // encrypted content MD5Sum as ETag for both SSE-C and SSE-S3, we standardize the ETag
-//encryption across SSE-C and SSE-S3, and only return last 32 bytes for SSE-C
+// encryption across SSE-C and SSE-S3, and only return last 32 bytes for SSE-C
 if crypto.SSEC.IsEncrypted(objInfo.UserDefined) && !copySource {
 return objInfo.ETag[len(objInfo.ETag)-32:]
 }
@@ -777,34 +777,6 @@ func (o *ObjectInfo) EncryptedSize() int64 {
 return int64(size)
 }

-// DecryptCopyObjectInfo tries to decrypt the provided object if it is encrypted.
-// It fails if the object is encrypted and the HTTP headers don't contain
-// SSE-C headers or the object is not encrypted but SSE-C headers are provided. (AWS behavior)
-// DecryptObjectInfo returns 'ErrNone' if the object is not encrypted or the
-// decryption succeeded.
-//
-// DecryptCopyObjectInfo also returns whether the object is encrypted or not.
-func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (errCode APIErrorCode, encrypted bool) {
-// Directories are never encrypted.
-if info.IsDir {
-return ErrNone, false
-}
-if errCode, encrypted = ErrNone, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSECopy.IsRequested(headers) {
-errCode = ErrInvalidEncryptionParameters
-} else if encrypted {
-if (!crypto.SSECopy.IsRequested(headers) && crypto.SSEC.IsEncrypted(info.UserDefined)) ||
-(crypto.SSECopy.IsRequested(headers) && crypto.S3.IsEncrypted(info.UserDefined)) {
-errCode = ErrSSEEncryptedObject
-return
-}
-var err error
-if info.Size, err = info.DecryptedSize(); err != nil {
-errCode = toAPIErrorCode(GlobalContext, err)
-}
-}
-return
-}
-
 // DecryptObjectInfo tries to decrypt the provided object if it is encrypted.
 // It fails if the object is encrypted and the HTTP headers don't contain
 // SSE-C headers or the object is not encrypted but SSE-C headers are provided. (AWS behavior)
@@ -812,32 +784,46 @@ func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (errCode APIEr
 // decryption succeeded.
 //
 // DecryptObjectInfo also returns whether the object is encrypted or not.
-func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, err error) {
+func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err error) {
 // Directories are never encrypted.
 if info.IsDir {
 return false, nil
 }
-// disallow X-Amz-Server-Side-Encryption header on HEAD and GET
-if crypto.S3.IsRequested(headers) {
-err = errInvalidEncryptionParameters
-return
+if r == nil {
+return false, errInvalidArgument
 }
-if err, encrypted = nil, crypto.IsEncrypted(info.UserDefined); !encrypted && crypto.SSEC.IsRequested(headers) {
-err = errInvalidEncryptionParameters
-} else if encrypted {
+
+headers := r.Header
+
+// disallow X-Amz-Server-Side-Encryption header on HEAD and GET
+switch r.Method {
+case http.MethodGet, http.MethodHead:
+if crypto.S3.IsRequested(headers) {
+return false, errInvalidEncryptionParameters
+}
+}
+
+encrypted = crypto.IsEncrypted(info.UserDefined)
+if !encrypted && crypto.SSEC.IsRequested(headers) {
+return false, errInvalidEncryptionParameters
+}
+
+if encrypted {
 if (crypto.SSEC.IsEncrypted(info.UserDefined) && !crypto.SSEC.IsRequested(headers)) ||
 (crypto.S3.IsEncrypted(info.UserDefined) && crypto.SSEC.IsRequested(headers)) {
-err = errEncryptedObject
-return
+return encrypted, errEncryptedObject
 }
-_, err = info.DecryptedSize()
+
+if _, err = info.DecryptedSize(); err != nil {
+return encrypted, err
+}

 if crypto.IsEncrypted(info.UserDefined) && !crypto.IsMultiPart(info.UserDefined) {
-info.ETag = getDecryptedETag(headers, *info, false)
+info.ETag = getDecryptedETag(headers, *info, headers.Get(crypto.SSECopyAlgorithm) != "")
 }
-
 }
-return
+
+return encrypted, nil
 }

 // The customer key in the header is used by the gateway for encryption in the case of
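The rewritten DecryptObjectInfo above now receives the whole *http.Request and rejects the X-Amz-Server-Side-Encryption header only on read requests (GET/HEAD). Below is a minimal, self-contained sketch of that method-gated check using only the standard library; the helper name rejectSSEHeaderOnRead and the local error value are illustrative stand-ins, not part of the MinIO codebase.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Local stand-in for the sentinel error used in the diff; illustration only.
var errInvalidEncryptionParameters = errors.New("invalid encryption parameters")

// rejectSSEHeaderOnRead is a hypothetical helper showing the same idea as the
// new switch in DecryptObjectInfo: the X-Amz-Server-Side-Encryption header is
// only meaningful on writes, so read-style requests (GET/HEAD) that carry it
// are rejected up front.
func rejectSSEHeaderOnRead(r *http.Request) error {
	switch r.Method {
	case http.MethodGet, http.MethodHead:
		if r.Header.Get("X-Amz-Server-Side-Encryption") != "" {
			return errInvalidEncryptionParameters
		}
	}
	return nil
}

func main() {
	get := &http.Request{Method: http.MethodGet, Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}}
	put := &http.Request{Method: http.MethodPut, Header: http.Header{"X-Amz-Server-Side-Encryption": []string{"AES256"}}}
	fmt.Println(rejectSSEHeaderOnRead(get)) // invalid encryption parameters
	fmt.Println(rejectSSEHeaderOnRead(put)) // <nil>
}
```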
@@ -80,44 +80,49 @@ func TestEncryptRequest(t *testing.T) {

 var decryptObjectInfoTests = []struct {
 info ObjectInfo
-headers http.Header
+request *http.Request
 expErr error
 }{
 {
 info: ObjectInfo{Size: 100},
-headers: http.Header{},
+request: &http.Request{Header: http.Header{}},
 expErr: nil,
 },
 {
 info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
-headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+request: &http.Request{Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
 expErr: nil,
 },
 {
 info: ObjectInfo{Size: 0, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
-headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+request: &http.Request{Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
 expErr: nil,
 },
 {
 info: ObjectInfo{Size: 100, UserDefined: map[string]string{crypto.SSECSealedKey: "EAAfAAAAAAD7v1hQq3PFRUHsItalxmrJqrOq6FwnbXNarxOOpb8jTWONPPKyM3Gfjkjyj6NCf+aB/VpHCLCTBA=="}},
-headers: http.Header{},
+request: &http.Request{Header: http.Header{}},
 expErr: errEncryptedObject,
 },
 {
 info: ObjectInfo{Size: 100, UserDefined: map[string]string{}},
-headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+request: &http.Request{Method: http.MethodGet, Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
+expErr: errInvalidEncryptionParameters,
+},
+{
+info: ObjectInfo{Size: 100, UserDefined: map[string]string{}},
+request: &http.Request{Method: http.MethodHead, Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
 expErr: errInvalidEncryptionParameters,
 },
 {
 info: ObjectInfo{Size: 31, UserDefined: map[string]string{crypto.SSESealAlgorithm: crypto.InsecureSealAlgorithm}},
-headers: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}},
+request: &http.Request{Header: http.Header{crypto.SSECAlgorithm: []string{crypto.SSEAlgorithmAES256}}},
 expErr: errObjectTampered,
 },
 }

 func TestDecryptObjectInfo(t *testing.T) {
 for i, test := range decryptObjectInfoTests {
-if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
+if encrypted, err := DecryptObjectInfo(&test.info, test.request); err != test.expErr {
 t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
 } else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
 t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)
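The updated test table above builds full *http.Request fixtures so the code under test can see the HTTP method as well as the headers. A small self-contained sketch of that fixture style follows; the SSE-C header name is written out literally here instead of via the crypto package constants, which is an assumption about what those constants expand to.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Each fixture carries both a method and headers, mirroring the shape of
	// the new decryptObjectInfoTests entries.
	fixtures := []*http.Request{
		{Header: http.Header{}},
		{Method: http.MethodGet, Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}},
		{Method: http.MethodHead, Header: http.Header{"X-Amz-Server-Side-Encryption-Customer-Algorithm": []string{"AES256"}}},
	}
	for i, r := range fixtures {
		fmt.Printf("case %d: method=%q ssec-algorithm=%q\n",
			i, r.Method, r.Header.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm"))
	}
}
```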
@@ -173,7 +173,7 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
 // case of incomplete read.
 pipeCloser := func() { pr.Close() }

-return fn(pr, h, opts.CheckCopyPrecondFn, pipeCloser)
+return fn(pr, h, opts.CheckPrecondFn, pipeCloser)
 }

 // GetObject - reads an object erasured coded across multiple
@@ -752,7 +752,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 return nil, err
 }

-return objReaderFn(reader, h, opts.CheckCopyPrecondFn, closeFn)
+return objReaderFn(reader, h, opts.CheckPrecondFn, closeFn)
 }

 // GetObject - reads an object from the disk.
@@ -908,7 +908,7 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *
 // CopyObject - Copies a blob from source container to destination container.
 // Uses Azure equivalent `BlobURL.StartCopyFromURL`.
 func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
-if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
+if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 return minio.ObjectInfo{}, minio.PreConditionFailed{}
 }
 srcBlob := a.client.NewContainerURL(srcBucket).NewBlobURL(srcObject)
@@ -940,7 +940,7 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r
 // CopyObject - Copies a blob from source container to destination container.
 func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
 srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
-if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
+if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 return minio.ObjectInfo{}, minio.PreConditionFailed{}
 }
 src := l.client.Bucket(srcBucket).Object(srcObject)
@@ -330,7 +330,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
 // Setup cleanup function to cause the above go-routine to
 // exit in case of partial read
 pipeCloser := func() { pr.Close() }
-return fn(pr, h, o.CheckCopyPrecondFn, pipeCloser)
+return fn(pr, h, o.CheckPrecondFn, pipeCloser)
 }

 // GetObjectInfo reads object info and replies back ObjectInfo
@@ -490,7 +490,7 @@ func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string,

 // CopyObject copies an object from source bucket to a destination bucket.
 func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
-if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
+if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 return minio.ObjectInfo{}, minio.PreConditionFailed{}
 }
 // Set this header such that following CopyObject() always sets the right metadata on the destination.
@@ -594,7 +594,7 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str
 // existing object or a part of it.
 func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
 partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
-if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") {
+if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 return minio.PartInfo{}, minio.PreConditionFailed{}
 }
 srcInfo.UserDefined = map[string]string{
@@ -28,8 +28,8 @@ import (
 "github.com/minio/minio/pkg/madmin"
 )

-// CheckCopyPreconditionFn returns true if copy precondition check failed.
-type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool
+// CheckPreconditionFn returns true if precondition check failed.
+type CheckPreconditionFn func(o ObjectInfo) bool

 // GetObjectInfoFn is the signature of GetObjectInfo function.
 type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
@@ -37,13 +37,13 @@ type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts Objec
 // ObjectOptions represents object options for ObjectLayer object operations
 type ObjectOptions struct {
 ServerSideEncryption encrypt.ServerSide
 Versioned bool // indicates if the bucket is versioned
 WalkVersions bool // indicates if the we are interested in walking versions
 VersionID string // Specifies the versionID which needs to be overwritten or read
 MTime time.Time // Is only set in POST/PUT operations
 UserDefined map[string]string // only set in case of POST/PUT operations
 PartNumber int // only useful in case of GetObject/HeadObject
-CheckCopyPrecondFn CheckCopyPreconditionFn // only set during CopyObject preconditional valuation
+CheckPrecondFn CheckPreconditionFn // only set during GetObject/HeadObject/CopyObjectPart preconditional valuation
 }

 // BucketOptions represents bucket options for ObjectLayer bucket operations
@@ -509,14 +509,12 @@ type GetObjectReader struct {
 // NewGetObjectReaderFromReader sets up a GetObjectReader with a given
 // reader. This ignores any object properties.
 func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions, cleanupFns ...func()) (*GetObjectReader, error) {
-if opts.CheckCopyPrecondFn != nil {
-if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
+if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
 // Call the cleanup funcs
 for i := len(cleanupFns) - 1; i >= 0; i-- {
 cleanupFns[i]()
 }
 return nil, PreConditionFailed{}
 }
-}
 return &GetObjectReader{
 ObjInfo: oi,
|
|||||||
// GetObjectReader and an error. Request headers are passed to provide
|
// GetObjectReader and an error. Request headers are passed to provide
|
||||||
// encryption parameters. cleanupFns allow cleanup funcs to be
|
// encryption parameters. cleanupFns allow cleanup funcs to be
|
||||||
// registered for calling after usage of the reader.
|
// registered for calling after usage of the reader.
|
||||||
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)
|
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)
|
||||||
|
|
||||||
// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
|
// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
|
||||||
// are called on Close() in reverse order as passed here. NOTE: It is
|
// are called on Close() in reverse order as passed here. NOTE: It is
|
||||||
@ -581,7 +579,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
|
|||||||
// a reader that returns the desired range of
|
// a reader that returns the desired range of
|
||||||
// encrypted bytes. The header parameter is used to
|
// encrypted bytes. The header parameter is used to
|
||||||
// provide encryption parameters.
|
// provide encryption parameters.
|
||||||
fn = func(inputReader io.Reader, h http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
|
fn = func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
|
||||||
copySource := h.Get(crypto.SSECopyAlgorithm) != ""
|
copySource := h.Get(crypto.SSECopyAlgorithm) != ""
|
||||||
|
|
||||||
cFns = append(cleanUpFns, cFns...)
|
cFns = append(cleanUpFns, cFns...)
|
||||||
@ -596,17 +594,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
|
|||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
encETag := oi.ETag
|
|
||||||
oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value.
|
oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value.
|
||||||
|
|
||||||
if opts.CheckCopyPrecondFn != nil {
|
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
|
||||||
if ok := opts.CheckCopyPrecondFn(oi, encETag); ok {
|
// Call the cleanup funcs
|
||||||
// Call the cleanup funcs
|
for i := len(cFns) - 1; i >= 0; i-- {
|
||||||
for i := len(cFns) - 1; i >= 0; i-- {
|
cFns[i]()
|
||||||
cFns[i]()
|
|
||||||
}
|
|
||||||
return nil, PreConditionFailed{}
|
|
||||||
}
|
}
|
||||||
|
return nil, PreConditionFailed{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Apply the skipLen and limit on the
|
// Apply the skipLen and limit on the
|
||||||
@ -650,16 +645,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
|
|||||||
return nil, 0, 0, errInvalidRange
|
return nil, 0, 0, errInvalidRange
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
|
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
|
||||||
cFns = append(cleanUpFns, cFns...)
|
cFns = append(cleanUpFns, cFns...)
|
||||||
if opts.CheckCopyPrecondFn != nil {
|
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
|
||||||
if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
|
// Call the cleanup funcs
|
||||||
// Call the cleanup funcs
|
for i := len(cFns) - 1; i >= 0; i-- {
|
||||||
for i := len(cFns) - 1; i >= 0; i-- {
|
cFns[i]()
|
||||||
cFns[i]()
|
|
||||||
}
|
|
||||||
return nil, PreConditionFailed{}
|
|
||||||
}
|
}
|
||||||
|
return nil, PreConditionFailed{}
|
||||||
}
|
}
|
||||||
// Decompression reader.
|
// Decompression reader.
|
||||||
s2Reader := s2.NewReader(inputReader)
|
s2Reader := s2.NewReader(inputReader)
|
||||||
@ -700,16 +693,14 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cl
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, 0, 0, err
|
return nil, 0, 0, err
|
||||||
}
|
}
|
||||||
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
|
fn = func(inputReader io.Reader, _ http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
|
||||||
cFns = append(cleanUpFns, cFns...)
|
cFns = append(cleanUpFns, cFns...)
|
||||||
if opts.CheckCopyPrecondFn != nil {
|
if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
|
||||||
if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
|
// Call the cleanup funcs
|
||||||
// Call the cleanup funcs
|
for i := len(cFns) - 1; i >= 0; i-- {
|
||||||
for i := len(cFns) - 1; i >= 0; i-- {
|
cFns[i]()
|
||||||
cFns[i]()
|
|
||||||
}
|
|
||||||
return nil, PreConditionFailed{}
|
|
||||||
}
|
}
|
||||||
|
return nil, PreConditionFailed{}
|
||||||
}
|
}
|
||||||
r = &GetObjectReader{
|
r = &GetObjectReader{
|
||||||
ObjInfo: oi,
|
ObjInfo: oi,
|
||||||
|
@ -24,7 +24,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/minio/minio/cmd/crypto"
|
|
||||||
xhttp "github.com/minio/minio/cmd/http"
|
xhttp "github.com/minio/minio/cmd/http"
|
||||||
"github.com/minio/minio/pkg/bucket/lifecycle"
|
"github.com/minio/minio/pkg/bucket/lifecycle"
|
||||||
"github.com/minio/minio/pkg/event"
|
"github.com/minio/minio/pkg/event"
|
||||||
@ -41,8 +40,8 @@ var (
|
|||||||
// x-amz-copy-source-if-unmodified-since
|
// x-amz-copy-source-if-unmodified-since
|
||||||
// x-amz-copy-source-if-match
|
// x-amz-copy-source-if-match
|
||||||
// x-amz-copy-source-if-none-match
|
// x-amz-copy-source-if-none-match
|
||||||
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
|
func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
|
||||||
return checkCopyObjectPreconditions(ctx, w, r, objInfo, encETag)
|
return checkCopyObjectPreconditions(ctx, w, r, objInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed.
|
// Validates the preconditions for CopyObject, returns true if CopyObject operation should not proceed.
|
||||||
@ -51,14 +50,11 @@ func checkCopyObjectPartPreconditions(ctx context.Context, w http.ResponseWriter
|
|||||||
// x-amz-copy-source-if-unmodified-since
|
// x-amz-copy-source-if-unmodified-since
|
||||||
// x-amz-copy-source-if-match
|
// x-amz-copy-source-if-match
|
||||||
// x-amz-copy-source-if-none-match
|
// x-amz-copy-source-if-none-match
|
||||||
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, encETag string) bool {
|
func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
|
||||||
// Return false for methods other than GET and HEAD.
|
// Return false for methods other than GET and HEAD.
|
||||||
if r.Method != http.MethodPut {
|
if r.Method != http.MethodPut {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if encETag == "" {
|
|
||||||
encETag = objInfo.ETag
|
|
||||||
}
|
|
||||||
// If the object doesn't have a modtime (IsZero), or the modtime
|
// If the object doesn't have a modtime (IsZero), or the modtime
|
||||||
// is obviously garbage (Unix time == 0), then ignore modtimes
|
// is obviously garbage (Unix time == 0), then ignore modtimes
|
||||||
// and don't process the If-Modified-Since header.
|
// and don't process the If-Modified-Since header.
|
||||||
@ -106,17 +102,11 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
shouldDecryptEtag := crypto.SSECopy.IsRequested(r.Header) && !crypto.IsMultiPart(objInfo.UserDefined)
|
|
||||||
|
|
||||||
// x-amz-copy-source-if-match : Return the object only if its entity tag (ETag) is the
|
// x-amz-copy-source-if-match : Return the object only if its entity tag (ETag) is the
|
||||||
// same as the one specified; otherwise return a 412 (precondition failed).
|
// same as the one specified; otherwise return a 412 (precondition failed).
|
||||||
ifMatchETagHeader := r.Header.Get(xhttp.AmzCopySourceIfMatch)
|
ifMatchETagHeader := r.Header.Get(xhttp.AmzCopySourceIfMatch)
|
||||||
if ifMatchETagHeader != "" {
|
if ifMatchETagHeader != "" {
|
||||||
etag := objInfo.ETag
|
if !isETagEqual(objInfo.ETag, ifMatchETagHeader) {
|
||||||
if shouldDecryptEtag {
|
|
||||||
etag = encETag[len(encETag)-32:]
|
|
||||||
}
|
|
||||||
if objInfo.ETag != "" && !isETagEqual(etag, ifMatchETagHeader) {
|
|
||||||
// If the object ETag does not match with the specified ETag.
|
// If the object ETag does not match with the specified ETag.
|
||||||
writeHeaders()
|
writeHeaders()
|
||||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
|
||||||
@ -128,11 +118,7 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
|
|||||||
// one specified otherwise, return a 304 (not modified).
|
// one specified otherwise, return a 304 (not modified).
|
||||||
ifNoneMatchETagHeader := r.Header.Get(xhttp.AmzCopySourceIfNoneMatch)
|
ifNoneMatchETagHeader := r.Header.Get(xhttp.AmzCopySourceIfNoneMatch)
|
||||||
if ifNoneMatchETagHeader != "" {
|
if ifNoneMatchETagHeader != "" {
|
||||||
etag := objInfo.ETag
|
if isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) {
|
||||||
if shouldDecryptEtag {
|
|
||||||
etag = encETag[len(encETag)-32:]
|
|
||||||
}
|
|
||||||
if objInfo.ETag != "" && isETagEqual(etag, ifNoneMatchETagHeader) {
|
|
||||||
// If the object ETag matches with the specified ETag.
|
// If the object ETag matches with the specified ETag.
|
||||||
writeHeaders()
|
writeHeaders()
|
||||||
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
|
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
|
||||||
|
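With the encrypted-ETag special case gone, both copy precondition checks above compare the request value directly against objInfo.ETag via isETagEqual. That helper is not shown in this diff; the sketch below illustrates one common way such a comparison is written, normalizing optional surrounding double quotes before comparing. The function name etagEqual is illustrative, not the MinIO implementation.

```go
package main

import (
	"fmt"
	"strings"
)

// etagEqual compares two ETag values; clients may send If-Match /
// If-None-Match values with or without surrounding double quotes, so both
// sides are trimmed before comparing.
func etagEqual(left, right string) bool {
	trim := func(etag string) string { return strings.Trim(etag, "\"") }
	return trim(left) == trim(right)
}

func main() {
	objectETag := "d41d8cd98f00b204e9800998ecf8427e"
	fmt.Println(etagEqual(objectETag, "\"d41d8cd98f00b204e9800998ecf8427e\"")) // true
	fmt.Println(etagEqual(objectETag, "another-etag"))                         // false
}
```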
@@ -213,7 +213,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
 objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

 if objectAPI.IsEncryptionSupported() {
-if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
+if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 return
 }
@@ -371,23 +371,43 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req

 // Get request range.
 var rs *HTTPRangeSpec
+var rangeErr error
 rangeHeader := r.Header.Get(xhttp.Range)
 if rangeHeader != "" {
-if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
-// Handle only errInvalidRange. Ignore other
-// parse error and treat it as regular Get
-// request like Amazon S3.
-if err == errInvalidRange {
-writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRange), r.URL, guessIsBrowserReq(r))
-return
-}
-
-logger.LogIf(ctx, err, logger.Application)
-}
+rs, rangeErr = parseRequestRangeSpec(rangeHeader)
+}
+
+// Validate pre-conditions if any.
+opts.CheckPrecondFn = func(oi ObjectInfo) bool {
+if objectAPI.IsEncryptionSupported() {
+if _, err := DecryptObjectInfo(&oi, r); err != nil {
+writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+return true
+}
+}
+
+if checkPreconditions(ctx, w, r, oi, opts) {
+return true
+}
+
+// Handle only errInvalidRange. Ignore other
+// parse error and treat it as regular Get
+// request like Amazon S3.
+if rangeErr == errInvalidRange {
+writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRange), r.URL, guessIsBrowserReq(r))
+return true
+}
+if rangeErr != nil {
+logger.LogIf(ctx, rangeErr, logger.Application)
+}
+return false
 }

 gr, err := getObjectNInfo(ctx, bucket, object, rs, r.Header, readLock, opts)
 if err != nil {
+if isErrPreconditionFailed(err) {
+return
+}
 if globalBucketVersioningSys.Enabled(bucket) && gr != nil {
 // Versioning enabled quite possibly object is deleted might be delete-marker
 // if present set the headers, no idea why AWS S3 sets these headers.
@@ -409,18 +429,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 // filter object lock metadata if permission does not permit
 objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

-if objectAPI.IsEncryptionSupported() {
-if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
-writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-return
-}
-}
-
-// Validate pre-conditions if any.
-if checkPreconditions(ctx, w, r, objInfo, opts) {
-return
-}
-
 // Set encryption response headers
 if objectAPI.IsEncryptionSupported() {
 if crypto.IsEncrypted(objInfo.UserDefined) {
@@ -455,7 +463,8 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req

 // Write object content to response body
 if _, err = io.Copy(httpWriter, gr); err != nil {
-if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers has been written to client yet
+if !httpWriter.HasWritten() && !statusCodeWritten {
+// write error response only if no data or headers has been written to client yet
 writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 }
 return
@@ -551,23 +560,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
 return
 }

-// Get request range.
-var rs *HTTPRangeSpec
-rangeHeader := r.Header.Get(xhttp.Range)
-if rangeHeader != "" {
-if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
-// Handle only errInvalidRange. Ignore other
-// parse error and treat it as regular Get
-// request like Amazon S3.
-if err == errInvalidRange {
-writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
-return
-}
-
-logger.LogIf(ctx, err)
-}
-}
-
 objInfo, err := getObjectInfo(ctx, bucket, object, opts)
 if err != nil {
 if globalBucketVersioningSys.Enabled(bucket) {
@@ -590,14 +582,40 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
 objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

 if objectAPI.IsEncryptionSupported() {
-if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
+if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
 writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
 return
 }
 }

+// Validate pre-conditions if any.
+if checkPreconditions(ctx, w, r, objInfo, opts) {
+return
+}
+
+// Get request range.
+var rs *HTTPRangeSpec
+rangeHeader := r.Header.Get(xhttp.Range)
+if rangeHeader != "" {
+if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
+// Handle only errInvalidRange. Ignore other
+// parse error and treat it as regular Get
+// request like Amazon S3.
+if err == errInvalidRange {
+writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrInvalidRange))
+return
+}
+
+logger.LogIf(ctx, err)
+}
+}
+
 // Set encryption response headers
 if objectAPI.IsEncryptionSupported() {
+if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
+writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
+return
+}
 if crypto.IsEncrypted(objInfo.UserDefined) {
 switch {
 case crypto.S3.IsEncrypted(objInfo.UserDefined):
@@ -614,11 +632,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
 }
 }

-// Validate pre-conditions if any.
-if checkPreconditions(ctx, w, r, objInfo, opts) {
-return
-}
-
 // Set standard object headers.
 if err = setObjectHeaders(w, objInfo, rs); err != nil {
 writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
@@ -898,16 +911,25 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

 getObjectNInfo := objectAPI.GetObjectNInfo
+if api.CacheAPI() != nil {
+getObjectNInfo = api.CacheAPI().GetObjectNInfo
+}

 var lock = noLock
 if !cpSrcDstSame {
 lock = readLock
 }
-checkCopyPrecondFn := func(o ObjectInfo, encETag string) bool {
-return checkCopyObjectPreconditions(ctx, w, r, o, encETag)
+checkCopyPrecondFn := func(o ObjectInfo) bool {
+if objectAPI.IsEncryptionSupported() {
+if _, err := DecryptObjectInfo(&o, r); err != nil {
+writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+return true
+}
+}
+return checkCopyObjectPreconditions(ctx, w, r, o)
 }
-getOpts.CheckCopyPrecondFn = checkCopyPrecondFn
-srcOpts.CheckCopyPrecondFn = checkCopyPrecondFn
+getOpts.CheckPrecondFn = checkCopyPrecondFn
 var rs *HTTPRangeSpec
 gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, lock, getOpts)
 if err != nil {
@@ -1790,22 +1812,31 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt

 // Get request range.
 var rs *HTTPRangeSpec
-rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange)
-if rangeHeader != "" {
-var parseRangeErr error
-if rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader); parseRangeErr != nil {
-logger.GetReqInfo(ctx).AppendTags("rangeHeader", rangeHeader)
+var parseRangeErr error
+if rangeHeader := r.Header.Get(xhttp.AmzCopySourceRange); rangeHeader != "" {
+rs, parseRangeErr = parseCopyPartRangeSpec(rangeHeader)
+}
+
+checkCopyPartPrecondFn := func(o ObjectInfo) bool {
+if objectAPI.IsEncryptionSupported() {
+if _, err := DecryptObjectInfo(&o, r); err != nil {
+writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+return true
+}
+}
+if checkCopyObjectPartPreconditions(ctx, w, r, o) {
+return true
+}
+if parseRangeErr != nil {
 logger.LogIf(ctx, parseRangeErr)
 writeCopyPartErr(ctx, w, parseRangeErr, r.URL, guessIsBrowserReq(r))
-return
+// Range header mismatch is pre-condition like failure
+// so return true to indicate Range precondition failed.
+return true
 }
+return false
 }
-checkCopyPartPrecondFn := func(o ObjectInfo, encETag string) bool {
-return checkCopyObjectPartPreconditions(ctx, w, r, o, encETag)
-}
-getOpts.CheckCopyPrecondFn = checkCopyPartPrecondFn
-srcOpts.CheckCopyPrecondFn = checkCopyPartPrecondFn
+getOpts.CheckPrecondFn = checkCopyPartPrecondFn

 gr, err := getObjectNInfo(ctx, srcBucket, srcObject, rs, r.Header, readLock, getOpts)
 if err != nil {
@@ -1269,7 +1269,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
 objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone)

 if objectAPI.IsEncryptionSupported() {
-if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
+if _, err = DecryptObjectInfo(&objInfo, r); err != nil {
 writeWebErrorResponse(w, err)
 return
 }
@@ -48,6 +48,19 @@ type FilterRule struct {
 Value string `xml:"Value"`
 }

+func (filter FilterRule) isEmpty() bool {
+return filter.Name == "" && filter.Value == ""
+}
+
+// MarshalXML implements a custom marshaller to support `omitempty` feature.
+func (filter FilterRule) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+if filter.isEmpty() {
+return nil
+}
+type filterRuleWrapper FilterRule
+return e.EncodeElement(filterRuleWrapper(filter), start)
+}
+
 // UnmarshalXML - decodes XML data.
 func (filter *FilterRule) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
 // Make subtype to avoid recursive UnmarshalXML().
@@ -102,6 +115,10 @@ func (ruleList *FilterRuleList) UnmarshalXML(d *xml.Decoder, start xml.StartElem
 return nil
 }

+func (ruleList FilterRuleList) isEmpty() bool {
+return len(ruleList.Rules) == 0
+}
+
 // Pattern - returns pattern using prefix and suffix values.
 func (ruleList FilterRuleList) Pattern() string {
 var prefix string
@@ -124,6 +141,15 @@ type S3Key struct {
 RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"`
 }

+// MarshalXML implements a custom marshaller to support `omitempty` feature.
+func (s3Key S3Key) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+if s3Key.RuleList.isEmpty() {
+return nil
+}
+type s3KeyWrapper S3Key
+return e.EncodeElement(s3KeyWrapper(s3Key), start)
+}
+
 // common - represents common elements inside <QueueConfiguration>, <CloudFunctionConfiguration>
 // and <TopicConfiguration>
 type common struct {
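The final hunks give FilterRule and S3Key custom MarshalXML methods that emit nothing for empty values and convert to a method-less wrapper type so EncodeElement does not recurse back into MarshalXML. Here is a self-contained illustration of the same omitempty trick with encoding/xml; the Filter and Config types are invented for the example and are not the types from this diff.

```go
package main

import (
	"encoding/xml"
	"fmt"
	"os"
)

// Filter is an illustrative struct showing the same trick as FilterRule:
// a custom MarshalXML that emits nothing when the value is empty.
type Filter struct {
	Name  string `xml:"Name"`
	Value string `xml:"Value"`
}

func (f Filter) isEmpty() bool { return f.Name == "" && f.Value == "" }

// MarshalXML implements omitempty-like behaviour for a struct-typed field.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if f.isEmpty() {
		return nil // emit no element at all
	}
	type filterWrapper Filter // the wrapper type drops the MarshalXML method
	return e.EncodeElement(filterWrapper(f), start)
}

type Config struct {
	XMLName xml.Name `xml:"Config"`
	Filter  Filter   `xml:"Filter"`
}

func main() {
	enc := xml.NewEncoder(os.Stdout)
	enc.Indent("", "  ")
	// With an empty filter the <Filter> element is omitted entirely.
	_ = enc.Encode(Config{})
	fmt.Println()
	_ = enc.Encode(Config{Filter: Filter{Name: "prefix", Value: "photos/"}})
	fmt.Println()
}
```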