mirror of https://github.com/minio/minio.git
fix: invalid multipart offset when compressed+encrypted. (#12340)
Fixes `testSSES3EncryptedGetObjectReadSeekFunctional` mint test.

```
{
  "args": {
    "bucketName": "minio-go-test-w53hbpat649nhvws",
    "objectName": "6mdswladz4vfpp2oit1pkn3qd11te5"
  },
  "duration": 7537,
  "error": "We encountered an internal error, please try again.: cause(The requested range \"bytes 251717932 -> -116384170 of 135333762\" is not satisfiable.)",
  "function": "GetObject(bucketName, objectName)",
  "message": "CopyN failed",
  "name": "minio-go: testSSES3EncryptedGetObjectReadSeekFunctional",
  "status": "FAIL"
}
```

Compressed files always start at the beginning of a part, so no additional offset should be added.
This commit is contained in:
parent df4914b6f3
commit f01820a4ee
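To see how the doubled offset produces the error above: `NewGetObjectReader` computes the remaining length as `oi.Size - off` (visible in the last hunk below), so once the spurious per-part offset pushes `off` past the compressed object size, the remaining length goes negative and the range is rejected as unsatisfiable. A minimal sketch using the numbers from the failing mint test; the variable names are illustrative, not MinIO's:

```go
package main

import "fmt"

func main() {
	// Values taken from the mint failure above.
	objectSize := int64(135333762)    // compressed+encrypted size of the object on the backend
	doubledOffset := int64(251717932) // part start with the spurious decrypted-range offset added on top

	// The remaining length is derived as oi.Size - off, so an offset beyond the
	// end of the object yields a negative length ...
	remaining := objectSize - doubledOffset

	// ... which is reported as an unsatisfiable range, matching the mint output.
	fmt.Printf("The requested range \"bytes %d -> %d of %d\" is not satisfiable.\n",
		doubledOffset, remaining, objectSize)
}
```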
```diff
@@ -71,14 +71,14 @@ const (
 // isEncryptedMultipart returns true if the current object is
 // uploaded by the user using multipart mechanism:
 // initiate new multipart, upload part, complete upload
-func isEncryptedMultipart(objInfo ObjectInfo) bool {
-    if len(objInfo.Parts) == 0 {
+func (o *ObjectInfo) isEncryptedMultipart() bool {
+    if len(o.Parts) == 0 {
         return false
     }
-    if !crypto.IsMultiPart(objInfo.UserDefined) {
+    if !crypto.IsMultiPart(o.UserDefined) {
         return false
     }
-    for _, part := range objInfo.Parts {
+    for _, part := range o.Parts {
         _, err := sio.DecryptedSize(uint64(part.Size))
         if err != nil {
             return false
@@ -87,7 +87,7 @@ func isEncryptedMultipart(objInfo ObjectInfo) bool {
     // Further check if this object is uploaded using multipart mechanism
     // by the user and it is not about Erasure internally splitting the
     // object into parts in PutObject()
-    return !(objInfo.backendType == BackendErasure && len(objInfo.ETag) == 32)
+    return !(o.backendType == BackendErasure && len(o.ETag) == 32)
 }
 
 // ParseSSECopyCustomerRequest parses the SSE-C header fields of the provided request.
@@ -427,7 +427,7 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, seqNumber uint3
 
     bucket, object := oi.Bucket, oi.Name
     // Single part case
-    if !isEncryptedMultipart(oi) {
+    if !oi.isEncryptedMultipart() {
         var reader io.Reader
         var err error
         if copySource {
@@ -589,7 +589,7 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
     if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
         return 0, errors.New("Cannot compute decrypted size of an unencrypted object")
     }
-    if !isEncryptedMultipart(*o) {
+    if !o.isEncryptedMultipart() {
         size, err := sio.DecryptedSize(uint64(o.Size))
         if err != nil {
             err = errObjectTampered // assign correct error type
@@ -732,7 +732,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
     // Assemble slice of (decrypted) part sizes in `sizes`
     var sizes []int64
     var decObjSize int64 // decrypted total object size
-    if isEncryptedMultipart(*o) {
+    if o.isEncryptedMultipart() {
         sizes = make([]int64, len(o.Parts))
         for i, part := range o.Parts {
             var partSize uint64
```
```diff
@@ -561,7 +561,7 @@ type InvalidRange struct {
 }
 
 func (e InvalidRange) Error() string {
-    return fmt.Sprintf("The requested range \"bytes %d-%d/%d\" is not satisfiable.", e.OffsetBegin, e.OffsetEnd, e.ResourceSize)
+    return fmt.Sprintf("The requested range \"bytes %d -> %d of %d\" is not satisfiable.", e.OffsetBegin, e.OffsetEnd, e.ResourceSize)
 }
 
 // ObjectTooLarge error returned when the size of the object > max object size allowed (5G) per request.
```
```diff
@@ -543,11 +543,6 @@ func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset
         }
     }
 
-    if isEncryptedMultipart(objectInfo) && firstPartIdx > 0 {
-        off, _, _, _, _, err := objectInfo.GetDecryptedRange(partNumberToRangeSpec(objectInfo, firstPartIdx))
-        logger.LogIf(context.Background(), err)
-        compressedOffset += off
-    }
     return compressedOffset, offset - skipLength, firstPartIdx
 }
 
```
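A rough sketch of why dropping the block above is sufficient, under a simplified layout with made-up part sizes (neither the helper nor the numbers come from MinIO): compressed data always begins at a part boundary, so the sum of the sizes of the preceding parts is already the correct starting offset, and the removed `compressedOffset += off` re-added the part's own start offset on top of it:

```go
package main

import "fmt"

// partStart returns the offset at which part i begins when parts are laid out
// back to back; the sizes are hypothetical and only illustrate the layout.
func partStart(partSizes []int64, i int) int64 {
	var off int64
	for p := 0; p < i; p++ {
		off += partSizes[p]
	}
	return off
}

func main() {
	compressedSizes := []int64{100, 100, 100} // hypothetical compressed part sizes

	// Compressed data always begins at a part boundary, so this is already the
	// correct offset for a read that starts in part 2.
	correct := partStart(compressedSizes, 2) // 200

	// Adding the part's own start offset on top of a value that already points
	// at the start of the part counts parts 0 and 1 twice and overshoots the
	// 300-byte object.
	doubled := correct + partStart(compressedSizes, 2) // 400

	fmt.Println("correct:", correct, "doubled:", doubled)
}
```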
```diff
@@ -632,6 +627,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
         if err != nil {
             return nil, 0, 0, err
         }
+
         off, length = int64(0), oi.Size
         decOff, decLength := int64(0), actualSize
         if rs != nil {
@@ -639,8 +635,10 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
             if err != nil {
                 return nil, 0, 0, err
             }
+
             // In case of range based queries on multiparts, the offset and length are reduced.
             off, decOff, firstPart = getCompressedOffsets(oi, off)
+
             decLength = length
             length = oi.Size - off
             // For negative length we read everything.
```