pkg/etag: add new package for S3 ETag handling (#11577)
This commit adds a new package `etag` for dealing with S3 ETags. Even though the ETag is often viewed as the MD5 checksum of an object, handling S3 ETags correctly is a surprisingly complex task. While it is true that the ETag corresponds to the MD5 for the most basic S3 API operations, there are many exceptions in the case of multipart uploads or encryption. Even worse, some S3 clients expect very specific behavior when it comes to ETags. For example, some clients expect the ETag to be a double-quoted string and fail otherwise.

Non-AWS-compliant ETag handling has been a source of many bugs in the past. Therefore, this commit adds a dedicated `etag` package that provides functionality for parsing, generating, and converting S3 ETags. Further, this commit removes the ETag computation from the `hash` package. Instead, the `hash` package (i.e. `hash.Reader`) should focus only on computing and verifying the content SHA-256.

One core feature of this commit is a mechanism to communicate a computed ETag from a low-level `io.Reader` to a high-level `io.Reader`. This problem occurs when an S3 server receives a request and has to compute the ETag of the content. However, the server may also wrap the initial body with several other `io.Reader` implementations, e.g. when encrypting or compressing the content:

```
reader := Encrypt(Compress(ETag(content)))
```

In such a case, the ETag should still be accessible by the high-level `io.Reader`. The `etag` package provides a mechanism to wrap `io.Reader` implementations such that the ETag can be accessed via a type check. This technique is applied to the PUT, COPY, and Upload handlers.
Committed by: GitHub
Parent: 1b63291ee2
Commit: d4b822d697
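To make the wrapping mechanism concrete before the diff, here is a minimal, self-contained Go sketch of the idea. The names (`tagger`, `tagReader`, `wrap`) and signatures are illustrative stand-ins, not the actual `pkg/etag` API; the real package operates on S3 ETags rather than raw MD5 sums.

```go
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"hash"
	"io"
)

// tagger is implemented by readers that can report the ETag of the
// content they have read. (Hypothetical stand-in for the real interface.)
type tagger interface{ ETag() []byte }

// tagReader computes an MD5-based ETag over everything read through it.
type tagReader struct {
	src io.Reader
	md5 hash.Hash
}

func newTagReader(r io.Reader) *tagReader {
	return &tagReader{src: r, md5: md5.New()}
}

func (t *tagReader) Read(p []byte) (int, error) {
	n, err := t.src.Read(p)
	t.md5.Write(p[:n])
	return n, err
}

func (t *tagReader) ETag() []byte { return t.md5.Sum(nil) }

// wrapReader reads from the outer pipeline reader (e.g. a compressing or
// encrypting layer) while exposing the ETag of the inner content reader.
type wrapReader struct {
	io.Reader
	tag tagger
}

func (w wrapReader) ETag() []byte { return w.tag.ETag() }

// wrap returns r but, if content knows its ETag, preserves access to it
// across the wrapping layer -- mirroring the role of etag.Wrap below.
func wrap(r, content io.Reader) io.Reader {
	if t, ok := content.(tagger); ok {
		return wrapReader{Reader: r, tag: t}
	}
	return r
}

func main() {
	content := newTagReader(bytes.NewReader([]byte("hello world")))

	// Stand-in for a Compress/Encrypt layer that hides the ETag method.
	layer := io.NopCloser(content)
	pipeline := wrap(layer, content)

	if _, err := io.Copy(io.Discard, pipeline); err != nil { // consume the body
		panic(err)
	}

	// The high-level reader recovers the ETag with a type check.
	if t, ok := pipeline.(tagger); ok {
		fmt.Printf("ETag: %x\n", t.ETag())
	}
}
```

The design point is that `wrap` returns a reader that forwards `Read` to the outer layer but answers the type check with the inner reader's ETag, so the ETag stays reachable no matter how many layers wrap the body.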
@@ -459,8 +459,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty

 	// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
 	// The verification happens implicit during reading.
-	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
-		hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
+	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
 	if err != nil {
 		return toAPIErrorCode(ctx, err)
 	}

@@ -917,7 +917,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}

-	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
 	if err != nil {
 		logger.LogIf(ctx, err)
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))

@@ -964,7 +964,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	}
 	info := ObjectInfo{Size: fileSize}
 	// do not try to verify encrypted content
-	hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
+	hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return

@@ -692,7 +692,7 @@ func restoreTransitionedObject(ctx context.Context, bucket, object string, objAP
 		return err
 	}
 	defer gr.Close()
-	hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(gr, objInfo.Size, "", "", objInfo.Size)
 	if err != nil {
 		return err
 	}

@@ -64,7 +64,7 @@ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string)
 }

 func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
-	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
 	if err != nil {
 		return err
 	}

@@ -128,7 +128,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
 			nextBloomCycle++
 			var tmp [8]byte
 			binary.LittleEndian.PutUint64(tmp[:], nextBloomCycle)
-			r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)), false)
+			r, err := hash.NewReader(bytes.NewReader(tmp[:]), int64(len(tmp)), "", "", int64(len(tmp)))
 			if err != nil {
 				logger.LogIf(ctx, err)
 				continue

@@ -513,7 +513,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
 	}()
 	defer pr.Close()

-	r, err := hash.NewReader(pr, -1, "", "", -1, false)
+	r, err := hash.NewReader(pr, -1, "", "", -1)
 	if err != nil {
 		return err
 	}

@@ -47,7 +47,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
 			continue
 		}
 		size := int64(len(dataUsageJSON))
-		r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size, false)
+		r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size)
 		if err != nil {
 			logger.LogIf(ctx, err)
 			continue

@@ -705,7 +705,7 @@ func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) {
 	if st == CommitComplete || st.String() == "" {
 		return
 	}
-	hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size)
 	if err != nil {
 		return
 	}

@@ -168,7 +168,7 @@ func getGWMetadata(ctx context.Context, bucket, prefix string, gwMeta gwMetaV1)
 		logger.LogIf(ctx, err)
 		return nil, err
 	}
-	hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)), false)
+	hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)))
 	if err != nil {
 		return nil, err
 	}

@@ -176,7 +176,7 @@ func (b *bucketMetacache) save(ctx context.Context) error {
 	b.updated = false
 	b.mu.Unlock()

-	hr, err := hash.NewReader(tmp, int64(tmp.Len()), "", "", int64(tmp.Len()), false)
+	hr, err := hash.NewReader(tmp, int64(tmp.Len()), "", "", int64(tmp.Len()))
 	if err != nil {
 		return err
 	}

@@ -672,7 +672,7 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions) (entr
 		return nil
 	}
 	o.debugln(color.Green("listPath:")+" saving block", b.n, "to", o.objectPath(b.n))
-	r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)), false)
+	r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data)))
 	logger.LogIf(ctx, err)
 	custom := b.headerKV()
 	_, err = er.putObject(ctx, minioMetaBucket, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{

@@ -48,6 +48,7 @@ import (
 	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
 	"github.com/minio/minio/pkg/bucket/policy"
 	"github.com/minio/minio/pkg/bucket/replication"
+	"github.com/minio/minio/pkg/etag"
 	"github.com/minio/minio/pkg/event"
 	"github.com/minio/minio/pkg/handlers"
 	"github.com/minio/minio/pkg/hash"

@@ -1025,7 +1026,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		srcInfo.metadataOnly = false
 	}

-	var reader io.Reader
+	var reader io.Reader = gr

 	// Set the actual size to the compressed/decrypted size if encrypted.
 	actualSize, err := srcInfo.GetActualSize()

@@ -1057,15 +1058,15 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		// avoid copying them in target object.
 		crypto.RemoveInternalEntries(srcInfo.UserDefined)

-		s2c := newS2CompressReader(gr, actualSize)
+		s2c := newS2CompressReader(reader, actualSize)
 		defer s2c.Close()
-		reader = s2c
+		reader = etag.Wrap(s2c, reader)
 		length = -1
 	} else {
 		reader = gr
 	}

-	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize, globalCLIContext.StrictS3Compat)
+	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return

@@ -1162,11 +1163,13 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	}

 	if isTargetEncrypted {
-		reader, objEncKey, err = newEncryptReader(srcInfo.Reader, newKey, dstBucket, dstObject, encMetadata, sseS3)
+		var encReader io.Reader
+		encReader, objEncKey, err = newEncryptReader(srcInfo.Reader, newKey, dstBucket, dstObject, encMetadata, sseS3)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
+		reader = etag.Wrap(encReader, srcInfo.Reader)
 	}

 	if isSourceEncrypted {

@@ -1176,7 +1179,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	}

 	// do not try to verify encrypted content
-	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize, globalCLIContext.StrictS3Compat)
+	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return

@@ -1457,13 +1460,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	}

 	var (
-		md5hex    = hex.EncodeToString(md5Bytes)
-		sha256hex = ""
-		reader    io.Reader
+		md5hex    = hex.EncodeToString(md5Bytes)
+		sha256hex = ""
+		reader    io.Reader = r.Body
 		s3Err     APIErrorCode
 		putObject = objectAPI.PutObject
 	)
-	reader = r.Body

 	// Check if put is allowed
 	if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Err != ErrNone {

@@ -1509,13 +1511,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	}

 	actualSize := size

 	if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 {
 		// Storing the compression metadata.
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
 		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

-		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
+		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return

@@ -1524,13 +1525,13 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		// Set compression metrics.
 		s2c := newS2CompressReader(actualReader, actualSize)
 		defer s2c.Close()
-		reader = s2c
+		reader = etag.Wrap(s2c, actualReader)
 		size = -1   // Since compressed size is un-predictable.
 		md5hex = "" // Do not try to verify the content.
 		sha256hex = ""
 	}

-	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return

@@ -1601,7 +1602,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		}

 		// do not try to verify encrypted content
-		hashReader, err = hash.NewReader(reader, wantSize, "", "", actualSize, globalCLIContext.StrictS3Compat)
+		hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return

@@ -1999,8 +2000,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		return
 	}

-	partInfo, err := core.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID,
-		gr, length, "", "", dstOpts.ServerSideEncryption)
+	partInfo, err := core.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, gr, length, "", "", dstOpts.ServerSideEncryption)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return

@@ -2015,7 +2015,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	}

 	actualPartSize = length
-	var reader io.Reader
+	var reader io.Reader = etag.NewReader(gr, nil)

 	mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts)
 	if err != nil {

@@ -2027,15 +2027,13 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]
 	// Compress only if the compression is enabled during initial multipart.
 	if isCompressed {
-		s2c := newS2CompressReader(gr, actualPartSize)
+		s2c := newS2CompressReader(reader, actualPartSize)
 		defer s2c.Close()
-		reader = s2c
+		reader = etag.Wrap(s2c, reader)
 		length = -1
-	} else {
-		reader = gr
 	}

-	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize, globalCLIContext.StrictS3Compat)
+	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return

@@ -2077,11 +2075,12 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		copy(objectEncryptionKey[:], key)

 		partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID))
-		reader, err = sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:]})
+		encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:]})
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
+		reader = etag.Wrap(encReader, reader)

 		wantSize := int64(-1)
 		if length >= 0 {

@@ -2089,7 +2088,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 			wantSize = info.EncryptedSize()
 		}

-		srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize, globalCLIContext.StrictS3Compat)
+		srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return

@@ -2209,12 +2208,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	}

 	var (
-		md5hex    = hex.EncodeToString(md5Bytes)
-		sha256hex = ""
-		reader    io.Reader
+		md5hex    = hex.EncodeToString(md5Bytes)
+		sha256hex = ""
+		reader    io.Reader = r.Body
 		s3Error   APIErrorCode
 	)
-	reader = r.Body
 	if s3Error = isPutActionAllowed(ctx, rAuthType, bucket, object, r, iampolicy.PutObjectAction); s3Error != ErrNone {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
 		return

@@ -2271,7 +2269,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	_, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"]

 	if objectAPI.IsCompressionSupported() && isCompressed {
-		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
+		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return

@@ -2280,13 +2278,13 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		// Set compression metrics.
 		s2c := newS2CompressReader(actualReader, actualSize)
 		defer s2c.Close()
-		reader = s2c
+		reader = etag.Wrap(s2c, actualReader)
 		size = -1   // Since compressed size is un-predictable.
 		md5hex = "" // Do not try to verify the content.
 		sha256hex = ""
 	}

-	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return

@@ -2343,7 +2341,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 			wantSize = info.EncryptedSize()
 		}
 		// do not try to verify encrypted content
-		hashReader, err = hash.NewReader(reader, wantSize, "", "", actualSize, globalCLIContext.StrictS3Compat)
+		hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return

@@ -160,7 +160,7 @@ func calculateSignedChunkLength(chunkDataSize int64) int64 {
 }

 func mustGetPutObjReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *PutObjReader {
-	hr, err := hash.NewReader(data, size, md5hex, sha256hex, size, globalCLIContext.StrictS3Compat)
+	hr, err := hash.NewReader(data, size, md5hex, sha256hex, size)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -50,6 +50,7 @@ import (
 	objectlock "github.com/minio/minio/pkg/bucket/object/lock"
 	"github.com/minio/minio/pkg/bucket/policy"
 	"github.com/minio/minio/pkg/bucket/replication"
+	"github.com/minio/minio/pkg/etag"
 	"github.com/minio/minio/pkg/event"
 	"github.com/minio/minio/pkg/handlers"
 	"github.com/minio/minio/pkg/hash"

@@ -1233,7 +1234,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 	var reader io.Reader = r.Body
 	actualSize := size

-	hashReader, err := hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
+	hashReader, err := hash.NewReader(reader, size, "", "", actualSize)
 	if err != nil {
 		writeWebErrorResponse(w, err)
 		return

@@ -1244,7 +1245,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2
 		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10)

-		actualReader, err := hash.NewReader(reader, actualSize, "", "", actualSize, globalCLIContext.StrictS3Compat)
+		actualReader, err := hash.NewReader(reader, actualSize, "", "", actualSize)
 		if err != nil {
 			writeWebErrorResponse(w, err)
 			return

@@ -1254,8 +1255,8 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		size = -1 // Since compressed size is un-predictable.
 		s2c := newS2CompressReader(actualReader, actualSize)
 		defer s2c.Close()
-		reader = s2c
-		hashReader, err = hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
+		reader = etag.Wrap(s2c, actualReader)
+		hashReader, err = hash.NewReader(reader, size, "", "", actualSize)
 		if err != nil {
 			writeWebErrorResponse(w, err)
 			return

@@ -1276,15 +1277,18 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {

 	if objectAPI.IsEncryptionSupported() {
 		if _, ok := crypto.IsRequested(r.Header); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests
-			var objectEncryptionKey crypto.ObjectKey
-			reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
+			var (
+				objectEncryptionKey crypto.ObjectKey
+				encReader           io.Reader
+			)
+			encReader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
 			if err != nil {
 				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 				return
 			}
 			info := ObjectInfo{Size: size}
 			// do not try to verify encrypted content
-			hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
+			hashReader, err = hash.NewReader(etag.Wrap(encReader, hashReader), info.EncryptedSize(), "", "", size)
 			if err != nil {
 				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 				return