Turn off md5sum optionally if content-md5 is not set (#7609)

This PR also adds a --compat option to run MinIO in strict
S3 compatibility mode; by default, MinIO now runs in
high-performance mode.
Harshavardhana 2019-05-08 18:35:40 -07:00 committed by GitHub
parent 757c639044
commit 72929ec05b
26 changed files with 143 additions and 175 deletions
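The heart of the change is that the MD5 hasher in pkg/hash is now allocated only when needed. A minimal standalone sketch of that gating, in Go (newMD5Hasher is a hypothetical helper for illustration; the real code inlines this logic in hash.NewReader, shown in the diffs below, with strictCompat wired from the new --compat flag via globalCLIContext.StrictS3Compat):

package main

import (
	"crypto/md5"
	"fmt"
	"hash"
)

// newMD5Hasher mirrors the gating this commit adds to hash.NewReader:
// allocate an MD5 hasher only in strict S3 compatibility mode, or when
// the client actually supplied a Content-Md5 header.
func newMD5Hasher(strictCompat bool, md5Hex string) hash.Hash {
	if strictCompat || len(md5Hex) != 0 {
		return md5.New()
	}
	// High-performance mode: skip MD5 entirely; callers must tolerate
	// a nil hasher (MD5Current then returns nil, see pkg/hash below).
	return nil
}

func main() {
	fmt.Println(newMD5Hasher(false, "") == nil) // true: MD5 skipped
	fmt.Println(newMD5Hasher(true, "") == nil)  // false: --compat forces MD5
}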

View File

@@ -362,7 +362,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
 	// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
 	// The verification happens implicit during reading.
-	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
+	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return toAPIErrorCode(ctx, err)
 	}

View File

@@ -600,7 +600,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
-	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
+	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		logger.LogIf(ctx, err)
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -638,7 +638,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			return
 		}
 		info := ObjectInfo{Size: fileSize}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return

View File

@@ -161,6 +161,9 @@ func handleCommonCmdArgs(ctx *cli.Context) {
 	globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
 	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
+
+	// Check "compat" flag from command line argument.
+	globalCLIContext.StrictS3Compat = ctx.IsSet("compat") || ctx.GlobalIsSet("compat")
 }

 // Parses the given compression exclude list `extensions` or `content-types`.

View File

@@ -74,7 +74,7 @@ func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string,
 }

 func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
-	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
+	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return err
 	}

View File

@@ -18,7 +18,6 @@ package cmd

 import (
 	"context"
-	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -399,7 +398,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 	if fsMeta.Meta["etag"] == "" {
-		fsMeta.Meta["etag"] = hex.EncodeToString(data.MD5Current())
+		fsMeta.Meta["etag"] = r.MD5CurrentHexString()
 	}
 	// Should return IncompleteBody{} error when reader has fewer
 	// bytes than specified in request header.

View File

@@ -243,7 +243,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 	// Initialize pipe.
 	pipeReader, pipeWriter := io.Pipe()
 	teeReader := io.TeeReader(bkReader, pipeWriter)
-	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
+	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size, globalCLIContext.StrictS3Compat)
 	if herr != nil {
 		bkReader.Close()
 		return nil, herr
@@ -314,7 +314,7 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
 	}
 	// Initialize pipe.
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
+	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return err
 	}
@@ -664,13 +664,13 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *P
 	objInfo = ObjectInfo{}
 	// Initialize pipe to stream data to backend
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return ObjectInfo{}, err
 	}
 	// Initialize pipe to stream data to cache
 	rPipe, wPipe := io.Pipe()
-	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return ObjectInfo{}, err
 	}
@@ -758,13 +758,13 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
 	info = PartInfo{}
 	// Initialize pipe to stream data to backend
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return
 	}
 	// Initialize pipe to stream data to cache
 	rPipe, wPipe := io.Pipe()
-	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return
 	}

View File

@@ -194,7 +194,7 @@ func TestDiskCache(t *testing.T) {
 	objInfo.UserDefined = httpMeta
 	var opts ObjectOptions
 	byteReader := bytes.NewReader([]byte(content))
-	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
+	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -269,7 +269,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
 	opts := ObjectOptions{}
 	byteReader := bytes.NewReader([]byte(content))
-	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
+	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		t.Fatal(err)
 	}

View File

@@ -504,10 +504,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	}

 	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5, err := getCompleteMultipartMD5(ctx, parts)
-	if err != nil {
-		return oi, err
-	}
+	s3MD5 := getCompleteMultipartMD5(parts)

 	partSize := int64(-1) // Used later to ensure that all parts sizes are same.

View File

@@ -17,7 +17,6 @@
 package cmd

 import (
-	"context"
 	"net/http"
 	"os"
 	"strings"
@@ -49,9 +48,6 @@ var (
 	// IsStringEqual is string equal.
 	IsStringEqual = isStringEqual
-
-	// GetCompleteMultipartMD5 returns multipart MD5
-	GetCompleteMultipartMD5 = getCompleteMultipartMD5
 )

 // StatInfo - alias for statInfo
@@ -351,8 +347,8 @@ func ErrorRespToObjectError(err error, params ...string) error {
 }

 // ComputeCompleteMultipartMD5 calculates MD5 ETag for complete multipart responses
-func ComputeCompleteMultipartMD5(parts []CompletePart) (string, error) {
-	return getCompleteMultipartMD5(context.Background(), parts)
+func ComputeCompleteMultipartMD5(parts []CompletePart) string {
+	return getCompleteMultipartMD5(parts)
 }

 // parse gateway sse env variable
// parse gateway sse env variable // parse gateway sse env variable

View File

@@ -835,7 +835,7 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *
 	}
 	// Save md5sum for future processing on the object.
-	opts.UserDefined["x-amz-meta-md5sum"] = hex.EncodeToString(data.MD5Current())
+	opts.UserDefined["x-amz-meta-md5sum"] = r.MD5CurrentHexString()
 	objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(ctx, opts.UserDefined)
 	if err != nil {
 		return objInfo, azureToObjectError(err, bucket, object)
@@ -1207,10 +1207,7 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 	if err != nil {
 		return objInfo, azureToObjectError(err, bucket, object)
 	}
-	objBlob.Metadata["md5sum"], err = cmd.ComputeCompleteMultipartMD5(uploadedParts)
-	if err != nil {
-		return objInfo, err
-	}
+	objBlob.Metadata["md5sum"] = cmd.ComputeCompleteMultipartMD5(uploadedParts)
 	err = objBlob.SetProperties(nil)
 	if err != nil {
 		return objInfo, azureToObjectError(err, bucket, object)

View File

@@ -647,10 +647,7 @@ func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, objec
 	}

 	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5, err := minio.GetCompleteMultipartMD5(ctx, parts)
-	if err != nil {
-		return objInfo, err
-	}
+	s3MD5 := minio.ComputeCompleteMultipartMD5(parts)

 	return minio.ObjectInfo{
 		Bucket: bucket,

View File

@@ -241,7 +241,7 @@ func getGWMetadata(ctx context.Context, bucket, prefix string, gwMeta gwMetaV1)
 		logger.LogIf(ctx, err)
 		return nil, err
 	}
-	hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)))
+	hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)), false)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -92,9 +92,10 @@ const (
 )

 var globalCLIContext = struct {
 	JSON, Quiet bool
 	Anonymous   bool
 	Addr        string
+	StrictS3Compat bool
 }{}

 var (

View File

@@ -32,24 +32,28 @@ var globalFlags = []cli.Flag{
 	cli.StringFlag{
 		Name:  "config-dir, C",
 		Value: defaultConfigDir.Get(),
-		Usage: "[DEPRECATED] Path to legacy configuration directory.",
+		Usage: "[DEPRECATED] path to legacy configuration directory",
 	},
 	cli.StringFlag{
 		Name:  "certs-dir, S",
 		Value: defaultCertsDir.Get(),
-		Usage: "Path to certs directory.",
+		Usage: "path to certs directory",
 	},
 	cli.BoolFlag{
 		Name:  "quiet",
-		Usage: "Disable startup information.",
+		Usage: "disable startup information",
 	},
 	cli.BoolFlag{
 		Name:  "anonymous",
-		Usage: "Hide sensitive information from logging.",
+		Usage: "hide sensitive information from logging",
 	},
 	cli.BoolFlag{
 		Name:  "json",
-		Usage: "Output server logs and startup information in json format.",
+		Usage: "output server logs and startup information in json format",
+	},
+	cli.BoolFlag{
+		Name:  "compat",
+		Usage: "trade off performance for S3 compatibility",
 	},
 }
@@ -118,7 +122,7 @@ func newApp(name string) *cli.App {
 	// Set up app.
 	cli.HelpFlag = cli.BoolFlag{
 		Name:  "help, h",
-		Usage: "Show help.",
+		Usage: "show help",
 	}

 	app := cli.NewApp()
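With the flag registered, strict compatibility can be requested at startup. A hypothetical invocation (the data path is a placeholder); since handleCommonCmdArgs checks both ctx.IsSet("compat") and ctx.GlobalIsSet("compat"), the flag appears to be honored on either side of the subcommand:

	minio --compat server /data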

View File

@@ -19,7 +19,6 @@ package cmd

 import (
 	"bytes"
 	"context"
-	"encoding/hex"
 	"fmt"
 	"os"
 	"reflect"
@@ -1866,10 +1865,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 			},
 		},
 	}
-	s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
-	if err != nil {
-		t.Fatalf("Obtaining S3MD5 failed")
-	}
+	s3MD5 := getCompleteMultipartMD5(inputParts[3].parts)

 	// Test cases with sample input values for CompleteMultipartUpload.
 	testCases := []struct {
@@ -1898,8 +1894,8 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 		// Asserting for Invalid UploadID (Test number 9).
 		{bucketNames[0], objectNames[0], "abc", []CompletePart{}, "", InvalidUploadID{UploadID: "abc"}, false},
 		// Test case with invalid Part Etag (Test number 10-11).
-		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abc"}}, "", hex.ErrLength, false},
-		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abcz"}}, "", hex.InvalidByteError(00), false},
+		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abc"}}, "", InvalidPart{}, false},
+		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abcz"}}, "", InvalidPart{}, false},
 		// Part number 0 doesn't exist, expecting InvalidPart error (Test number 12).
 		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abcd", PartNumber: 0}}, "", InvalidPart{}, false},
 		// // Upload and PartNumber exists, But a deliberate ETag mismatch is introduced (Test number 13).

View File

@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2015-2019 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -190,18 +190,18 @@ func mustGetUUID() string {
 }

 // Create an s3 compatible MD5sum for complete multipart transaction.
-func getCompleteMultipartMD5(ctx context.Context, parts []CompletePart) (string, error) {
+func getCompleteMultipartMD5(parts []CompletePart) string {
 	var finalMD5Bytes []byte
 	for _, part := range parts {
 		md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag))
 		if err != nil {
-			logger.LogIf(ctx, err)
-			return "", err
+			finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...)
+		} else {
+			finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
 		}
-		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
 	}
 	s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts))
-	return s3MD5, nil
+	return s3MD5
 }

 // Clean unwanted fields from metadata
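The new behavior never fails: part ETags that are not valid hex are folded in as raw bytes instead of aborting the complete-multipart call. A self-contained sketch of the rule (canonicalizeETag, which strips surrounding quotes, is omitted here for brevity):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// completeMultipartMD5 re-implements the rule above: concatenate the
// hex-decoded part ETags, falling back to the literal ETag bytes when
// decoding fails, then return md5(concatenation) plus "-<part count>".
func completeMultipartMD5(partETags []string) string {
	var all []byte
	for _, etag := range partETags {
		b, err := hex.DecodeString(etag)
		if err != nil {
			// No error is returned anymore; use the raw ETag bytes.
			b = []byte(etag)
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags))
}

func main() {
	// Matches the updated test vector later in this commit:
	// prints "10dc1617fbcf0bd0858048cb96e6bd77-1".
	fmt.Println(completeMultipartMD5([]string{"cf1f738a5924e645913c984e0fe3d708"}))
}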
@@ -675,9 +675,26 @@ func (p *PutObjReader) Size() int64 {
 // as a hex encoded string
 func (p *PutObjReader) MD5CurrentHexString() string {
 	md5sumCurr := p.rawReader.MD5Current()
+	var appendHyphen bool
+	// md5sumcurr is not empty in two scenarios
+	// - server is running in strict compatibility mode
+	// - client set Content-Md5 during PUT operation
+	if len(md5sumCurr) == 0 {
+		// md5sumCurr is only empty when we are running
+		// in non-compatibility mode.
+		md5sumCurr = make([]byte, 16)
+		rand.Read(md5sumCurr)
+		appendHyphen = true
+	}
 	if p.sealMD5Fn != nil {
 		md5sumCurr = p.sealMD5Fn(md5sumCurr)
 	}
+	if appendHyphen {
+		// Make sure to return etag string upto 32 length, for SSE
+		// requests ETag might be longer and the code decrypting the
+		// ETag ignores ETag in multipart ETag form i.e <hex>-N
+		return hex.EncodeToString(md5sumCurr)[:32] + "-1"
+	}
 	return hex.EncodeToString(md5sumCurr)
 }
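When no MD5 was computed, the ETag is synthesized rather than derived from content. A small sketch of the fallback shape (using crypto/rand here as an assumption; the diff's rand.Read call is taken to be equivalent): a random 128-bit value rendered as 32 hex characters with a "-1" suffix, so clients see a multipart-style ETag and do not verify it as a content MD5:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func main() {
	buf := make([]byte, 16)
	rand.Read(buf) // random stand-in for the skipped MD5 digest
	fmt.Println(hex.EncodeToString(buf)[:32] + "-1")
}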

View File

@@ -18,7 +18,6 @@ package cmd

 import (
 	"bytes"
-	"context"
 	"io"
 	"net/http"
 	"reflect"
@@ -143,8 +142,8 @@ func TestGetCompleteMultipartMD5(t *testing.T) {
 		expectedResult string
 		expectedErr    string
 	}{
-		// Wrong MD5 hash string
-		{[]CompletePart{{ETag: "wrong-md5-hash-string"}}, "", "encoding/hex: invalid byte: U+0077 'w'"},
+		// Wrong MD5 hash string, returns md5um of hash
+		{[]CompletePart{{ETag: "wrong-md5-hash-string"}}, "0deb8cb07527b4b2669c861cb9653607-1", ""},

 		// Single CompletePart with valid MD5 hash string.
 		{[]CompletePart{{ETag: "cf1f738a5924e645913c984e0fe3d708"}}, "10dc1617fbcf0bd0858048cb96e6bd77-1", ""},
@@ -154,17 +153,10 @@ func TestGetCompleteMultipartMD5(t *testing.T) {
 	}

 	for i, test := range testCases {
-		result, err := getCompleteMultipartMD5(context.Background(), test.parts)
+		result := getCompleteMultipartMD5(test.parts)
 		if result != test.expectedResult {
 			t.Fatalf("test %d failed: expected: result=%v, got=%v", i+1, test.expectedResult, result)
 		}
-		errString := ""
-		if err != nil {
-			errString = err.Error()
-		}
-		if errString != test.expectedErr {
-			t.Fatalf("test %d failed: expected: err=%v, got=%v", i+1, test.expectedErr, err)
-		}
 	}
 }

View File

@@ -827,7 +827,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		reader = gr
 	}

-	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize)
+	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -934,7 +934,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		crypto.RemoveInternalEntries(srcInfo.UserDefined)
 	}

-	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", targetSize) // do not try to verify encrypted content
+	// do not try to verify encrypted content
+	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", targetSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1188,7 +1189,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV1
 		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

-		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1201,7 +1202,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		sha256hex = ""
 	}

-	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1235,7 +1236,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 			return
 		}
 		info := ObjectInfo{Size: size}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1260,8 +1262,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	etag := objInfo.ETag
 	if objInfo.IsCompressed() {
-		// Ignore compressed ETag.
-		etag = objInfo.ETag + "-1"
+		if !strings.HasSuffix(objInfo.ETag, "-1") {
+			etag = objInfo.ETag + "-1"
+		}
 	} else if hasServerSideEncryptionHeader(r.Header) {
 		etag = getDecryptedETag(r.Header, objInfo, false)
 	}
@@ -1613,7 +1616,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		reader = gr
 	}

-	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
+	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1673,7 +1676,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		}
 		info := ObjectInfo{Size: length}

-		srcInfo.Reader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", length)
+		srcInfo.Reader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", length, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1840,7 +1843,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	isCompressed := false
 	if objectAPI.IsCompressionSupported() && compressPart {
-		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1854,7 +1857,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		isCompressed = true
 	}

-	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1921,7 +1924,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 			return
 		}
 		info := ObjectInfo{Size: size}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1942,10 +1946,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	}

 	etag := partInfo.ETag
-	if isCompressed {
-		// Suppress compressed ETag.
-		etag = partInfo.ETag + "-1"
-	} else if isEncrypted {
+	if isEncrypted {
 		etag = tryDecryptETag(objectEncryptionKey, partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
 	}
 	w.Header()["ETag"] = []string{"\"" + etag + "\""}
@@ -2235,11 +2236,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 	// Complete parts.
 	var completeParts []CompletePart
 	for _, part := range complMultipartUpload.Parts {
-		// Avoiding for gateway parts.
-		// `strings.TrimPrefix` does not work here as intended. So `Replace` is used instead.
-		if objectAPI.IsCompressionSupported() {
-			part.ETag = strings.Replace(part.ETag, "-1", "", -1) // For compressed multiparts, We append '-1' for part.ETag.
-		}
 		part.ETag = canonicalizeETag(part.ETag)
 		if isEncrypted {
 			// ETag is stored in the backend in encrypted form. Validate client sent ETag with

View File

@@ -1916,7 +1916,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	for i, input := range putObjectInputs {
 		// uploading the object.
 		var objInfo ObjectInfo
-		objInfo, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), ObjectOptions{UserDefined: input.metaData})
+		objInfo, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.md5sum, ""), ObjectOptions{UserDefined: input.metaData})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -2233,11 +2233,6 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			t.Fatalf("Test %d: %s: Failed to parse the CopyObjectResult response: <ERROR> %s", i+1, instanceType, err)
 		}

-		retag := canonicalizeETag(cpObjResp.ETag)
-		if retag != bytesData[0].md5sum {
-			t.Fatalf("Test %d: %s: Failed to CopyObject: <ERROR> got %s, expected %s", i+1, instanceType, retag, bytesData[0].md5sum)
-		}
-
 		// See if the new object is formed.
 		// testing whether the copy was successful.
 		err = obj.GetObject(context.Background(), testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "", opts)
@@ -2660,10 +2655,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 	}

 	// on successful complete multipart operation the s3MD5 for the parts uploaded will be returned.
-	s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
-	if err != nil {
-		t.Fatalf("Obtaining S3MD5 failed")
-	}
+	s3MD5 := getCompleteMultipartMD5(inputParts[3].parts)

 	// generating the response body content for the success case.
 	successResponse := generateCompleteMultpartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), s3MD5)

View File

@@ -18,7 +18,6 @@ package cmd

 import (
 	"bytes"
-	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/xml"
@@ -2725,12 +2724,9 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *check) {
 	c.Assert(response.StatusCode, http.StatusOK)

 	var parts []CompletePart
 	for _, part := range completeUploads.Parts {
-		// For compressed objects, we dont treat E-Tag as checksum.
-		part.ETag = strings.Replace(part.ETag, "-1", "", -1)
 		part.ETag = canonicalizeETag(part.ETag)
 		parts = append(parts, part)
 	}
-	etag, err := getCompleteMultipartMD5(context.Background(), parts)
-	c.Assert(err, nil)
+	etag := getCompleteMultipartMD5(parts)
 	c.Assert(canonicalizeETag(response.Header.Get("Etag")), etag)
 }

View File

@@ -138,7 +138,7 @@ func calculateSignedChunkLength(chunkDataSize int64) int64 {
 }

 func mustGetPutObjReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *PutObjReader {
-	hr, err := hash.NewReader(data, size, md5hex, sha256hex, size)
+	hr, err := hash.NewReader(data, size, md5hex, sha256hex, size, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		t.Fatal(err)
 	}

View File

@@ -953,7 +953,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 	var reader io.Reader = r.Body
 	actualSize := size

-	hashReader, err := hash.NewReader(reader, size, "", "", actualSize)
+	hashReader, err := hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeWebErrorResponse(w, err)
 		return
@@ -963,7 +963,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV1
 		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)

-		actualReader, err := hash.NewReader(reader, size, "", "", actualSize)
+		actualReader, err := hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeWebErrorResponse(w, err)
 			return
@@ -972,7 +972,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		// Set compression metrics.
 		size = -1 // Since compressed size is un-predictable.
 		reader = newSnappyCompressReader(actualReader)
-		hashReader, err = hash.NewReader(reader, size, "", "", actualSize)
+		hashReader, err = hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeWebErrorResponse(w, err)
 			return
@@ -996,7 +996,8 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 		info := ObjectInfo{Size: size}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return

View File

@@ -628,10 +628,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	}

 	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5, err := getCompleteMultipartMD5(ctx, parts)
-	if err != nil {
-		return oi, err
-	}
+	s3MD5 := getCompleteMultipartMD5(parts)

 	// Read metadata associated with the object from all disks.
 	partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)

View File

@@ -26,7 +26,6 @@ import (
 	"sync"

 	"github.com/minio/minio/cmd/logger"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/mimedb"
 )

@@ -64,35 +63,32 @@ func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, wri
 func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

-	// Read metadata associated with the object from all disks.
-	storageDisks := xl.getDisks()
-	metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject)
-
-	// get Quorum for this object
-	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
-	if err != nil {
-		return oi, toObjectErr(err, srcBucket, srcObject)
-	}
-
-	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
-		return oi, toObjectErr(reducedErr, srcBucket, srcObject)
-	}
-
-	// List all online disks.
-	_, modTime := listOnlineDisks(storageDisks, metaArr, errs)
-
-	// Pick latest valid metadata.
-	xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
-	if err != nil {
-		return oi, toObjectErr(err, srcBucket, srcObject)
-	}
-
-	// Length of the file to read.
-	length := xlMeta.Stat.Size
-
 	// Check if this request is only metadata update.
 	if cpSrcDstSame {
+		// Read metadata associated with the object from all disks.
+		storageDisks := xl.getDisks()
+		metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject)
+
+		// get Quorum for this object
+		readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
+		if err != nil {
+			return oi, toObjectErr(err, srcBucket, srcObject)
+		}
+
+		if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
+			return oi, toObjectErr(reducedErr, srcBucket, srcObject)
+		}
+
+		// List all online disks.
+		_, modTime := listOnlineDisks(storageDisks, metaArr, errs)
+
+		// Pick latest valid metadata.
+		xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
+		if err != nil {
+			return oi, toObjectErr(err, srcBucket, srcObject)
+		}
+
 		// Update `xl.json` content on each disks.
 		for index := range metaArr {
 			metaArr[index].Meta = srcInfo.UserDefined
@@ -110,40 +106,17 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
 		if onlineDisks, err = writeUniqueXLMetadata(ctx, storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
 			return oi, toObjectErr(err, srcBucket, srcObject)
 		}

 		// Rename atomically `xl.json` from tmp location to destination for each disk.
 		if _, err = renameXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil {
 			return oi, toObjectErr(err, srcBucket, srcObject)
 		}

 		return xlMeta.ToObjectInfo(srcBucket, srcObject), nil
 	}

-	// Initialize pipe.
-	pipeReader, pipeWriter := io.Pipe()
-	go func() {
-		var startOffset int64 // Read the whole file.
-		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, pipeWriter, srcInfo.ETag, srcOpts); gerr != nil {
-			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
-			return
-		}
-		pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
-	}()
-
-	hashReader, err := hash.NewReader(pipeReader, length, "", "", length)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return oi, toObjectErr(err, dstBucket, dstObject)
-	}
-
-	putOpts := ObjectOptions{UserDefined: srcInfo.UserDefined, ServerSideEncryption: dstOpts.ServerSideEncryption}
-	objInfo, err := xl.putObject(ctx, dstBucket, dstObject, NewPutObjReader(hashReader, nil, nil), putOpts)
-	if err != nil {
-		return oi, toObjectErr(err, dstBucket, dstObject)
-	}
-
-	// Explicitly close the reader.
-	pipeReader.Close()
-
-	return objInfo, nil
+	putOpts := ObjectOptions{ServerSideEncryption: dstOpts.ServerSideEncryption, UserDefined: srcInfo.UserDefined}
+	return xl.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
 }

 // GetObjectNInfo - returns object info and an object
// GetObjectNInfo - returns object info and an object // GetObjectNInfo - returns object info and an object

View File

@@ -43,7 +43,7 @@ type Reader struct {

 // NewReader returns a new hash Reader which computes the MD5 sum and
 // SHA256 sum (if set) of the provided io.Reader at EOF.
-func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
+func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, strictCompat bool) (*Reader, error) {
 	if _, ok := src.(*Reader); ok {
 		return nil, errNestedReader
 	}
@@ -62,6 +62,14 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	if len(sha256sum) != 0 {
 		sha256Hash = sha256.New()
 	}
+	var md5Hash hash.Hash
+	if strictCompat {
+		// Strict compatibility is set then we should
+		// calculate md5sum always.
+		md5Hash = md5.New()
+	} else if len(md5sum) != 0 {
+		md5Hash = md5.New()
+	}
 	if size >= 0 {
 		src = io.LimitReader(src, size)
 	}
@@ -70,7 +78,7 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 		sha256sum:  sha256sum,
 		src:        src,
 		size:       size,
-		md5Hash:    md5.New(),
+		md5Hash:    md5Hash,
 		sha256Hash: sha256Hash,
 		actualSize: actualSize,
 	}, nil
@@ -79,7 +87,9 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 func (r *Reader) Read(p []byte) (n int, err error) {
 	n, err = r.src.Read(p)
 	if n > 0 {
-		r.md5Hash.Write(p[:n])
+		if r.md5Hash != nil {
+			r.md5Hash.Write(p[:n])
+		}
 		if r.sha256Hash != nil {
 			r.sha256Hash.Write(p[:n])
 		}
@@ -114,7 +124,10 @@ func (r *Reader) MD5() []byte {
 // NOTE: Calling this function multiple times might yield
 // different results if they are intermixed with Reader.
 func (r *Reader) MD5Current() []byte {
-	return r.md5Hash.Sum(nil)
+	if r.md5Hash != nil {
+		return r.md5Hash.Sum(nil)
+	}
+	return nil
 }

 // SHA256 - returns byte sha256 value
@@ -145,7 +158,7 @@ func (r *Reader) Verify() error {
 			return SHA256Mismatch{hex.EncodeToString(r.sha256sum), hex.EncodeToString(sum)}
 		}
 	}
-	if len(r.md5sum) > 0 {
+	if r.md5Hash != nil && len(r.md5sum) > 0 {
 		if sum := r.md5Hash.Sum(nil); !bytes.Equal(r.md5sum, sum) {
 			return BadDigest{hex.EncodeToString(r.md5sum), hex.EncodeToString(sum)}
 		}
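A usage sketch of the changed constructor, assuming this commit's pkg/hash: with strictCompat=false and no Content-Md5 given, no MD5 state is kept and MD5Current returns nil; with strictCompat=true the digest is always computed:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/minio/minio/pkg/hash"
)

func main() {
	for _, strict := range []bool{false, true} {
		r, err := hash.NewReader(bytes.NewReader([]byte("abcd")), 4, "", "", 4, strict)
		if err != nil {
			panic(err)
		}
		io.Copy(ioutil.Discard, r) // drain so the hash state is fed
		// strict=false prints an empty digest (nil slice),
		// strict=true prints the real MD5 of "abcd".
		fmt.Printf("strict=%v md5=%x\n", strict, r.MD5Current())
	}
}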

View File

@@ -26,7 +26,7 @@ import (

 // Tests functions like Size(), MD5*(), SHA256*()
 func TestHashReaderHelperMethods(t *testing.T) {
-	r, err := NewReader(bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4)
+	r, err := NewReader(bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4, false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -107,7 +107,7 @@ func TestHashReaderVerification(t *testing.T) {
 		},
 	}
 	for i, testCase := range testCases {
-		r, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
+		r, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize, false)
 		if err != nil {
 			t.Fatalf("Test %d: Initializing reader failed %s", i+1, err)
 		}
@@ -166,7 +166,7 @@ func TestHashReaderInvalidArguments(t *testing.T) {
 	}
 	for i, testCase := range testCases {
-		_, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
+		_, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize, false)
 		if err != nil && testCase.success {
 			t.Errorf("Test %d: Expected success, but got error %s instead", i+1, err)
 		}