Turn off md5sum optionally if content-md5 is not set (#7609)
This PR also brings the --compat option to run MinIO in strict S3 compatibility mode; by default, MinIO now runs in a higher-performance mode that skips MD5 computation whenever the client does not send a Content-MD5 header.
parent 757c639044
commit 72929ec05b
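The heart of the change, distilled from the hunks below (a sketch, not a drop-in snippet): hash.NewReader gains a trailing strictCompat flag, and every caller forwards the global --compat setting, so MD5 state is allocated only when it is actually needed:

// Before: the reader always computed an MD5 sum.
reader, err := hash.NewReader(body, size, md5Hex, sha256Hex, actualSize)
// After: MD5 is computed only when md5Hex is set or strict compat mode is on.
reader, err := hash.NewReader(body, size, md5Hex, sha256Hex, actualSize, globalCLIContext.StrictS3Compat)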
@@ -362,7 +362,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty
 	// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
 	// The verification happens implicit during reading.
-	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1)
+	reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5), hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return toAPIErrorCode(ctx, err)
 	}
@@ -600,7 +600,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		return
 	}
 
-	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize)
+	hashReader, err := hash.NewReader(fileBody, fileSize, "", "", fileSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		logger.LogIf(ctx, err)
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
@@ -638,7 +638,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 			return
 		}
 		info := ObjectInfo{Size: fileSize}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", fileSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -161,6 +161,9 @@ func handleCommonCmdArgs(ctx *cli.Context) {
 	globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
 
 	logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
+
+	// Check "compat" flag from command line argument.
+	globalCLIContext.StrictS3Compat = ctx.IsSet("compat") || ctx.GlobalIsSet("compat")
 }
 
 // Parses the given compression exclude list `extensions` or `content-types`.
@@ -74,7 +74,7 @@ func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string,
 }
 
 func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
-	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)))
+	hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return err
 	}
@@ -18,7 +18,6 @@ package cmd
 
 import (
 	"context"
-	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -399,7 +398,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 	if fsMeta.Meta["etag"] == "" {
-		fsMeta.Meta["etag"] = hex.EncodeToString(data.MD5Current())
+		fsMeta.Meta["etag"] = r.MD5CurrentHexString()
 	}
 	// Should return IncompleteBody{} error when reader has fewer
 	// bytes than specified in request header.
@@ -243,7 +243,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 	// Initialize pipe.
 	pipeReader, pipeWriter := io.Pipe()
 	teeReader := io.TeeReader(bkReader, pipeWriter)
-	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size)
+	hashReader, herr := hash.NewReader(pipeReader, bkReader.ObjInfo.Size, "", "", bkReader.ObjInfo.Size, globalCLIContext.StrictS3Compat)
 	if herr != nil {
 		bkReader.Close()
 		return nil, herr
@@ -314,7 +314,7 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
 	}
 	// Initialize pipe.
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size)
+	hashReader, err := hash.NewReader(pipeReader, objInfo.Size, "", "", objInfo.Size, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return err
 	}
@@ -664,13 +664,13 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *P
 	objInfo = ObjectInfo{}
 	// Initialize pipe to stream data to backend
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return ObjectInfo{}, err
 	}
 	// Initialize pipe to stream data to cache
 	rPipe, wPipe := io.Pipe()
-	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return ObjectInfo{}, err
 	}
@@ -758,13 +758,13 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
 	info = PartInfo{}
 	// Initialize pipe to stream data to backend
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return
 	}
 	// Initialize pipe to stream data to cache
 	rPipe, wPipe := io.Pipe()
-	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
+	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize(), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		return
 	}
@@ -194,7 +194,7 @@ func TestDiskCache(t *testing.T) {
 	objInfo.UserDefined = httpMeta
 	var opts ObjectOptions
 	byteReader := bytes.NewReader([]byte(content))
-	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
+	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -269,7 +269,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
 	opts := ObjectOptions{}
 
 	byteReader := bytes.NewReader([]byte(content))
-	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size))
+	hashReader, err := hash.NewReader(byteReader, int64(size), "", "", int64(size), globalCLIContext.StrictS3Compat)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -504,10 +504,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	}
 
 	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5, err := getCompleteMultipartMD5(ctx, parts)
-	if err != nil {
-		return oi, err
-	}
+	s3MD5 := getCompleteMultipartMD5(parts)
 
 	partSize := int64(-1) // Used later to ensure that all parts sizes are same.
@@ -17,7 +17,6 @@
 package cmd
 
 import (
-	"context"
 	"net/http"
 	"os"
 	"strings"
@@ -49,9 +48,6 @@ var (
 
 	// IsStringEqual is string equal.
 	IsStringEqual = isStringEqual
-
-	// GetCompleteMultipartMD5 returns multipart MD5
-	GetCompleteMultipartMD5 = getCompleteMultipartMD5
 )
 
 // StatInfo - alias for statInfo
@@ -351,8 +347,8 @@ func ErrorRespToObjectError(err error, params ...string) error {
 }
 
 // ComputeCompleteMultipartMD5 calculates MD5 ETag for complete multipart responses
-func ComputeCompleteMultipartMD5(parts []CompletePart) (string, error) {
-	return getCompleteMultipartMD5(context.Background(), parts)
+func ComputeCompleteMultipartMD5(parts []CompletePart) string {
+	return getCompleteMultipartMD5(parts)
 }
 
 // parse gateway sse env variable
@@ -835,7 +835,7 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *
 	}
 
 	// Save md5sum for future processing on the object.
-	opts.UserDefined["x-amz-meta-md5sum"] = hex.EncodeToString(data.MD5Current())
+	opts.UserDefined["x-amz-meta-md5sum"] = r.MD5CurrentHexString()
 	objBlob.Metadata, objBlob.Properties, err = s3MetaToAzureProperties(ctx, opts.UserDefined)
 	if err != nil {
 		return objInfo, azureToObjectError(err, bucket, object)
@@ -1207,10 +1207,7 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 	if err != nil {
 		return objInfo, azureToObjectError(err, bucket, object)
 	}
-	objBlob.Metadata["md5sum"], err = cmd.ComputeCompleteMultipartMD5(uploadedParts)
-	if err != nil {
-		return objInfo, err
-	}
+	objBlob.Metadata["md5sum"] = cmd.ComputeCompleteMultipartMD5(uploadedParts)
 	err = objBlob.SetProperties(nil)
 	if err != nil {
 		return objInfo, azureToObjectError(err, bucket, object)
@@ -647,10 +647,7 @@ func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, objec
 	}
 
 	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5, err := minio.GetCompleteMultipartMD5(ctx, parts)
-	if err != nil {
-		return objInfo, err
-	}
+	s3MD5 := minio.ComputeCompleteMultipartMD5(parts)
 
 	return minio.ObjectInfo{
 		Bucket: bucket,
@@ -241,7 +241,7 @@ func getGWMetadata(ctx context.Context, bucket, prefix string, gwMeta gwMetaV1)
 		logger.LogIf(ctx, err)
 		return nil, err
 	}
-	hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)))
+	hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)), false)
 	if err != nil {
 		return nil, err
 	}
@@ -92,9 +92,10 @@ const (
 )
 
 var globalCLIContext = struct {
-	JSON, Quiet bool
-	Anonymous   bool
-	Addr        string
+	JSON, Quiet    bool
+	Anonymous      bool
+	Addr           string
+	StrictS3Compat bool
 }{}
 
 var (
cmd/main.go
@@ -32,24 +32,28 @@ var globalFlags = []cli.Flag{
 	cli.StringFlag{
 		Name:  "config-dir, C",
 		Value: defaultConfigDir.Get(),
-		Usage: "[DEPRECATED] Path to legacy configuration directory.",
+		Usage: "[DEPRECATED] path to legacy configuration directory",
 	},
 	cli.StringFlag{
 		Name:  "certs-dir, S",
 		Value: defaultCertsDir.Get(),
-		Usage: "Path to certs directory.",
+		Usage: "path to certs directory",
 	},
 	cli.BoolFlag{
 		Name:  "quiet",
-		Usage: "Disable startup information.",
+		Usage: "disable startup information",
 	},
 	cli.BoolFlag{
 		Name:  "anonymous",
-		Usage: "Hide sensitive information from logging.",
+		Usage: "hide sensitive information from logging",
 	},
 	cli.BoolFlag{
 		Name:  "json",
-		Usage: "Output server logs and startup information in json format.",
+		Usage: "output server logs and startup information in json format",
 	},
+	cli.BoolFlag{
+		Name:  "compat",
+		Usage: "trade off performance for S3 compatibility",
+	},
 }
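With the flag registered, strict S3 behavior becomes opt-in at startup, e.g. `minio server --compat /data` (exact flag placement follows the cli package's usual rules); without it, the server only computes MD5 sums when the client supplies a Content-MD5 header.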
@@ -118,7 +122,7 @@ func newApp(name string) *cli.App {
 	// Set up app.
 	cli.HelpFlag = cli.BoolFlag{
 		Name:  "help, h",
-		Usage: "Show help.",
+		Usage: "show help",
 	}
 
 	app := cli.NewApp()
@@ -19,7 +19,6 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"encoding/hex"
 	"fmt"
 	"os"
 	"reflect"
@@ -1866,10 +1865,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 			},
 		},
 	}
-	s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
-	if err != nil {
-		t.Fatalf("Obtaining S3MD5 failed")
-	}
+	s3MD5 := getCompleteMultipartMD5(inputParts[3].parts)
 
 	// Test cases with sample input values for CompleteMultipartUpload.
 	testCases := []struct {
@@ -1898,8 +1894,8 @@
 		// Asserting for Invalid UploadID (Test number 9).
 		{bucketNames[0], objectNames[0], "abc", []CompletePart{}, "", InvalidUploadID{UploadID: "abc"}, false},
 		// Test case with invalid Part Etag (Test number 10-11).
-		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abc"}}, "", hex.ErrLength, false},
-		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abcz"}}, "", hex.InvalidByteError(00), false},
+		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abc"}}, "", InvalidPart{}, false},
+		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abcz"}}, "", InvalidPart{}, false},
 		// Part number 0 doesn't exist, expecting InvalidPart error (Test number 12).
 		{bucketNames[0], objectNames[0], uploadIDs[0], []CompletePart{{ETag: "abcd", PartNumber: 0}}, "", InvalidPart{}, false},
 		// // Upload and PartNumber exists, But a deliberate ETag mismatch is introduced (Test number 13).
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2015-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -190,18 +190,18 @@ func mustGetUUID() string {
 }
 
 // Create an s3 compatible MD5sum for complete multipart transaction.
-func getCompleteMultipartMD5(ctx context.Context, parts []CompletePart) (string, error) {
+func getCompleteMultipartMD5(parts []CompletePart) string {
 	var finalMD5Bytes []byte
 	for _, part := range parts {
 		md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag))
 		if err != nil {
-			logger.LogIf(ctx, err)
-			return "", err
+			finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...)
+		} else {
+			finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
 		}
-		finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
 	}
 	s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts))
-	return s3MD5, nil
+	return s3MD5
 }
 
 // Clean unwanted fields from metadata
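The rewritten helper can no longer fail: a part ETag that is not valid hex (now possible, since non-compat mode can hand out random `<hex>-1` ETags) is folded into the final digest as raw bytes instead of aborting the complete-multipart call. A standalone sketch of the same logic using only the standard library; `completeMultipartMD5` and the quote-trimming stand-in for `canonicalizeETag` are hypothetical names:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// completeMultipartMD5 mirrors the new getCompleteMultipartMD5: concatenate
// each part's decoded ETag (or its raw bytes when the ETag is not valid hex)
// and return md5(concat) + "-" + partCount.
func completeMultipartMD5(etags []string) string {
	var all []byte
	for _, etag := range etags {
		etag = strings.Trim(etag, "\"") // stand-in for canonicalizeETag
		if b, err := hex.DecodeString(etag); err == nil {
			all = append(all, b...)
		} else {
			all = append(all, []byte(etag)...)
		}
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(etags))
}

func main() {
	// Matches the updated test table below: one valid MD5 ETag yields
	// 10dc1617fbcf0bd0858048cb96e6bd77-1.
	fmt.Println(completeMultipartMD5([]string{"cf1f738a5924e645913c984e0fe3d708"}))
}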
@@ -675,9 +675,26 @@ func (p *PutObjReader) Size() int64 {
 // as a hex encoded string
 func (p *PutObjReader) MD5CurrentHexString() string {
 	md5sumCurr := p.rawReader.MD5Current()
+	var appendHyphen bool
+	// md5sumcurr is not empty in two scenarios
+	// - server is running in strict compatibility mode
+	// - client set Content-Md5 during PUT operation
+	if len(md5sumCurr) == 0 {
+		// md5sumCurr is only empty when we are running
+		// in non-compatibility mode.
+		md5sumCurr = make([]byte, 16)
+		rand.Read(md5sumCurr)
+		appendHyphen = true
+	}
 	if p.sealMD5Fn != nil {
 		md5sumCurr = p.sealMD5Fn(md5sumCurr)
 	}
+	if appendHyphen {
+		// Make sure to return etag string upto 32 length, for SSE
+		// requests ETag might be longer and the code decrypting the
+		// ETag ignores ETag in multipart ETag form i.e <hex>-N
+		return hex.EncodeToString(md5sumCurr)[:32] + "-1"
+	}
 	return hex.EncodeToString(md5sumCurr)
 }
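Together with the lenient helper above, this is what keeps clients working when MD5 is skipped: the ETag handed back is random rather than content-derived, and the "-1" suffix gives it the shape of a multipart ETag, which S3 clients already treat as non-verifiable. A sketch of the resulting shape (the choice of crypto/rand here is an assumption of the sketch, not a claim about the file's imports):

package main

import (
	"crypto/rand" // assumption: any random byte source serves for this sketch
	"encoding/hex"
	"fmt"
)

func main() {
	b := make([]byte, 16)
	rand.Read(b) // random stand-in for the MD5 sum that was never computed
	// 32 hex characters plus "-1", shaped like a multipart ETag.
	fmt.Println(hex.EncodeToString(b)[:32] + "-1")
}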
@@ -18,7 +18,6 @@ package cmd
 
 import (
 	"bytes"
-	"context"
 	"io"
 	"net/http"
 	"reflect"
@@ -143,8 +142,8 @@ func TestGetCompleteMultipartMD5(t *testing.T) {
 		expectedResult string
 		expectedErr    string
 	}{
-		// Wrong MD5 hash string
-		{[]CompletePart{{ETag: "wrong-md5-hash-string"}}, "", "encoding/hex: invalid byte: U+0077 'w'"},
+		// Wrong MD5 hash string, returns md5um of hash
+		{[]CompletePart{{ETag: "wrong-md5-hash-string"}}, "0deb8cb07527b4b2669c861cb9653607-1", ""},
 
 		// Single CompletePart with valid MD5 hash string.
 		{[]CompletePart{{ETag: "cf1f738a5924e645913c984e0fe3d708"}}, "10dc1617fbcf0bd0858048cb96e6bd77-1", ""},
@@ -154,17 +153,10 @@
 	}
 
 	for i, test := range testCases {
-		result, err := getCompleteMultipartMD5(context.Background(), test.parts)
+		result := getCompleteMultipartMD5(test.parts)
 		if result != test.expectedResult {
 			t.Fatalf("test %d failed: expected: result=%v, got=%v", i+1, test.expectedResult, result)
 		}
-		errString := ""
-		if err != nil {
-			errString = err.Error()
-		}
-		if errString != test.expectedErr {
-			t.Fatalf("test %d failed: expected: err=%v, got=%v", i+1, test.expectedErr, err)
-		}
 	}
 }
@@ -827,7 +827,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		reader = gr
 	}
 
-	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize)
+	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -934,7 +934,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		crypto.RemoveInternalEntries(srcInfo.UserDefined)
 	}
 
-	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", targetSize) // do not try to verify encrypted content
+	// do not try to verify encrypted content
+	srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", targetSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1188,7 +1189,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV1
 		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
 
-		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1201,7 +1202,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		sha256hex = ""
 	}
 
-	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1235,7 +1236,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 			return
 		}
 		info := ObjectInfo{Size: size}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1260,8 +1262,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 
 	etag := objInfo.ETag
 	if objInfo.IsCompressed() {
-		// Ignore compressed ETag.
-		etag = objInfo.ETag + "-1"
+		if !strings.HasSuffix(objInfo.ETag, "-1") {
+			etag = objInfo.ETag + "-1"
+		}
 	} else if hasServerSideEncryptionHeader(r.Header) {
 		etag = getDecryptedETag(r.Header, objInfo, false)
 	}
@@ -1613,7 +1616,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		reader = gr
 	}
 
-	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize)
+	srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1673,7 +1676,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 	}
 
 	info := ObjectInfo{Size: length}
-	srcInfo.Reader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", length)
+	srcInfo.Reader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", length, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1840,7 +1843,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 
 	isCompressed := false
 	if objectAPI.IsCompressionSupported() && compressPart {
-		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+		actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1854,7 +1857,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		isCompressed = true
 	}
 
-	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize)
+	hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
@@ -1921,7 +1924,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 			return
 		}
 		info := ObjectInfo{Size: size}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -1942,10 +1946,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	}
 
 	etag := partInfo.ETag
-	if isCompressed {
-		// Suppress compressed ETag.
-		etag = partInfo.ETag + "-1"
-	} else if isEncrypted {
+	if isEncrypted {
 		etag = tryDecryptETag(objectEncryptionKey, partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
 	}
 	w.Header()["ETag"] = []string{"\"" + etag + "\""}
@@ -2235,11 +2236,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 	// Complete parts.
 	var completeParts []CompletePart
 	for _, part := range complMultipartUpload.Parts {
-		// Avoiding for gateway parts.
-		// `strings.TrimPrefix` does not work here as intended. So `Replace` is used instead.
-		if objectAPI.IsCompressionSupported() {
-			part.ETag = strings.Replace(part.ETag, "-1", "", -1) // For compressed multiparts, We append '-1' for part.ETag.
-		}
 		part.ETag = canonicalizeETag(part.ETag)
 		if isEncrypted {
 			// ETag is stored in the backend in encrypted form. Validate client sent ETag with
@@ -1916,7 +1916,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	for i, input := range putObjectInputs {
 		// uploading the object.
 		var objInfo ObjectInfo
-		objInfo, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), ObjectOptions{UserDefined: input.metaData})
+		objInfo, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.md5sum, ""), ObjectOptions{UserDefined: input.metaData})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -2233,11 +2233,6 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 			t.Fatalf("Test %d: %s: Failed to parse the CopyObjectResult response: <ERROR> %s", i+1, instanceType, err)
 		}
 
-		retag := canonicalizeETag(cpObjResp.ETag)
-		if retag != bytesData[0].md5sum {
-			t.Fatalf("Test %d: %s: Failed to CopyObject: <ERROR> got %s, expected %s", i+1, instanceType, retag, bytesData[0].md5sum)
-		}
-
 		// See if the new object is formed.
 		// testing whether the copy was successful.
 		err = obj.GetObject(context.Background(), testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "", opts)
@@ -2660,10 +2655,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 	}
 
 	// on successful complete multipart operation the s3MD5 for the parts uploaded will be returned.
-	s3MD5, err := getCompleteMultipartMD5(context.Background(), inputParts[3].parts)
-	if err != nil {
-		t.Fatalf("Obtaining S3MD5 failed")
-	}
+	s3MD5 := getCompleteMultipartMD5(inputParts[3].parts)
 
 	// generating the response body content for the success case.
 	successResponse := generateCompleteMultpartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), s3MD5)
@@ -18,7 +18,6 @@ package cmd
 
 import (
 	"bytes"
-	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/xml"
@@ -2725,12 +2724,9 @@ func (s *TestSuiteCommon) TestObjectMultipart(c *check) {
 	c.Assert(response.StatusCode, http.StatusOK)
 	var parts []CompletePart
 	for _, part := range completeUploads.Parts {
-		// For compressed objects, we dont treat E-Tag as checksum.
-		part.ETag = strings.Replace(part.ETag, "-1", "", -1)
 		part.ETag = canonicalizeETag(part.ETag)
 		parts = append(parts, part)
 	}
-	etag, err := getCompleteMultipartMD5(context.Background(), parts)
-	c.Assert(err, nil)
+	etag := getCompleteMultipartMD5(parts)
 	c.Assert(canonicalizeETag(response.Header.Get("Etag")), etag)
 }
@@ -138,7 +138,7 @@ func calculateSignedChunkLength(chunkDataSize int64) int64 {
 }
 
 func mustGetPutObjReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *PutObjReader {
-	hr, err := hash.NewReader(data, size, md5hex, sha256hex, size)
+	hr, err := hash.NewReader(data, size, md5hex, sha256hex, size, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -953,7 +953,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 	var reader io.Reader = r.Body
 	actualSize := size
 
-	hashReader, err := hash.NewReader(reader, size, "", "", actualSize)
+	hashReader, err := hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
 	if err != nil {
 		writeWebErrorResponse(w, err)
 		return
@@ -963,7 +963,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV1
 		metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10)
 
-		actualReader, err := hash.NewReader(reader, size, "", "", actualSize)
+		actualReader, err := hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeWebErrorResponse(w, err)
 			return
@@ -972,7 +972,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		// Set compression metrics.
 		size = -1 // Since compressed size is un-predictable.
 		reader = newSnappyCompressReader(actualReader)
-		hashReader, err = hash.NewReader(reader, size, "", "", actualSize)
+		hashReader, err = hash.NewReader(reader, size, "", "", actualSize, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeWebErrorResponse(w, err)
 			return
@@ -996,7 +996,8 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 		info := ObjectInfo{Size: size}
-		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
+		// do not try to verify encrypted content
+		hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size, globalCLIContext.StrictS3Compat)
 		if err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
@@ -628,10 +628,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 	}
 
 	// Calculate s3 compatible md5sum for complete multipart.
-	s3MD5, err := getCompleteMultipartMD5(ctx, parts)
-	if err != nil {
-		return oi, err
-	}
+	s3MD5 := getCompleteMultipartMD5(parts)
 
 	// Read metadata associated with the object from all disks.
 	partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath)
@@ -26,7 +26,6 @@ import (
 	"sync"
 
 	"github.com/minio/minio/cmd/logger"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/mimedb"
 )
@@ -64,35 +63,32 @@ func (xl xlObjects) putObjectDir(ctx context.Context, bucket, object string, wri
 func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 
-	// Read metadata associated with the object from all disks.
-	storageDisks := xl.getDisks()
-
-	metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject)
-
-	// get Quorum for this object
-	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
-	if err != nil {
-		return oi, toObjectErr(err, srcBucket, srcObject)
-	}
-
-	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
-		return oi, toObjectErr(reducedErr, srcBucket, srcObject)
-	}
-
-	// List all online disks.
-	_, modTime := listOnlineDisks(storageDisks, metaArr, errs)
-
-	// Pick latest valid metadata.
-	xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
-	if err != nil {
-		return oi, toObjectErr(err, srcBucket, srcObject)
-	}
-
-	// Length of the file to read.
-	length := xlMeta.Stat.Size
-
 	// Check if this request is only metadata update.
 	if cpSrcDstSame {
+		// Read metadata associated with the object from all disks.
+		storageDisks := xl.getDisks()
+
+		metaArr, errs := readAllXLMetadata(ctx, storageDisks, srcBucket, srcObject)
+
+		// get Quorum for this object
+		readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, xl, metaArr, errs)
+		if err != nil {
+			return oi, toObjectErr(err, srcBucket, srcObject)
+		}
+
+		if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
+			return oi, toObjectErr(reducedErr, srcBucket, srcObject)
+		}
+
+		// List all online disks.
+		_, modTime := listOnlineDisks(storageDisks, metaArr, errs)
+
+		// Pick latest valid metadata.
+		xlMeta, err := pickValidXLMeta(ctx, metaArr, modTime, readQuorum)
+		if err != nil {
+			return oi, toObjectErr(err, srcBucket, srcObject)
+		}
 		// Update `xl.json` content on each disks.
 		for index := range metaArr {
 			metaArr[index].Meta = srcInfo.UserDefined
@@ -110,40 +106,17 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
 		if onlineDisks, err = writeUniqueXLMetadata(ctx, storageDisks, minioMetaTmpBucket, tempObj, metaArr, writeQuorum); err != nil {
 			return oi, toObjectErr(err, srcBucket, srcObject)
 		}
 
 		// Rename atomically `xl.json` from tmp location to destination for each disk.
 		if _, err = renameXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, writeQuorum); err != nil {
 			return oi, toObjectErr(err, srcBucket, srcObject)
 		}
 
 		return xlMeta.ToObjectInfo(srcBucket, srcObject), nil
 	}
 
-	// Initialize pipe.
-	pipeReader, pipeWriter := io.Pipe()
-
-	go func() {
-		var startOffset int64 // Read the whole file.
-		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, pipeWriter, srcInfo.ETag, srcOpts); gerr != nil {
-			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
-			return
-		}
-		pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
-	}()
-
-	hashReader, err := hash.NewReader(pipeReader, length, "", "", length)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return oi, toObjectErr(err, dstBucket, dstObject)
-	}
-	putOpts := ObjectOptions{UserDefined: srcInfo.UserDefined, ServerSideEncryption: dstOpts.ServerSideEncryption}
-	objInfo, err := xl.putObject(ctx, dstBucket, dstObject, NewPutObjReader(hashReader, nil, nil), putOpts)
-	if err != nil {
-		return oi, toObjectErr(err, dstBucket, dstObject)
-	}
-
-	// Explicitly close the reader.
-	pipeReader.Close()
-
-	return objInfo, nil
+	putOpts := ObjectOptions{ServerSideEncryption: dstOpts.ServerSideEncryption, UserDefined: srcInfo.UserDefined}
+	return xl.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
 }
 
 // GetObjectNInfo - returns object info and an object
@@ -43,7 +43,7 @@ type Reader struct {
 
 // NewReader returns a new hash Reader which computes the MD5 sum and
 // SHA256 sum (if set) of the provided io.Reader at EOF.
-func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) {
+func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, strictCompat bool) (*Reader, error) {
 	if _, ok := src.(*Reader); ok {
 		return nil, errNestedReader
 	}
@@ -62,6 +62,14 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 	if len(sha256sum) != 0 {
 		sha256Hash = sha256.New()
 	}
+	var md5Hash hash.Hash
+	if strictCompat {
+		// Strict compatibility is set then we should
+		// calculate md5sum always.
+		md5Hash = md5.New()
+	} else if len(md5sum) != 0 {
+		md5Hash = md5.New()
+	}
 	if size >= 0 {
 		src = io.LimitReader(src, size)
 	}
@@ -70,7 +78,7 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 		sha256sum:  sha256sum,
 		src:        src,
 		size:       size,
-		md5Hash:    md5.New(),
+		md5Hash:    md5Hash,
 		sha256Hash: sha256Hash,
 		actualSize: actualSize,
 	}, nil
@@ -79,7 +87,9 @@ func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i
 func (r *Reader) Read(p []byte) (n int, err error) {
 	n, err = r.src.Read(p)
 	if n > 0 {
-		r.md5Hash.Write(p[:n])
+		if r.md5Hash != nil {
+			r.md5Hash.Write(p[:n])
+		}
 		if r.sha256Hash != nil {
 			r.sha256Hash.Write(p[:n])
 		}
@@ -114,7 +124,10 @@ func (r *Reader) MD5() []byte {
 // NOTE: Calling this function multiple times might yield
 // different results if they are intermixed with Reader.
 func (r *Reader) MD5Current() []byte {
-	return r.md5Hash.Sum(nil)
+	if r.md5Hash != nil {
+		return r.md5Hash.Sum(nil)
+	}
+	return nil
 }
 
 // SHA256 - returns byte sha256 value
@@ -145,7 +158,7 @@ func (r *Reader) Verify() error {
 			return SHA256Mismatch{hex.EncodeToString(r.sha256sum), hex.EncodeToString(sum)}
 		}
 	}
-	if len(r.md5sum) > 0 {
+	if r.md5Hash != nil && len(r.md5sum) > 0 {
 		if sum := r.md5Hash.Sum(nil); !bytes.Equal(r.md5sum, sum) {
 			return BadDigest{hex.EncodeToString(r.md5sum), hex.EncodeToString(sum)}
 		}
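A minimal sketch of the resulting reader behavior, assuming this commit's github.com/minio/minio/pkg/hash: with strictCompat=false and an empty md5Hex, no MD5 state is allocated, so MD5Current() returns nil after the body is drained; passing true (or a non-empty md5Hex) restores the old always-hash behavior:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/minio/minio/pkg/hash"
)

func main() {
	payload := []byte("hello, world")
	// No Content-MD5 from the client and compat mode off: MD5 is skipped.
	r, err := hash.NewReader(bytes.NewReader(payload), int64(len(payload)), "", "", int64(len(payload)), false)
	if err != nil {
		panic(err)
	}
	ioutil.ReadAll(r)                  // drain the reader, as a PUT would
	fmt.Println(r.MD5Current() == nil) // true: nothing was hashed
}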
@@ -26,7 +26,7 @@ import (
 
 // Tests functions like Size(), MD5*(), SHA256*()
 func TestHashReaderHelperMethods(t *testing.T) {
-	r, err := NewReader(bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4)
+	r, err := NewReader(bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4, false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -107,7 +107,7 @@ func TestHashReaderVerification(t *testing.T) {
 		},
 	}
 	for i, testCase := range testCases {
-		r, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
+		r, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize, false)
 		if err != nil {
 			t.Fatalf("Test %d: Initializing reader failed %s", i+1, err)
 		}
@@ -166,7 +166,7 @@ func TestHashReaderInvalidArguments(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		_, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize)
+		_, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize, false)
 		if err != nil && testCase.success {
 			t.Errorf("Test %d: Expected success, but got error %s instead", i+1, err)
 		}