Simplify data verification with HashReader. (#5071)
Verify() was being called by the caller after the data had been successfully read, i.e. after io.EOF. This disconnect opens a race under concurrent access to such an object. Verification is not necessary outside of the Read() call; we can simply do the checksum verification right inside Read() at io.EOF. This approach simplifies the usage.
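As a minimal sketch of the idea (an illustration only, not MinIO's actual pkg/hash implementation), a reader can fold MD5 verification into Read() itself, so the check happens atomically with the final read at io.EOF instead of in a separate caller-driven Verify() step:

// Minimal sketch: checksum verification inside Read() at io.EOF.
// This illustrates the approach; it is not MinIO's pkg/hash code.
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"hash"
	"io"
	"strings"
)

type verifyingReader struct {
	src    io.Reader
	want   []byte    // expected MD5 digest; empty disables verification
	digest hash.Hash // running MD5 of everything read so far
}

func newVerifyingReader(src io.Reader, md5Hex string) (*verifyingReader, error) {
	want, err := hex.DecodeString(md5Hex)
	if err != nil {
		return nil, err
	}
	return &verifyingReader{src: src, want: want, digest: md5.New()}, nil
}

func (r *verifyingReader) Read(p []byte) (int, error) {
	n, err := r.src.Read(p)
	if n > 0 {
		r.digest.Write(p[:n]) // hash.Hash.Write never returns an error
	}
	// Verify here, at io.EOF, so no window exists between "data fully
	// read" and "checksum verified" for a concurrent caller to race.
	if err == io.EOF && len(r.want) > 0 && !bytes.Equal(r.digest.Sum(nil), r.want) {
		return n, errors.New("bad digest: content MD5 does not match")
	}
	return n, err
}

func main() {
	body := "hello"
	sum := md5.Sum([]byte(body))
	r, _ := newVerifyingReader(strings.NewReader(body), hex.EncodeToString(sum[:]))
	if _, err := io.Copy(io.Discard, r); err != nil {
		fmt.Println("verification failed:", err)
		return
	}
	fmt.Println("verified at io.EOF")
}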
parent 65a817fe8c
commit 1d8a8c63db
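The test hunks below replace direct NewHashReader(...) construction with a mustGetHashReader helper. Its definition is not part of the hunks shown; judging from the call sites (it is passed both *testing.T and *testing.B), it presumably wraps hash.NewReader and aborts the test or benchmark on a construction error. A hypothetical reconstruction:

package cmd

import (
	"io"
	"testing"

	"github.com/minio/minio/pkg/hash"
)

// mustGetHashReader is a hypothetical reconstruction of the helper the
// updated tests call; the real definition lies outside the hunks shown.
func mustGetHashReader(t testing.TB, data io.Reader, size int64, md5hex, sha256hex string) *hash.Reader {
	hr, err := hash.NewReader(data, size, md5hex, sha256hex)
	if err != nil {
		t.Fatal(err)
	}
	return hr
}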
@@ -943,7 +943,7 @@ func TestHealObjectHandler(t *testing.T) {
}

_, err = adminTestBed.objLayer.PutObject(bucketName, objName,
NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""), nil)
mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""), nil)
if err != nil {
t.Fatalf("Failed to create %s - %v", objName, err)
}
@@ -1083,7 +1083,7 @@ func TestHealUploadHandler(t *testing.T) {
// Upload a part.
partID := 1
_, err = adminTestBed.objLayer.PutObjectPart(bucketName, objName, uploadID,
partID, NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""))
partID, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""))
if err != nil {
t.Fatalf("Failed to upload part %d of %s/%s - %v", partID,
bucketName, objName, err)

@@ -19,6 +19,8 @@ package cmd
import (
"encoding/xml"
"net/http"

"github.com/minio/minio/pkg/hash"
)

// APIError structure
@@ -681,8 +683,6 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
switch err {
case errSignatureMismatch:
apiErr = ErrSignatureDoesNotMatch
case errContentSHA256Mismatch:
apiErr = ErrContentSHA256Mismatch
case errDataTooLarge:
apiErr = ErrEntityTooLarge
case errDataTooSmall:
@@ -701,7 +701,7 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
switch err.(type) {
case StorageFull:
apiErr = ErrStorageFull
case BadDigest:
case hash.BadDigest:
apiErr = ErrBadDigest
case AllAccessDisabled:
apiErr = ErrAllAccessDisabled
@@ -747,7 +747,7 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooSmall
case SignatureDoesNotMatch:
apiErr = ErrSignatureDoesNotMatch
case SHA256Mismatch:
case hash.SHA256Mismatch:
apiErr = ErrContentSHA256Mismatch
case ObjectTooLarge:
apiErr = ErrEntityTooLarge

@@ -19,6 +19,8 @@ package cmd
import (
"errors"
"testing"

"github.com/minio/minio/pkg/hash"
)

func TestAPIErrCode(t *testing.T) {
@@ -28,9 +30,13 @@ func TestAPIErrCode(t *testing.T) {
}{
// Valid cases.
{
BadDigest{},
hash.BadDigest{},
ErrBadDigest,
},
{
hash.SHA256Mismatch{},
ErrContentSHA256Mismatch,
},
{
IncompleteBody{},
ErrIncompleteBody,
@@ -110,10 +116,6 @@ func TestAPIErrCode(t *testing.T) {
{
errSignatureMismatch,
ErrSignatureDoesNotMatch,
},
{
errContentSHA256Mismatch,
ErrContentSHA256Mismatch,
}, // End of all valid cases.

// Case where err is nil.

@@ -50,20 +50,23 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
metadata := make(map[string]string)
metadata["etag"] = getMD5Hash(textData)
sha256sum := ""

md5hex := getMD5Hash(textData)
sha256hex := ""

// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != metadata["etag"] {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, metadata["etag"])
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// Benchmark ends here. Stop timer.
@@ -94,13 +97,14 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for NewMultipartUpload.
metadata := make(map[string]string)
metadata["etag"] = getMD5Hash(textData)
sha256sum := ""
uploadID, err = obj.NewMultipartUpload(bucket, object, metadata)
if err != nil {
b.Fatal(err)
}

md5hex := getMD5Hash(textData)
sha256hex := ""

var textPartData []byte
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
@@ -115,15 +119,15 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
} else {
textPartData = textData[j*partSize:]
}
metadata := make(map[string]string)
metadata["etag"] = getMD5Hash([]byte(textPartData))
md5hex = getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, NewHashReader(bytes.NewBuffer(textPartData), int64(len(textPartData)), metadata["etag"], sha256sum))
partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j,
mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex))
if err != nil {
b.Fatal(err)
}
if partInfo.ETag != metadata["etag"] {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, etag, metadata["etag"])
if partInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, etag, md5hex)
}
}
}
@@ -205,23 +209,27 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.Fatal(err)
}

sha256sum := ""
textData := generateBytesData(objSize)

// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
metadata := make(map[string]string)

// get text data generated for number of bytes equal to object size.
md5hex := getMD5Hash(textData)
sha256hex := ""

for i := 0; i < 10; i++ {
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
metadata := make(map[string]string)
metadata["etag"] = getMD5Hash(textData)
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != metadata["etag"] {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, metadata["etag"])
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}

@@ -318,8 +326,10 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
metadata := make(map[string]string)
metadata["etag"] = getMD5Hash([]byte(textData))
sha256sum := ""

md5hex := getMD5Hash([]byte(textData))
sha256hex := ""

// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
@@ -329,12 +339,13 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != metadata["etag"] {
b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", objInfo.ETag, metadata["etag"])
if objInfo.ETag != md5hex {
b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", objInfo.ETag, md5hex)
}
i++
}
@@ -361,23 +372,26 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
b.Fatal(err)
}

// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
metadata := make(map[string]string)

md5hex := getMD5Hash([]byte(textData))
sha256hex := ""

for i := 0; i < 10; i++ {
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function which writes the data onto the FS/XL backend.
metadata := make(map[string]string)
metadata["etag"] = getMD5Hash([]byte(textData))
sha256sum := ""
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != metadata["etag"] {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, metadata["etag"])
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}

@@ -29,6 +29,7 @@ import (

mux "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/hash"
)

// http://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html
@@ -549,7 +550,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
writeErrorResponse(w, ErrInternalError, r.URL)
return
}
sha256sum := ""

objectLock := globalNSMutex.NewNSLock(bucket, object)
if objectLock.GetLock(globalObjectTimeout) != nil {
@@ -558,7 +558,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
}
defer objectLock.Unlock()

objInfo, err := objectAPI.PutObject(bucket, object, NewHashReader(fileBody, fileSize, metadata["etag"], sha256sum), metadata)
hashReader, err := hash.NewReader(fileBody, fileSize, "", "")
if err != nil {
errorIf(err, "Unable to initialize hashReader.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

objInfo, err := objectAPI.PutObject(bucket, object, hashReader, metadata)
if err != nil {
errorIf(err, "Unable to create object.")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)

@@ -631,7 +631,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)

@@ -21,6 +21,8 @@ import (
"encoding/json"
"io"
"sync"

"github.com/minio/minio/pkg/hash"
)

const (
@@ -214,11 +216,18 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy *bucketPolicy) err
policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
// Acquire a write lock on policy config before modifying.
objLock := globalNSMutex.NewNSLock(minioMetaBucket, policyPath)
if err := objLock.GetLock(globalOperationTimeout); err != nil {
if err = objLock.GetLock(globalOperationTimeout); err != nil {
return err
}
defer objLock.Unlock()
if _, err := objAPI.PutObject(minioMetaBucket, policyPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", ""), nil); err != nil {

hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
if err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errorCause(err)
}

if _, err = objAPI.PutObject(minioMetaBucket, policyPath, hashReader, nil); err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errorCause(err)
}

@@ -27,6 +27,7 @@ import (
"sync"

"github.com/Sirupsen/logrus"
"github.com/minio/minio/pkg/hash"
)

const (
@@ -469,8 +470,12 @@ func persistNotificationConfig(bucket string, ncfg *notificationConfig, obj Obje
defer objLock.Unlock()

// write object to path
sha256Sum := getSHA256Hash(buf)
_, err = obj.PutObject(minioMetaBucket, ncPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", sha256Sum), nil)
hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
if err != nil {
errorIf(err, "Unable to write bucket notification configuration.")
return err
}
_, err = obj.PutObject(minioMetaBucket, ncPath, hashReader, nil)
if err != nil {
errorIf(err, "Unable to write bucket notification configuration.")
return err
@@ -496,12 +501,19 @@ func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer
defer objLock.Unlock()

// write object to path
sha256Sum := getSHA256Hash(buf)
_, err = obj.PutObject(minioMetaBucket, lcPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", sha256Sum), nil)
hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf))
if err != nil {
errorIf(err, "Unable to write bucket listener configuration to object layer.")
return err
}
return err

// write object to path
_, err = obj.PutObject(minioMetaBucket, lcPath, hashReader, nil)
if err != nil {
errorIf(err, "Unable to write bucket listener configuration to object layer.")
return err
}
return nil
}

// Removes notification.xml for a given bucket, only used during DeleteBucket.

@@ -62,7 +62,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
notificationXML += "</NotificationConfiguration>"
size := int64(len([]byte(notificationXML)))
reader := bytes.NewReader([]byte(notificationXML))
if _, err := xl.PutObject(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, NewHashReader(reader, size, "", ""), nil); err != nil {
if _, err := xl.PutObject(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, mustGetHashReader(t, reader, size, "", ""), nil); err != nil {
t.Fatal("Unexpected error:", err)
}

@@ -23,6 +23,7 @@ import (
"path/filepath"
"testing"

"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)

@@ -236,11 +237,15 @@ func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {
bucket := "bucket"
object := "object"

_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
hashReader, err := hash.NewReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", "")
if err != nil {
return []StorageAPI{}, err
}

if _, err = obj.PutObject(bucket, object, hashReader, nil); err != nil {
return []StorageAPI{}, err
}

// Remove the content of export dir 10 but preserve .minio.sys because it is automatically
// created when minio starts
for i := 3; i <= 5; i++ {
@@ -326,7 +331,7 @@ func TestFormatXLHealCorruptedDisks(t *testing.T) {
bucket := "bucket"
object := "object"

_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
@@ -400,7 +405,7 @@ func TestFormatXLReorderByInspection(t *testing.T) {
bucket := "bucket"
object := "object"

_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}

@@ -52,7 +52,7 @@ func TestReadFSMetadata(t *testing.T) {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
if _, err := obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
if _, err := obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
t.Fatal("Unexpected err: ", err)
}

@@ -87,7 +87,7 @@ func TestWriteFSMetadata(t *testing.T) {
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
if _, err := obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
if _, err := obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
t.Fatal("Unexpected err: ", err)
}

@@ -17,7 +17,6 @@
package cmd

import (
"encoding/hex"
"fmt"
"io"
"os"
@@ -25,6 +24,7 @@ import (
"strings"
"time"

"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)

@@ -458,7 +458,12 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
}()

partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, NewHashReader(pipeReader, length, "", ""))
hashReader, err := hash.NewReader(pipeReader, length, "", "")
if err != nil {
return pi, toObjectErr(err, dstBucket, dstObject)
}

partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, hashReader)
if err != nil {
return pi, toObjectErr(err, dstBucket, dstObject)
}
@@ -473,7 +478,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
return pi, err
}
@@ -552,10 +557,6 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
// delete.
defer fsRemoveFile(fsPartPath)

if err = data.Verify(); err != nil {
return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
}

partPath := pathJoin(bucket, object, uploadID, partSuffix)
// Lock the part so that another part upload with same part-number gets blocked
// while the part is getting appended in the background.
@@ -570,9 +571,10 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}

md5hex := data.MD5HexString()

// Save the object part info in `fs.json`.
md5Hex := hex.EncodeToString(data.MD5())
fsMeta.AddObjectPart(partID, partSuffix, md5Hex, data.Size())
fsMeta.AddObjectPart(partID, partSuffix, md5hex, data.Size())
if _, err = fsMeta.WriteTo(rwlk); err != nil {
partLock.Unlock()
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
@@ -598,7 +600,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
return PartInfo{
PartNumber: partID,
LastModified: fi.ModTime(),
ETag: md5Hex,
ETag: md5hex,
Size: fi.Size(),
}, nil
}

@@ -183,7 +183,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
sha256sum := ""

fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
_, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), dataLen, md5Hex, sha256sum))
_, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
if !isSameType(errorCause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
@@ -212,7 +212,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {

md5Hex := getMD5Hash(data)

if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, "")); err != nil {
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
t.Fatal("Unexpected error ", err)
}

@@ -249,7 +249,7 @@ func TestCompleteMultipartUpload(t *testing.T) {

md5Hex := getMD5Hash(data)

if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, "")); err != nil {
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
t.Fatal("Unexpected error ", err)
}

@@ -283,7 +283,7 @@ func TestAbortMultipartUpload(t *testing.T) {

md5Hex := getMD5Hash(data)

if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, "")); err != nil {
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
t.Fatal("Unexpected error ", err)
}

@@ -317,7 +317,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
md5Hex := getMD5Hash(data)
sha256sum := ""

if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, sha256sum)); err != nil {
if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, sha256sum)); err != nil {
t.Fatal("Unexpected error ", err)
}

cmd/fs-v1.go
@@ -17,7 +17,6 @@
package cmd

import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
@@ -27,6 +26,7 @@ import (
"sort"
"syscall"

"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)

@@ -361,7 +361,12 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
}()

objInfo, err := fs.PutObject(dstBucket, dstObject, NewHashReader(pipeReader, length, metadata["etag"], ""), metadata)
hashReader, err := hash.NewReader(pipeReader, length, "", "")
if err != nil {
return oi, toObjectErr(err, dstBucket, dstObject)
}

objInfo, err := fs.PutObject(dstBucket, dstObject, hashReader, metadata)
if err != nil {
return oi, toObjectErr(err, dstBucket, dstObject)
}
@@ -540,7 +545,7 @@ func (fs fsObjects) parentDirIsObject(bucket, parent string) bool {
// until EOF, writes data directly to configured filesystem path.
// Additionally writes `fs.json` which carries the necessary metadata
// for future object operations.
func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
func (fs fsObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
// No metadata is set, allocate a new one.
if metadata == nil {
metadata = make(map[string]string)
@@ -588,6 +593,8 @@ func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, me
return ObjectInfo{}, traceError(errInvalidArgument)
}

metadata["etag"] = data.MD5HexString()

var wlk *lock.LockedFile
if bucket != minioMetaBucket {
bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
@@ -617,6 +624,7 @@ func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, me
if size := data.Size(); size > 0 && bufSize > size {
bufSize = size
}

buf := make([]byte, int(bufSize))
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size())
@@ -638,12 +646,6 @@ func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, me
// nothing to delete.
defer fsRemoveFile(fsTmpObjPath)

if err = data.Verify(); err != nil { // verify MD5 and SHA256
return ObjectInfo{}, traceError(err)
}

metadata["etag"] = hex.EncodeToString(data.MD5())

// Entire object was written to the temp location, now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {

@@ -46,7 +46,7 @@ func TestFSParentDirIsObject(t *testing.T) {
}
objectContent := "12345"
objInfo, err := obj.PutObject(bucketName, objectName,
NewHashReader(bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
if err != nil {
t.Fatal(err)
}
@@ -135,7 +135,7 @@ func TestFSShutdown(t *testing.T) {
fs := obj.(*fsObjects)
objectContent := "12345"
obj.MakeBucketWithLocation(bucketName, "")
obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
return fs, disk
}

@@ -205,7 +205,7 @@ func TestFSPutObject(t *testing.T) {
}

// With a regular object.
_, err := obj.PutObject(bucketName+"non-existent", objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err := obj.PutObject(bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@@ -214,7 +214,7 @@ func TestFSPutObject(t *testing.T) {
}

// With a directory object.
_, err = obj.PutObject(bucketName+"non-existent", objectName+"/", NewHashReader(bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
_, err = obj.PutObject(bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@@ -222,11 +222,11 @@ func TestFSPutObject(t *testing.T) {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}

_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
_, err = obj.PutObject(bucketName, objectName+"/1", NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err = obj.PutObject(bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
@@ -241,7 +241,7 @@ func TestFSPutObject(t *testing.T) {
}
}

_, err = obj.PutObject(bucketName, objectName+"/1/", NewHashReader(bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
_, err = obj.PutObject(bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
@@ -269,7 +269,7 @@ func TestFSDeleteObject(t *testing.T) {
objectName := "object"

obj.MakeBucketWithLocation(bucketName, "")
obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)

// Test with invalid bucket name
if err := fs.DeleteObject("fo", objectName); !isSameType(errorCause(err), BucketNameInvalid{}) {

@@ -34,6 +34,7 @@ import (
"github.com/Azure/azure-sdk-for-go/storage"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/hash"
)

const globalAzureAPIVersion = "2016-05-31"
@@ -521,8 +522,7 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,

// PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader.
func (a *azureObjects) PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
delete(metadata, "etag")
func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(metadata)
if err != nil {
@@ -532,12 +532,6 @@ func (a *azureObjects) PutObject(bucket, object string, data *HashReader, metada
if err != nil {
return objInfo, azureToObjectError(traceError(err), bucket, object)
}
if err = data.Verify(); err != nil {
errorIf(err, "Verification of uploaded object data failed against client provided checksums.")
derr := blob.Delete(nil)
errorIf(derr, "Failed to delete blob when cleaning up a bad blob upload.")
return ObjectInfo{}, azureToObjectError(traceError(err))
}
return a.GetObjectInfo(bucket, object)
}

@@ -624,7 +618,7 @@ func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[st
}

// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (info PartInfo, err error) {
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return info, err
}
@@ -633,7 +627,7 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
return info, err
}

etag := data.md5Sum
etag := data.MD5HexString()
if etag == "" {
// Generate random ETag.
etag = azureToS3ETag(getMD5Hash([]byte(mustGetUUID())))
@@ -658,13 +652,6 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
}
subPartNumber++
}
if err = data.Verify(); err != nil {
errorIf(err, "Verification of uploaded object data failed against client provided checksums.")
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
derr := blob.Delete(nil)
errorIf(derr, "Failed to delete blob when cleaning up a bad blob upload.")
return info, azureToObjectError(traceError(err), bucket, object)
}

info.PartNumber = partID
info.ETag = etag

@@ -30,6 +30,7 @@ import (

b2 "github.com/minio/blazer/base"
"github.com/minio/minio-go/pkg/policy"
h2 "github.com/minio/minio/pkg/hash"
)

// Supported bucket types by B2 backend.
@@ -371,13 +372,13 @@ const (
// being uploaded. Note that the content length is the size of the file plus 40
// of the original size of the reader.
//
// newB2Reader implements a B2 compatible reader by wrapping the HashReader into
// newB2Reader implements a B2 compatible reader by wrapping the hash.Reader into
// a new io.Reader which will emit out the sha1 hex digits at io.EOF.
// It also means that your overall content size is now original size + 40 bytes.
// Additionally this reader also verifies Hash encapsulated inside HashReader
// Additionally this reader also verifies Hash encapsulated inside hash.Reader
// at io.EOF if the verification failed we return an error and do not send
// the content to server.
func newB2Reader(r *HashReader, size int64) *B2Reader {
func newB2Reader(r *h2.Reader, size int64) *B2Reader {
return &B2Reader{
r: r,
size: size,
@@ -385,13 +386,13 @@ func newB2Reader(r *HashReader, size int64) *B2Reader {
}
}

// B2Reader - is a Reader wraps the HashReader which will emit out the sha1
// B2Reader - is a Reader wraps the hash.Reader which will emit out the sha1
// hex digits at io.EOF. It also means that your overall content size is
// now original size + 40 bytes. Additionally this reader also verifies
// Hash encapsulated inside HashReader at io.EOF if the verification
// Hash encapsulated inside hash.Reader at io.EOF if the verification
// failed we return an error and do not send the content to server.
type B2Reader struct {
r *HashReader
r *h2.Reader
size int64
sha1Hash hash.Hash

@@ -408,10 +409,6 @@ func (nb *B2Reader) Read(p []byte) (int, error) {
// Read into hash to update the on going checksum.
n, err := io.TeeReader(nb.r, nb.sha1Hash).Read(p)
if err == io.EOF {
// Verify checksum at io.EOF
if err = nb.r.Verify(); err != nil {
return n, err
}
// Stream is not corrupted on this end
// now fill in the last 40 bytes of sha1 hex
// so that the server can verify the stream on
@@ -424,7 +421,7 @@ func (nb *B2Reader) Read(p []byte) (int, error) {
}

// PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB.
func (l *b2Objects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (ObjectInfo, error) {
func (l *b2Objects) PutObject(bucket string, object string, data *h2.Reader, metadata map[string]string) (ObjectInfo, error) {
var objInfo ObjectInfo
bkt, err := l.Bucket(bucket)
if err != nil {
@@ -432,7 +429,6 @@ func (l *b2Objects) PutObject(bucket string, object string, data *HashReader, me
}
contentType := metadata["content-type"]
delete(metadata, "content-type")
delete(metadata, "etag")

var u *b2.URL
u, err = bkt.GetUploadURL(l.ctx)
@@ -550,7 +546,7 @@ func (l *b2Objects) CopyObjectPart(srcBucket string, srcObject string, destBucke
}

// PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *HashReader) (pi PartInfo, err error) {
func (l *b2Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi PartInfo, err error) {
bkt, err := l.Bucket(bucket)
if err != nil {
return pi, err

@@ -19,7 +19,6 @@ package cmd
import (
"context"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -40,6 +39,7 @@ import (

minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/hash"
)

var (
@@ -747,19 +747,13 @@ func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, er
}

// PutObject - Create a new object with the incoming data,
func (l *gcsGateway) PutObject(bucket string, key string, data *HashReader, metadata map[string]string) (ObjectInfo, error) {

func (l *gcsGateway) PutObject(bucket string, key string, data *hash.Reader, metadata map[string]string) (ObjectInfo, error) {
// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
// otherwise gcs will just return object not exist in case of non-existing bucket
if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket)
}

if _, err := hex.DecodeString(metadata["etag"]); err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
delete(metadata, "etag")

object := l.client.Bucket(bucket).Object(key)

w := object.NewWriter(l.ctx)
@@ -773,14 +767,10 @@ func (l *gcsGateway) PutObject(bucket string, key string, data *HashReader, meta
w.Close()
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
}

// Close the object writer upon success.
w.Close()

if err := data.Verify(); err != nil { // Verify sha256sum after close.
object.Delete(l.ctx)
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
}

attrs, err := object.Attrs(l.ctx)
if err != nil {
return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
@@ -861,11 +851,11 @@ func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID str
}

// PutObjectPart puts a part of object in bucket
func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *HashReader) (PartInfo, error) {
func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *hash.Reader) (PartInfo, error) {
if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil {
return PartInfo{}, err
}
etag := data.md5Sum
etag := data.MD5HexString()
if etag == "" {
// Generate random ETag.
etag = getMD5Hash([]byte(mustGetUUID()))
@@ -883,10 +873,6 @@ func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, p
// Make sure to close the object writer upon success.
w.Close()

if err := data.Verify(); err != nil {
object.Delete(l.ctx)
return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
}
return PartInfo{
PartNumber: partNumber,
ETag: etag,

@@ -28,6 +28,7 @@ import (

router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/hash"
)

// GetObjectHandler - GET Object
@@ -257,9 +258,6 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
}
}

// Make sure we hex encode md5sum here.
metadata["etag"] = hex.EncodeToString(md5Bytes)

// Lock the object.
objectLock := globalNSMutex.NewNSLock(bucket, object)
if objectLock.GetLock(globalOperationTimeout) != nil {
@@ -268,20 +266,30 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
}
defer objectLock.Unlock()

var objInfo ObjectInfo
var (
// Make sure we hex encode md5sum here.
md5hex = hex.EncodeToString(md5Bytes)
sha256hex = ""
putObject = objectAPI.PutObject
reader = r.Body
)

switch reqAuthType {
default:
// For all unknown auth types return error.
writeErrorResponse(w, ErrAccessDenied, r.URL)
return
case authTypeAnonymous:
// Create anonymous object.
objInfo, err = objectAPI.AnonPutObject(bucket, object, size, r.Body, metadata, "")
putObject = objectAPI.AnonPutObject
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
var s3Error APIErrorCode
reader, s3Error = newSignV4ChunkedReader(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(reader, size, "", ""), metadata)
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@@ -289,27 +297,24 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, "", ""), metadata)
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}

sha256sum := ""
if !skipContentSha256Cksum(r) {
sha256sum = getContentSha256Cksum(r)
sha256hex = getContentSha256Cksum(r)
}
}

// Create object.
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, "", sha256sum), metadata)
default:
// For all unknown auth types return error.
writeErrorResponse(w, ErrAccessDenied, r.URL)
hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

objInfo, err := putObject(bucket, object, hashReader, metadata)
if err != nil {
errorIf(err, "Unable to save an object %s", r.URL.Path)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)

@@ -21,6 +21,7 @@ import (

router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/pkg/hash"
)

// GatewayLayer - Interface to implement gateway mode.
@@ -30,7 +31,7 @@ type GatewayLayer interface {
AnonGetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
AnonGetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)

AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error)
AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (ObjectInfo, error)

SetBucketPolicies(string, policy.BucketAccessPolicy) error
GetBucketPolicies(string) (policy.BucketAccessPolicy, error)

@@ -17,35 +17,15 @@
package cmd

import (
"encoding/hex"
"io"

minio "github.com/minio/minio-go"
"github.com/minio/minio/pkg/hash"
)

// AnonPutObject creates a new object anonymously with the incoming data,
func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
var sha256sumBytes []byte

var err error
if sha256sum != "" {
sha256sumBytes, err = hex.DecodeString(sha256sum)
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
}

var md5sumBytes []byte
md5sum := metadata["etag"]
if md5sum != "" {
md5sumBytes, err = hex.DecodeString(md5sum)
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "etag")
}

oi, err := l.anonClient.PutObject(bucket, object, data, size, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
func (l *s3Objects) AnonPutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, e error) {
oi, err := l.anonClient.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}

@@ -20,11 +20,10 @@ import (
"io"
"net/http"

"encoding/hex"

minio "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio/pkg/hash"
)

// s3ToObjectError converts Minio errors to minio object layer errors.
@@ -84,7 +83,7 @@ func s3ToObjectError(err error, params ...string) error {
Object: object,
}
case "XAmzContentSHA256Mismatch":
err = SHA256Mismatch{}
err = hash.SHA256Mismatch{}
case "NoSuchUpload":
err = InvalidUploadID{}
case "EntityTooSmall":
@@ -343,17 +342,8 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
}

// PutObject creates a new object with the incoming data,
func (l *s3Objects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
sha256sumBytes, err := hex.DecodeString(data.sha256Sum)
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
md5sumBytes, err := hex.DecodeString(metadata["etag"])
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "etag")
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
func (l *s3Objects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5(), data.SHA256(), toMinioClientMetadata(metadata))
if err != nil {
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
@@ -472,18 +462,8 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
}

// PutObjectPart puts a part of object in bucket
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
md5HexBytes, err := hex.DecodeString(data.md5Sum)
if err != nil {
return pi, s3ToObjectError(traceError(err), bucket, object)
}

sha256sumBytes, err := hex.DecodeString(data.sha256Sum)
if err != nil {
return pi, err
}

info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), md5HexBytes, sha256sumBytes)
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5(), data.SHA256())
if err != nil {
return pi, s3ToObjectError(traceError(err), bucket, object)
}

@@ -21,6 +21,7 @@ import (
"testing"

minio "github.com/minio/minio-go"
"github.com/minio/minio/pkg/hash"
)

func errResponse(code string) minio.ErrorResponse {
@@ -75,7 +76,7 @@ func TestS3ToObjectError(t *testing.T) {
},
{
inputErr: errResponse("XAmzContentSHA256Mismatch"),
expectedErr: SHA256Mismatch{},
expectedErr: hash.SHA256Mismatch{},
},
{
inputErr: errResponse("EntityTooSmall"),

@@ -16,7 +16,9 @@

package cmd

import "io"
import (
"github.com/minio/minio/pkg/hash"
)

type gatewayUnsupported struct{}

@@ -70,7 +72,7 @@ func (a gatewayUnsupported) AnonGetBucketInfo(bucket string) (bi BucketInfo, err
}

// AnonPutObject creates a new object anonymously with the incoming data,
func (a gatewayUnsupported) AnonPutObject(bucket, object string, size int64, data io.Reader,
metadata map[string]string, sha256sum string) (ObjectInfo, error) {
func (a gatewayUnsupported) AnonPutObject(bucket, object string, data *hash.Reader,
metadata map[string]string) (ObjectInfo, error) {
return ObjectInfo{}, traceError(NotImplemented{})
}

@@ -190,10 +190,11 @@ func logIf(level logrus.Level, source string, err error, msg string, data ...int
switch err.(type) {
case BucketNotFound, BucketNotEmpty, BucketExists:
ok = true
case ObjectNotFound, ObjectExistsAsDirectory, BucketPolicyNotFound, InvalidUploadID, BadDigest:
case ObjectNotFound, ObjectExistsAsDirectory:
ok = true
case BucketPolicyNotFound, InvalidUploadID:
ok = true
}

return ok
}

@@ -93,8 +93,6 @@ func toObjectErr(err error, params ...string) error {
err = InsufficientWriteQuorum{}
case io.ErrUnexpectedEOF, io.ErrShortWrite:
err = IncompleteBody{}
case errContentSHA256Mismatch:
err = SHA256Mismatch{}
}
if ok {
e.e = err
@@ -103,13 +101,6 @@ func toObjectErr(err error, params ...string) error {
return err
}

// SHA256Mismatch - when content sha256 does not match with what was sent from client.
type SHA256Mismatch struct{}

func (e SHA256Mismatch) Error() string {
return "sha256 computed does not match with what is expected"
}

// SignatureDoesNotMatch - when content md5 does not match with what was sent from client.
type SignatureDoesNotMatch struct{}

@@ -200,16 +191,6 @@ func (e BucketExists) Error() string {
return "Bucket exists: " + e.Bucket
}

// BadDigest - Content-MD5 you specified did not match what we received.
type BadDigest struct {
ExpectedMD5 string
CalculatedMD5 string
}

func (e BadDigest) Error() string {
return "Bad digest: Expected " + e.ExpectedMD5 + " is not valid with what we calculated " + e.CalculatedMD5
}

// UnsupportedDelimiter - unsupported delimiter.
type UnsupportedDelimiter struct {
Delimiter string

@@ -68,7 +68,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -217,7 +217,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -330,7 +330,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)

@ -33,7 +33,7 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", NewHashReader(bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}

@ -17,13 +17,9 @@
package cmd

import (
"bytes"
"crypto/md5"
"encoding/hex"
"hash"
"io"

sha256 "github.com/minio/sha256-simd"
"github.com/minio/minio/pkg/hash"
)

// ObjectLayer implements primitives for object API layer.
@ -42,7 +38,7 @@ type ObjectLayer interface {
// Object operations.
GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error)
DeleteObject(bucket, object string) error

@ -50,7 +46,7 @@ type ObjectLayer interface {
ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error)
CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error)
PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (info PartInfo, err error)
PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
AbortMultipartUpload(bucket, object, uploadID string) error
CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error)
@ -63,81 +59,3 @@ type ObjectLayer interface {
ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error)
}

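With this change the checksum-verifying reader is constructed once by the caller and handed to the object layer. A minimal sketch of the new call site, assuming an initialized ObjectLayer `obj`; the bucket, object, and payload names are hypothetical:

// Minimal sketch of the new PutObject call path (hypothetical names).
payload := []byte("hello, world")
reader, err := hash.NewReader(bytes.NewReader(payload), int64(len(payload)), "", "")
if err != nil {
	return err // a malformed md5/sha256 hex string fails here
}
// A checksum mismatch now surfaces from PutObject itself, because the
// reader verifies its digests when it reaches io.EOF.
if _, err = obj.PutObject("testbucket", "testobject", reader, nil); err != nil {
	return err
}
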
// HashReader writes what it reads from an io.Reader to an MD5 and SHA256 hash.Hash.
// HashReader verifies that the content of the io.Reader matches the expected checksums.
type HashReader struct {
src io.Reader
size int64
md5Hash, sha256Hash hash.Hash
md5Sum, sha256Sum string // hex representation
}

// NewHashReader returns a new HashReader computing the MD5 sum and SHA256 sum
// (if set) of the provided io.Reader.
func NewHashReader(src io.Reader, size int64, md5Sum, sha256Sum string) *HashReader {
var sha256Hash hash.Hash
if sha256Sum != "" {
sha256Hash = sha256.New()
}
if size >= 0 {
src = io.LimitReader(src, size)
} else {
size = -1
}
return &HashReader{
src: src,
size: size,
md5Sum: md5Sum,
sha256Sum: sha256Sum,
md5Hash: md5.New(),
sha256Hash: sha256Hash,
}
}

func (r *HashReader) Read(p []byte) (n int, err error) {
n, err = r.src.Read(p)
if err != nil && err != io.EOF {
return
}
if r.md5Hash != nil {
r.md5Hash.Write(p[:n])
}
if r.sha256Hash != nil {
r.sha256Hash.Write(p[:n])
}
return
}

// Size returns the absolute number of bytes the HashReader
// will return during reading. It returns -1 for unlimited
// data.
func (r *HashReader) Size() int64 { return r.size }

// MD5 returns the MD5 sum of the processed data. Any
// further reads will change the MD5 sum.
func (r *HashReader) MD5() []byte { return r.md5Hash.Sum(nil) }

// Verify verifies if the computed MD5 sum - and SHA256 sum - are
// equal to the ones specified when creating the HashReader.
func (r *HashReader) Verify() error {
if r.sha256Hash != nil {
sha256Sum, err := hex.DecodeString(r.sha256Sum)
if err != nil {
return SHA256Mismatch{}
}
if !bytes.Equal(sha256Sum, r.sha256Hash.Sum(nil)) {
return errContentSHA256Mismatch
}
}
if r.md5Hash != nil && r.md5Sum != "" {
md5Sum, err := hex.DecodeString(r.md5Sum)
if err != nil {
return BadDigest{r.md5Sum, hex.EncodeToString(r.md5Hash.Sum(nil))}
}
if sum := r.md5Hash.Sum(nil); !bytes.Equal(md5Sum, sum) {
return BadDigest{r.md5Sum, hex.EncodeToString(sum)}
}
}
return nil
}
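All of the above is deleted: per this commit, checksum verification moves inside the reader's own Read, so there is no separate Verify step to race with concurrent access. A hedged sketch of the verify-at-EOF pattern; the field and helper names below are illustrative, not necessarily those of the new hash package:

// Illustrative verify-at-EOF pattern, not the package's literal code.
func (r *Reader) Read(p []byte) (n int, err error) {
	n, err = r.src.Read(p)
	if n > 0 {
		r.md5Hash.Write(p[:n]) // keep running digests up to date
	}
	if err == io.EOF {
		// Digests are checked exactly once, at end of stream, so callers
		// never need (or get a chance) to call Verify separately.
		if verr := r.verify(); verr != nil {
			return n, verr
		}
	}
	return
}
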
@ -18,6 +18,8 @@ package cmd

import (
"bytes"
"crypto/md5"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
@ -65,7 +67,9 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"obj2", "obj2", nil},
}
for _, object := range testObjects {
_, err = obj.PutObject(testBuckets[0], object.name, NewHashReader(bytes.NewBufferString(object.content), int64(len(object.content)), object.meta["etag"], ""), object.meta)
md5Bytes := md5.Sum([]byte(object.content))
_, err = obj.PutObject(testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), object.meta)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
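The new md5Bytes computation is load-bearing, not cosmetic: because hash.Reader verifies the supplied MD5 hex against the streamed content at io.EOF, a placeholder etag would now fail the upload itself. The pattern, for reference (`content` stands in for the object payload):

// The etag must be the real, hex-encoded MD5 of the payload; otherwise
// PutObject fails with hash.BadDigest at end of stream.
md5Bytes := md5.Sum([]byte(content))
etag := hex.EncodeToString(md5Bytes[:])
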
@ -550,7 +554,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
t.Errorf("Test %d: %s: Expected object name to be \"%s\", but found \"%s\" instead", i+1, instanceType, testCase.result.Objects[j].Name, result.Objects[j].Name)
}
if result.Objects[j].ETag == "" {
t.Errorf("Test %d: %s: Expected md5sum to be not empty, but found empty instead", i+1, instanceType)
t.Errorf("Test %d: %s: Expected ETag to be not empty, but found empty instead", i+1, instanceType)
}

}
@ -607,7 +611,7 @@ func BenchmarkListObjects(b *testing.B) {
// Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(bucket, key, NewHashReader(bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
_, err = obj.PutObject(bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
if err != nil {
b.Fatal(err)
}

@ -24,6 +24,7 @@ import (
"testing"

humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/hash"
)

// Wrapper for calling NewMultipartUpload tests for both XL multiple disks and single node setup.
@ -218,7 +219,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -232,7 +233,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [

// Object part upload should fail with quorum not available.
testCase := createPartCases[len(createPartCases)-1]
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err == nil {
t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
}
@ -324,18 +325,21 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
{bucket, "none-object", uploadID, 1, "", "", "", 0, false, "", fmt.Errorf("%s", "Invalid upload id "+uploadID)},
// Test case - 12.
// Input to replicate Md5 mismatch.
{bucket, object, uploadID, 1, "", "a35", "", 0, false, "",
fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated "+"d41d8cd98f00b204e9800998ecf8427e")},
{bucket, object, uploadID, 1, "", "d41d8cd98f00b204e9800998ecf8427f", "", 0, false, "",
hash.BadDigest{"d41d8cd98f00b204e9800998ecf8427f", "d41d8cd98f00b204e9800998ecf8427e"}},
// Test case - 13.
// When incorrect sha256 is provided.
{bucket, object, uploadID, 1, "", "", "incorrect-sha256", 0, false, "", SHA256Mismatch{}},
{bucket, object, uploadID, 1, "", "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854", 0, false, "",
hash.SHA256Mismatch{"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b854",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}},
// Test case - 14.
// Input with size more than the size of actual data inside the reader.
{bucket, object, uploadID, 1, "abcd", "a35", "", int64(len("abcd") + 1), false, "", IncompleteBody{}},
{bucket, object, uploadID, 1, "abcd", "e2fc714c4727ee9395f324cd2e7f3335", "", int64(len("abcd") + 1), false, "",
hash.BadDigest{"e2fc714c4727ee9395f324cd2e7f3335", "e2fc714c4727ee9395f324cd2e7f331f"}},
// Test case - 15.
// Input with size less than the size of actual data inside the reader.
{bucket, object, uploadID, 1, "abcd", "a35", "", int64(len("abcd") - 1), false, "",
fmt.Errorf("%s", "Bad digest: Expected a35 is not valid with what we calculated 900150983cd24fb0d6963f7d28e17f72")},
{bucket, object, uploadID, 1, "abcd", "900150983cd24fb0d6963f7d28e17f73", "", int64(len("abcd") - 1), false, "",
hash.BadDigest{"900150983cd24fb0d6963f7d28e17f73", "900150983cd24fb0d6963f7d28e17f72"}},

// Test case - 16-19.
// Validating for success cases.
@ -347,7 +351,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH

// Validate all the test cases.
for i, testCase := range testCases {
actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
// All the test cases above are expected to fail.
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -481,7 +485,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1336,7 +1340,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1576,7 +1580,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1825,7 +1829,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}

@ -26,6 +26,7 @@ import (
"testing"

humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/hash"
)

func md5Header(data []byte) map[string]string {
@ -33,7 +34,7 @@ ...
}

// Wrapper for calling PutObject tests for both XL multiple disks and single node setup.
func TestObjectAPIPutObject(t *testing.T) {
func TestObjectAPIPutObjectSingle(t *testing.T) {
ExecObjectLayerTest(t, testObjectAPIPutObject)
}

@ -94,22 +95,25 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl

// Test case - 7.
// Input to replicate Md5 mismatch.
{bucket, object, []byte(""), map[string]string{"etag": "a35"}, "", 0, "",
BadDigest{ExpectedMD5: "a35", CalculatedMD5: "d41d8cd98f00b204e9800998ecf8427e"}},
{bucket, object, []byte(""), map[string]string{"etag": "d41d8cd98f00b204e9800998ecf8427f"}, "", 0, "",
hash.BadDigest{"d41d8cd98f00b204e9800998ecf8427f", "d41d8cd98f00b204e9800998ecf8427e"}},

// Test case - 8.
// With incorrect sha256.
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"}, "incorrect-sha256", int64(len("abcd")), "", SHA256Mismatch{}},
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331f"},
"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", int64(len("abcd")),
"", hash.SHA256Mismatch{"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580",
"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589"}},

// Test case - 9.
// Input with size more than the size of actual data inside the reader.
{bucket, object, []byte("abcd"), map[string]string{"etag": "a35"}, "", int64(len("abcd") + 1), "",
IncompleteBody{}},
{bucket, object, []byte("abcd"), map[string]string{"etag": "e2fc714c4727ee9395f324cd2e7f331e"}, "", int64(len("abcd") + 1), "",
hash.BadDigest{"e2fc714c4727ee9395f324cd2e7f331e", "e2fc714c4727ee9395f324cd2e7f331f"}},

// Test case - 10.
// Input with size less than the size of actual data inside the reader.
{bucket, object, []byte("abcd"), map[string]string{"etag": "a35"}, "", int64(len("abcd") - 1), "",
BadDigest{ExpectedMD5: "a35", CalculatedMD5: "900150983cd24fb0d6963f7d28e17f72"}},
{bucket, object, []byte("abcd"), map[string]string{"etag": "900150983cd24fb0d6963f7d28e17f73"}, "", int64(len("abcd") - 1), "",
hash.BadDigest{"900150983cd24fb0d6963f7d28e17f73", "900150983cd24fb0d6963f7d28e17f72"}},

// Test case - 11-14.
// Validating for success cases.
@ -138,9 +142,12 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl

// Test case 24-26.
// data with invalid md5sum in header
{bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data), BadDigest{invalidMD5, getMD5Hash(data)}},
{bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes), BadDigest{invalidMD5, getMD5Hash(nilBytes)}},
{bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes), BadDigest{invalidMD5, getMD5Hash(fiveMBBytes)}},
{bucket, object, data, invalidMD5Header, "", int64(len(data)), getMD5Hash(data),
hash.BadDigest{invalidMD5, getMD5Hash(data)}},
{bucket, object, nilBytes, invalidMD5Header, "", int64(len(nilBytes)), getMD5Hash(nilBytes),
hash.BadDigest{invalidMD5, getMD5Hash(nilBytes)}},
{bucket, object, fiveMBBytes, invalidMD5Header, "", int64(len(fiveMBBytes)), getMD5Hash(fiveMBBytes),
hash.BadDigest{invalidMD5, getMD5Hash(fiveMBBytes)}},

// Test case 27-29.
// data with size different from the actual number of bytes available in the reader
@ -154,7 +161,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
}

for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.expectedError == nil {
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
@ -228,7 +235,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di

sha256sum := ""
for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -278,7 +285,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
InsufficientWriteQuorum{},
}

_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
@ -311,7 +318,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk

data := []byte("hello, world")
// Create object.
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil {
// Failed to create object, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -356,7 +363,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer.Write(fiveMBBytes)
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
sha256sum := ""
_, err = obj.PutObjectPart(bucket, object, uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
_, err = obj.PutObjectPart(bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -367,7 +374,7 @@ ...
md5Writer = md5.New()
md5Writer.Write(data)
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
_, err = obj.PutObjectPart(bucket, object, uploadID, 2, NewHashReader(bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
_, err = obj.PutObjectPart(bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())

@ -27,6 +27,7 @@ import (
"strconv"

mux "github.com/gorilla/mux"
"github.com/minio/minio/pkg/hash"
)

// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request.
@ -287,6 +288,9 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
// Extract metadata relevant for a CopyObject operation based on conditional
// header values specified in X-Amz-Metadata-Directive.
func getCpObjMetadataFromHeader(header http.Header, defaultMeta map[string]string) (map[string]string, error) {
// Make sure to remove saved etag if any, CopyObject calculates a new one.
delete(defaultMeta, "etag")

// if x-amz-metadata-directive says REPLACE then
// we extract metadata from the input headers.
if isMetadataReplace(header) {
@ -389,12 +393,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
return
}

defaultMeta := objInfo.UserDefined

// Make sure to remove saved etag, CopyObject calculates a new one.
delete(defaultMeta, "etag")

newMetadata, err := getCpObjMetadataFromHeader(r.Header, defaultMeta)
newMetadata, err := getCpObjMetadataFromHeader(r.Header, objInfo.UserDefined)
if err != nil {
errorIf(err, "found invalid http request header")
writeErrorResponse(w, ErrInternalError, r.URL)
@ -520,11 +519,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
}

// Make sure we hex encode md5sum here.
metadata["etag"] = hex.EncodeToString(md5Bytes)

sha256sum := ""

// Lock the object.
objectLock := globalNSMutex.NewNSLock(bucket, object)
if objectLock.GetLock(globalObjectTimeout) != nil {
@ -533,7 +527,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
defer objectLock.Unlock()

var objInfo ObjectInfo
var (
md5hex = hex.EncodeToString(md5Bytes)
sha256hex = ""
reader = r.Body
)

switch rAuthType {
default:
// For all unknown auth types return error.
@ -547,17 +546,15 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, s3Error, r.URL)
return
}
// Create anonymous object.
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
var s3Error APIErrorCode
reader, s3Error = newSignV4ChunkedReader(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(reader, size, metadata["etag"], sha256sum), metadata)
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@ -565,7 +562,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
@ -573,11 +569,18 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}
if !skipContentSha256Cksum(r) {
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
sha256hex = r.Header.Get("X-Amz-Content-Sha256")
}
// Create object.
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
}

hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

// Create the object.
objInfo, err := objectAPI.PutObject(bucket, object, hashReader, metadata)
if err != nil {
errorIf(err, "Unable to create an object. %s", r.URL.Path)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
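Taken together, the rewritten handler first settles the auth-specific inputs (md5hex, sha256hex, reader) and only then builds a single hash.Reader for every path. A condensed sketch of the flow shown above, with error handling elided:

// Condensed sketch of the new PutObjectHandler flow (error paths elided).
md5hex := hex.EncodeToString(md5Bytes)
sha256hex := ""
reader := io.ReadCloser(r.Body)
switch rAuthType {
case authTypeStreamingSigned:
	reader, _ = newSignV4ChunkedReader(r) // now an io.ReadCloser too
case authTypePresigned, authTypeSigned:
	if !skipContentSha256Cksum(r) {
		sha256hex = r.Header.Get("X-Amz-Content-Sha256")
	}
}
hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex)
if err == nil {
	// Digest mismatches (hash.BadDigest, hash.SHA256Mismatch) are raised
	// by hashReader's Read at io.EOF, inside PutObject.
	_, err = objectAPI.PutObject(bucket, object, hashReader, metadata)
}
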
@ -821,9 +824,12 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
}

var partInfo PartInfo
incomingMD5 := hex.EncodeToString(md5Bytes)
sha256sum := ""
var (
md5hex = hex.EncodeToString(md5Bytes)
sha256hex = ""
reader = r.Body
)

switch rAuthType {
default:
// For all unknown auth types return error.
@ -831,23 +837,20 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
case authTypeAnonymous:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html
sourceIP := getSourceIPAddress(r)
if s3Error := enforceBucketPolicy(bucket, "s3:PutObject", r.URL.Path,
r.Referer(), sourceIP, r.URL.Query()); s3Error != ErrNone {
r.Referer(), getSourceIPAddress(r), r.URL.Query()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
}
// No need to verify signature, anonymous request access is already allowed.
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
var s3Error APIErrorCode
reader, s3Error = newSignV4ChunkedReader(r)
if s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
writeErrorResponse(w, s3Error, r.URL)
return
}
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(reader, size, incomingMD5, sha256sum))
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@ -855,7 +858,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
writeErrorResponse(w, s3Error, r.URL)
return
}
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
@ -864,10 +866,19 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
}

if !skipContentSha256Cksum(r) {
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
sha256hex = r.Header.Get("X-Amz-Content-Sha256")
}
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
}

hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex)
if err != nil {
errorIf(err, "Unable to create object part.")
// Verify if the underlying error is signature mismatch.
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

partInfo, err := objectAPI.PutObjectPart(bucket, object, uploadID, partID, hashReader)
if err != nil {
errorIf(err, "Unable to create object part.")
// Verify if the underlying error is signature mismatch.

@ -75,7 +75,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err := obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -222,7 +222,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err := obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1056,7 +1056,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName,
NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1170,7 +1170,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1511,7 +1511,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -2154,7 +2154,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -2509,7 +2509,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -2652,7 +2652,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -3358,7 +3358,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
uploadIDCopy := uploadID

// create an object Part, will be used to test list object parts.
_, err = obj.PutObjectPart(bucketName, testObject, uploadID, 1, NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
_, err = obj.PutObjectPart(bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
if err != nil {
t.Fatalf("Minio %s : %s.", instanceType, err)
}

@ -104,7 +104,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
expectedETaghex := getMD5Hash(data)

var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, NewHashReader(bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
if err != nil {
t.Errorf("%s: <ERROR> %s", instanceType, err)
}
@ -154,7 +154,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan

metadata["md5"] = expectedETaghex
var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, NewHashReader(bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -195,7 +195,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
metadata := make(map[string]string)
metadata["etag"] = expectedETaghex
var objInfo ObjectInfo
objInfo, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
objInfo, err = obj.PutObject("bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -248,7 +248,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check before paging occurs.
for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
_, err = obj.PutObject("bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -268,7 +268,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check after paging occurs pages work.
for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
_, err = obj.PutObject("bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -285,11 +285,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
}
// check paging with prefix at end returns less objects.
{
_, err = obj.PutObject("bucket", "newPrefix", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
_, err = obj.PutObject("bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject("bucket", "newPrefix2", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
_, err = obj.PutObject("bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -327,11 +327,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {

// check delimited results with delimiter and prefix.
{
_, err = obj.PutObject("bucket", "this/is/delimited", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
_, err = obj.PutObject("bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
_, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -442,14 +442,14 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan

uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
length := int64(len(uploadContent))
_, err = obj.PutObject("bucket", "object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}

uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
length = int64(len(uploadContent))
_, err = obj.PutObject("bucket", "object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -471,7 +471,7 @@ func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) {

// Tests validate that bucket operation on non-existent bucket fails.
func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
_, err := obj.PutObject("bucket1", "object", NewHashReader(bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
_, err := obj.PutObject("bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
if err == nil {
t.Fatal("Expected error but found nil")
}
@ -518,7 +518,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
}

var bytesBuffer1 bytes.Buffer
_, err = obj.PutObject("bucket", "object", NewHashReader(readerEOF, length, "", ""), nil)
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -531,7 +531,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
}

var bytesBuffer2 bytes.Buffer
_, err = obj.PutObject("bucket", "object", NewHashReader(readerNoEOF, length, "", ""), nil)
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -559,7 +559,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
upload might have been aborted or completed.`
length := int64(len(uploadContent))
_, err = obj.PutObject("bucket", "dir1/dir2/object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
_, err = obj.PutObject("bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -731,7 +731,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
}
content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
length := int64(len(content))
_, err = obj.PutObject(bucketName, "dir1/dir3/object", NewHashReader(bytes.NewBufferString(content), length, "", ""), nil)
_, err = obj.PutObject(bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil)

if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
@ -775,7 +775,7 @@ func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
}
uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
// Test empty.
_, err = obj.PutObject("bucket", "minio.png", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
_, err = obj.PutObject("bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}

@ -164,7 +164,7 @@ var errMalformedEncoding = errors.New("malformed chunked encoding")
//
// NewChunkedReader is not needed by normal applications. The http package
// automatically decodes chunking when reading response bodies.
func newSignV4ChunkedReader(req *http.Request) (io.Reader, APIErrorCode) {
func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, APIErrorCode) {
seedSignature, region, seedDate, errCode := calculateSeedSignature(req)
if errCode != ErrNone {
return nil, errCode
@ -242,6 +242,10 @@ func (cs chunkState) String() string {
return stateString
}

func (cr *s3ChunkedReader) Close() (err error) {
return nil
}
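The no-op Close is what allows newSignV4ChunkedReader to return an io.ReadCloser: the rewritten handlers initialize `reader` from r.Body (already an io.ReadCloser) and can reassign it to the chunked reader without changing the variable's type.
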
// Read - implements `io.Reader`, which transparently decodes
// the incoming AWS Signature V4 streaming signature.
func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {

@ -53,6 +53,7 @@ import (
"github.com/fatih/color"
router "github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio/pkg/hash"
)

// Tests should initNSLock only once.
@ -129,6 +130,14 @@ func calculateSignedChunkLength(chunkDataSize int64) int64 {
2 // CRLF
}

func mustGetHashReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *hash.Reader {
hr, err := hash.NewReader(data, size, md5hex, sha256hex)
if err != nil {
t.Fatal(err)
}
return hr
}
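A typical test call through this helper looks like the following; the bucket, object, and payload names are hypothetical:

// Hypothetical test usage of mustGetHashReader; empty md5/sha256 hex
// strings mean the reader hashes the data but verifies nothing.
body := []byte("hello")
_, err := obj.PutObject("bucket", "object",
	mustGetHashReader(t, bytes.NewReader(body), int64(len(body)), "", ""), nil)
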
// calculateStreamContentLength - calculates the length of the overall stream (data + metadata)
func calculateStreamContentLength(dataLen, chunkSize int64) int64 {
if dataLen <= 0 {
@ -1975,6 +1984,7 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
if err != nil {
t.Fatalf("Initialization of object layer failed for single node setup: %s", err)
}

// Executing the object layer tests for single node setup.
objTest(objLayer, FSTestStr, t)

@ -27,9 +27,6 @@ var errSignatureMismatch = errors.New("Signature does not match")
// used when token used for authentication by the MinioBrowser has expired
var errInvalidToken = errors.New("Invalid token")

// If x-amz-content-sha256 header value mismatches with what we calculate.
var errContentSHA256Mismatch = errors.New("Content checksum SHA256 mismatch")

// used when we deal with data larger than expected
var errSizeUnexpected = errors.New("Data size larger than expected")

@ -36,6 +36,7 @@ import (
"github.com/gorilla/rpc/v2/json2"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio/browser"
"github.com/minio/minio/pkg/hash"
)

// WebGenericArgs - empty struct for calls that don't accept arguments
@ -541,8 +542,13 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
}
defer objectLock.Unlock()

sha256sum := ""
objInfo, err := objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
hashReader, err := hash.NewReader(r.Body, size, "", "")
if err != nil {
writeWebErrorResponse(w, err)
return
}

objInfo, err := objectAPI.PutObject(bucket, object, hashReader, metadata)
if err != nil {
writeWebErrorResponse(w, err)
return
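Note that the browser Upload path now constructs its hash.Reader with empty md5 and sha256 hex strings: the upload is no longer checked against a browser-supplied etag, and the reader simply computes digests as the body streams through.
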
@ -1071,7 +1077,7 @@ func toWebAPIError(err error) APIError {
return getAPIError(ErrBucketAlreadyOwnedByYou)
case BucketNameInvalid:
return getAPIError(ErrInvalidBucketName)
case BadDigest:
case hash.BadDigest:
return getAPIError(ErrBadDigest)
case IncompleteBody:
return getAPIError(ErrIncompleteBody)

@ -38,6 +38,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/hash"
)

// Tests private function writeWebErrorResponse.
@ -61,7 +62,7 @@ func TestWriteWebErrorResponse(t *testing.T) {
apiErrCode: ErrInvalidBucketName,
},
{
webErr: BadDigest{},
webErr: hash.BadDigest{},
apiErrCode: ErrBadDigest,
},
{
@ -383,7 +384,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa

data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)

if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
@ -477,14 +478,14 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH

data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}

objectName = "a/object"
metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
@ -865,7 +866,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl

content := []byte("temporary file's content")
metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
@ -957,9 +958,9 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa
t.Fatalf("%s : %s", instanceType, err)
}

obj.PutObject(bucket, "a/one", NewHashReader(strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
obj.PutObject(bucket, "a/b/two", NewHashReader(strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
obj.PutObject(bucket, "a/c/three", NewHashReader(strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)
obj.PutObject(bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
obj.PutObject(bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
obj.PutObject(bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)

test := func(token string) (int, []byte) {
rec := httptest.NewRecorder()
@ -1044,7 +1045,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH

data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}

@ -48,7 +48,7 @@ func TestXLParentDirIsObject(t *testing.T) {
|
||||
}
|
||||
objectContent := "12345"
|
||||
objInfo, err := obj.PutObject(bucketName, objectName,
|
||||
NewHashReader(bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
|
||||
mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -220,7 +220,7 @@ func TestListOnlineDisks(t *testing.T) {
|
||||
t.Fatalf("Failed to make a bucket %v", err)
|
||||
}
|
||||
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
@ -358,7 +358,7 @@ func TestDisksWithAllParts(t *testing.T) {
|
||||
t.Fatalf("Failed to make a bucket %v", err)
|
||||
}
|
||||
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
|
@ -491,7 +491,7 @@ func TestHealObjectXL(t *testing.T) {
|
||||
|
||||
var uploadedParts []completePart
|
||||
for _, partID := range []int{2, 1} {
|
||||
pInfo, err1 := obj.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""))
|
||||
pInfo, err1 := obj.PutObjectPart(bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""))
|
||||
if err1 != nil {
|
||||
t.Fatalf("Failed to upload a part - %v", err1)
|
||||
}
|
||||
|
@ -56,14 +56,14 @@ func TestListObjectsHeal(t *testing.T) {
|
||||
|
||||
// Put 5 objects under sane dir
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
|
||||
}
|
||||
}
|
||||
// Put 5 objects under unsane/subdir dir
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
|
||||
}
|
||||
@ -181,7 +181,7 @@ func TestListUploadsHeal(t *testing.T) {
|
||||
// Upload a part.
|
||||
data := bytes.Repeat([]byte("a"), 1024)
|
||||
_, err = xl.PutObjectPart(bucketName, objName, uploadID, 1,
|
||||
NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""))
|
||||
mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
|
||||
// iterate through the above set of inputs and upload the object.
|
||||
for i, input := range putObjectInputs {
|
||||
// uploading the object.
|
||||
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
|
||||
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
|
||||
// if object upload fails stop the test.
|
||||
if err != nil {
|
||||
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
||||
@ -150,7 +150,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
|
||||
sha256sum := ""
|
||||
// Iterating over createPartCases to generate multipart chunks.
|
||||
for _, testCase := range createPartCases {
|
||||
_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
||||
_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
||||
if perr != nil {
|
||||
t.Fatalf("%s : %s", instanceType, perr)
|
||||
}
|
||||
|
@ -17,7 +17,6 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@ -26,6 +25,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/mimedb"
|
||||
)
|
||||
|
||||
@ -555,7 +555,12 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
|
||||
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
|
||||
}()
|
||||
|
||||
partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, NewHashReader(pipeReader, length, "", ""))
|
||||
hashReader, err := hash.NewReader(pipeReader, length, "", "")
|
||||
if err != nil {
|
||||
return pi, toObjectErr(err, dstBucket, dstObject)
|
||||
}
|
||||
|
||||
partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, hashReader)
|
||||
if err != nil {
|
||||
return pi, toObjectErr(err, dstBucket, dstObject)
|
||||
}
|
||||
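hash.NewReader can fail (invalid md5/sha256 hex, or a nested *hash.Reader), which is why the one-line NewHashReader call becomes a two-step construct-and-check in the hunk above; a minimal sketch of the failure mode, reusing the surrounding pipeReader/length variables:

// "zz" is not valid hex, so construction fails before any data is read.
if _, err := hash.NewReader(pipeReader, length, "zz", ""); err != nil {
    // handle the constructor error, e.g. toObjectErr(err, dstBucket, dstObject)
}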
@ -572,7 +577,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
|
||||
// of the multipart transaction.
|
||||
//
|
||||
// Implements S3 compatible Upload Part API.
|
||||
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
|
||||
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
|
||||
if err := checkPutObjectPartArgs(bucket, object, xl); err != nil {
|
||||
return pi, err
|
||||
}
|
||||
@ -651,10 +656,6 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
|
||||
return pi, traceError(IncompleteBody{})
|
||||
}
|
||||
|
||||
if err = data.Verify(); err != nil {
|
||||
return pi, toObjectErr(err, bucket, object)
|
||||
}
|
||||
|
||||
// post-upload check (write) lock
|
||||
postUploadIDLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket, uploadIDPath)
|
||||
if err = postUploadIDLock.GetLock(globalOperationTimeout); err != nil {
|
||||
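The Verify() call removed above is now redundant: verification happens inside hash.Reader.Read() itself once the stream hits io.EOF (see pkg/hash/reader.go later in this commit), so a checksum mismatch has already surfaced as the error of the final Read. A sketch of that EOF hook, mirroring the logic added below:

// Inside (*hash.Reader).Read, after reading from the wrapped source:
if err == io.EOF {
    if cerr := r.Verify(); cerr != nil {
        return 0, cerr // mismatch reported as the read error
    }
}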
@ -693,9 +694,10 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
|
||||
// Once part is successfully committed, proceed with updating XL metadata.
|
||||
xlMeta.Stat.ModTime = UTCNow()
|
||||
|
||||
md5hex := data.MD5HexString()
|
||||
|
||||
// Add the current part.
|
||||
md5Hex := hex.EncodeToString(data.MD5())
|
||||
xlMeta.AddObjectPart(partID, partSuffix, md5Hex, file.Size)
|
||||
xlMeta.AddObjectPart(partID, partSuffix, md5hex, file.Size)
|
||||
|
||||
for i, disk := range onlineDisks {
|
||||
if disk == OfflineDisk {
|
||||
@ -727,7 +729,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, d
|
||||
return PartInfo{
|
||||
PartNumber: partID,
|
||||
LastModified: fi.ModTime,
|
||||
ETag: md5Hex,
|
||||
ETag: md5hex,
|
||||
Size: fi.Size,
|
||||
}, nil
|
||||
}
|
||||
@ -942,6 +944,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
|
||||
|
||||
// Save successfully calculated md5sum.
|
||||
xlMeta.Meta["etag"] = s3MD5
|
||||
|
||||
uploadIDPath = path.Join(bucket, object, uploadID)
|
||||
tempUploadIDPath := uploadID
|
||||
|
||||
|
@ -17,13 +17,13 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
"github.com/minio/minio/pkg/mimedb"
|
||||
"github.com/minio/minio/pkg/objcache"
|
||||
)
|
||||
@ -113,7 +113,12 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
|
||||
pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
|
||||
}()
|
||||
|
||||
objInfo, err := xl.PutObject(dstBucket, dstObject, NewHashReader(pipeReader, length, metadata["etag"], ""), metadata)
|
||||
hashReader, err := hash.NewReader(pipeReader, length, "", "")
|
||||
if err != nil {
|
||||
return oi, toObjectErr(traceError(err), dstBucket, dstObject)
|
||||
}
|
||||
|
||||
objInfo, err := xl.PutObject(dstBucket, dstObject, hashReader, metadata)
|
||||
if err != nil {
|
||||
return oi, toObjectErr(err, dstBucket, dstObject)
|
||||
}
|
||||
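Note that the md5 argument changes from metadata["etag"] to "" in this hunk: the copied bytes are produced locally by GetObject, so there is nothing client-sent to verify. With empty checksum strings hash.NewReader allocates no hash state and Verify() is a no-op; a minimal illustration, assuming the pipeReader/length variables from the surrounding code:

// No md5/sha256 hex given: the reader only enforces the size limit.
hashReader, err := hash.NewReader(pipeReader, length, "", "")
if err != nil {
    return oi, toObjectErr(traceError(err), dstBucket, dstObject)
}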
@ -424,7 +429,7 @@ func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject
|
||||
// until EOF, erasure codes the data across all disk and additionally
|
||||
// writes `xl.json` which carries the necessary metadata for future
|
||||
// object operations.
|
||||
func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
|
||||
func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
|
||||
// This is a special case with size as '0' and object ends with
|
||||
// a slash separator, we treat it like a valid operation and
|
||||
// return success.
|
||||
@ -523,8 +528,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
|
||||
// Compute the path of current part
|
||||
tempErasureObj := pathJoin(uniqueID, partName)
|
||||
|
||||
// Calculate the size of the current part. AllowEmptyPart will always be true
|
||||
// if this is the first part and false otherwise.
|
||||
// Calculate the size of the current part.
|
||||
var curPartSize int64
|
||||
curPartSize, err = calculatePartSizeFromIdx(data.Size(), globalPutPartSize, partIdx)
|
||||
if err != nil {
|
||||
@ -533,6 +537,7 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
|
||||
|
||||
// Hint the filesystem to pre-allocate one continuous large block.
|
||||
// This is only an optimization.
|
||||
var curPartReader io.Reader
|
||||
if curPartSize > 0 {
|
||||
pErr := xl.prepareFile(minioMetaTmpBucket, tempErasureObj, curPartSize, storage.disks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks)
|
||||
if pErr != nil {
|
||||
@ -540,7 +545,13 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
|
||||
}
|
||||
}
|
||||
|
||||
file, erasureErr := storage.CreateFile(io.LimitReader(reader, curPartSize), minioMetaTmpBucket,
|
||||
if curPartSize < data.Size() {
|
||||
curPartReader = io.LimitReader(reader, curPartSize)
|
||||
} else {
|
||||
curPartReader = reader
|
||||
}
|
||||
|
||||
file, erasureErr := storage.CreateFile(curPartReader, minioMetaTmpBucket,
|
||||
tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
|
||||
if erasureErr != nil {
|
||||
return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
|
||||
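The curPartReader choice above is deliberate: only intermediate parts are capped with io.LimitReader, while the final part receives the hash.Reader directly so that CreateFile drains it to io.EOF, the moment the reader runs its checksum verification. The same branch, annotated:

// Intermediate part: stop after curPartSize bytes; the hash.Reader has
// not reached its own EOF yet, so verification is deferred.
// Final part: read the hash.Reader itself to EOF, triggering Verify().
if curPartSize < data.Size() {
    curPartReader = io.LimitReader(reader, curPartSize)
} else {
    curPartReader = reader
}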
@ -555,44 +566,20 @@ func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, me
|
||||
// Update the total written size
|
||||
sizeWritten += file.Size
|
||||
|
||||
// allowEmpty creating empty erasure file only when this is the first part. This flag is useful
|
||||
// when size == -1 because in this case, we are not able to predict how many parts we will have.
|
||||
allowEmpty := partIdx == 1
|
||||
if file.Size > 0 || allowEmpty {
|
||||
for i := range partsMetadata {
|
||||
partsMetadata[i].AddObjectPart(partIdx, partName, "", file.Size)
|
||||
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, file.Algorithm, file.Checksums[i]})
|
||||
}
|
||||
for i := range partsMetadata {
|
||||
partsMetadata[i].AddObjectPart(partIdx, partName, "", file.Size)
|
||||
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, file.Algorithm, file.Checksums[i]})
|
||||
}
|
||||
|
||||
// If we didn't write anything or we know that the next part doesn't have any
|
||||
// data to write, we should quit this loop immediately
|
||||
if file.Size == 0 {
|
||||
// We wrote everything, break out.
|
||||
if sizeWritten == data.Size() {
|
||||
break
|
||||
}
|
||||
|
||||
// Check part size for the next index.
|
||||
var partSize int64
|
||||
partSize, err = calculatePartSizeFromIdx(data.Size(), globalPutPartSize, partIdx+1)
|
||||
if err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
if partSize == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if sizeWritten < data.Size() {
|
||||
return ObjectInfo{}, traceError(IncompleteBody{})
|
||||
}
|
||||
|
||||
// Save additional erasureMetadata.
|
||||
modTime := UTCNow()
|
||||
|
||||
if err = data.Verify(); err != nil {
|
||||
return ObjectInfo{}, toObjectErr(err, bucket, object)
|
||||
}
|
||||
metadata["etag"] = hex.EncodeToString(data.MD5())
|
||||
metadata["etag"] = data.MD5HexString()
|
||||
|
||||
// Guess content-type from the extension if possible.
|
||||
if metadata["content-type"] == "" {
|
||||
|
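As in PutObjectPart, the explicit Verify() disappears here because the loop above already read the hash.Reader to io.EOF; by the time the etag is recorded, the content is known to match the client-sent checksums. MD5HexString() simply re-encodes that client-supplied MD5, matching the accessor defined in pkg/hash/reader.go later in this commit:

// MD5HexString returns the hex of the client-sent md5sum held by the reader.
func (r *Reader) MD5HexString() string {
    return hex.EncodeToString(r.md5sum)
}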
@ -53,12 +53,12 @@ func TestRepeatPutObjectPart(t *testing.T) {
|
||||
}
|
||||
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
|
||||
md5Hex := getMD5Hash(fiveMBBytes)
|
||||
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
|
||||
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
|
||||
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
|
||||
_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -92,7 +92,7 @@ func TestXLDeleteObjectBasic(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create object "obj" under bucket "bucket" for Test 7 to pass
|
||||
_, err = xl.PutObject("bucket", "obj", NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = xl.PutObject("bucket", "obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
|
||||
}
|
||||
@ -128,7 +128,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
||||
bucket := "bucket"
|
||||
object := "object"
|
||||
// Create object "obj" under bucket "bucket".
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -143,7 +143,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create "obj" under "bucket".
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -178,7 +178,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
|
||||
bucket := "bucket"
|
||||
object := "object"
|
||||
// Create "object" under "bucket".
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -230,7 +230,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
|
||||
bucket := "bucket"
|
||||
object := "object"
|
||||
// Create "object" under "bucket".
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -253,7 +253,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
|
||||
}
|
||||
}
|
||||
// Upload new content to same object "object"
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
||||
err = errorCause(err)
|
||||
if err != toObjectErr(errXLWriteQuorum, bucket, object) {
|
||||
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
|
||||
@ -294,7 +294,7 @@ func TestHealing(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), length, "", ""), nil)
|
||||
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
37
pkg/hash/errors.go
Normal file
@ -0,0 +1,37 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package hash
|
||||
|
||||
// SHA256Mismatch - when content sha256 does not match with what was sent from client.
|
||||
type SHA256Mismatch struct {
|
||||
ExpectedSHA256 string
|
||||
CalculatedSHA256 string
|
||||
}
|
||||
|
||||
func (e SHA256Mismatch) Error() string {
|
||||
return "Bad sha256: Expected " + e.ExpectedSHA256 + " is not valid with what we calculated " + e.CalculatedSHA256
|
||||
}
|
||||
|
||||
// BadDigest - Content-MD5 you specified did not match what we received.
|
||||
type BadDigest struct {
|
||||
ExpectedMD5 string
|
||||
CalculatedMD5 string
|
||||
}
|
||||
|
||||
func (e BadDigest) Error() string {
|
||||
return "Bad digest: Expected " + e.ExpectedMD5 + " is not valid with what we calculated " + e.CalculatedMD5
|
||||
}
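Callers see these typed errors as the return value of Read(), so mapping them to S3 API error codes is a plain type switch; this mirrors the api-errors.go change earlier in this commit:

switch err.(type) {
case hash.BadDigest:
    apiErr = ErrBadDigest
case hash.SHA256Mismatch:
    apiErr = ErrContentSHA256Mismatch
}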
|
136
pkg/hash/reader.go
Normal file
@ -0,0 +1,136 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package hash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/md5"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Reader writes what it reads from an io.Reader to an MD5 and SHA256 hash.Hash.
|
||||
// Reader verifies that the content of the io.Reader matches the expected checksums.
|
||||
type Reader struct {
|
||||
src io.Reader
|
||||
size int64
|
||||
|
||||
md5sum, sha256sum []byte // Byte values of md5sum, sha256sum of client sent values.
|
||||
md5Hash, sha256Hash hash.Hash
|
||||
}
|
||||
|
||||
// NewReader returns a new hash Reader which computes the MD5 sum and
|
||||
// SHA256 sum (if set) of the provided io.Reader at EOF.
|
||||
func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string) (*Reader, error) {
|
||||
if _, ok := src.(*Reader); ok {
|
||||
return nil, errors.New("Nesting of Reader detected, not allowed")
|
||||
}
|
||||
|
||||
sha256sum, err := hex.DecodeString(sha256Hex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
md5sum, err := hex.DecodeString(md5Hex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var (
|
||||
md5Hash hash.Hash
|
||||
sha256Hash hash.Hash
|
||||
)
|
||||
if len(md5sum) != 0 {
|
||||
md5Hash = md5.New()
|
||||
}
|
||||
if len(sha256sum) != 0 {
|
||||
sha256Hash = sha256.New()
|
||||
}
|
||||
|
||||
return &Reader{
|
||||
md5sum: md5sum,
|
||||
sha256sum: sha256sum,
|
||||
src: io.LimitReader(src, size),
|
||||
size: size,
|
||||
md5Hash: md5Hash,
|
||||
sha256Hash: sha256Hash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *Reader) Read(p []byte) (n int, err error) {
|
||||
n, err = r.src.Read(p)
|
||||
if n > 0 {
|
||||
if r.md5Hash != nil {
|
||||
r.md5Hash.Write(p[:n])
|
||||
}
|
||||
if r.sha256Hash != nil {
|
||||
r.sha256Hash.Write(p[:n])
|
||||
}
|
||||
}
|
||||
|
||||
// At io.EOF verify if the checksums are right.
|
||||
if err == io.EOF {
|
||||
if cerr := r.Verify(); cerr != nil {
|
||||
return 0, cerr
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Size returns the absolute number of bytes the Reader
|
||||
// will return during reading. It returns -1 for unlimited
|
||||
// data.
|
||||
func (r *Reader) Size() int64 { return r.size }
|
||||
|
||||
// MD5 - returns byte md5 value
|
||||
func (r *Reader) MD5() []byte {
|
||||
return r.md5sum
|
||||
}
|
||||
|
||||
// SHA256 - returns byte sha256 value
|
||||
func (r *Reader) SHA256() []byte {
|
||||
return r.sha256sum
|
||||
}
|
||||
|
||||
// MD5HexString returns hex md5 value.
|
||||
func (r *Reader) MD5HexString() string {
|
||||
return hex.EncodeToString(r.md5sum)
|
||||
}
|
||||
|
||||
// SHA256HexString returns hex sha256 value.
|
||||
func (r *Reader) SHA256HexString() string {
|
||||
return hex.EncodeToString(r.sha256sum)
|
||||
}
|
||||
|
||||
// Verify verifies if the computed MD5 sum and SHA256 sum are
|
||||
// equal to the ones specified when creating the Reader.
|
||||
func (r *Reader) Verify() error {
|
||||
if r.sha256Hash != nil {
|
||||
if sum := r.sha256Hash.Sum(nil); !bytes.Equal(r.sha256sum, sum) {
|
||||
return SHA256Mismatch{hex.EncodeToString(r.sha256sum), hex.EncodeToString(sum)}
|
||||
}
|
||||
}
|
||||
if r.md5Hash != nil {
|
||||
if sum := r.md5Hash.Sum(nil); !bytes.Equal(r.md5sum, sum) {
|
||||
return BadDigest{hex.EncodeToString(r.md5sum), hex.EncodeToString(sum)}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
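A short usage sketch of the reader defined above, using the md5/sha256 hex of "abcd" (the same values exercised by the tests below); verification fires inside Read() at io.EOF, so draining the reader is enough:

package main

import (
    "bytes"
    "io"
    "io/ioutil"

    "github.com/minio/minio/pkg/hash"
)

func main() {
    r, err := hash.NewReader(bytes.NewReader([]byte("abcd")), 4,
        "e2fc714c4727ee9395f324cd2e7f331f",
        "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589")
    if err != nil {
        panic(err)
    }
    // A hash.BadDigest or hash.SHA256Mismatch would be returned here.
    if _, err = io.Copy(ioutil.Discard, r); err != nil {
        panic(err)
    }
}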
152
pkg/hash/reader_test.go
Normal file
@ -0,0 +1,152 @@
|
||||
/*
|
||||
* Minio Cloud Storage, (C) 2017 Minio, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package hash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Tests functions like Size(), MD5*(), SHA256*()
|
||||
func TestHashReaderHelperMethods(t *testing.T) {
|
||||
r, err := NewReader(bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = io.Copy(ioutil.Discard, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if r.MD5HexString() != "e2fc714c4727ee9395f324cd2e7f331f" {
|
||||
t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", r.MD5HexString())
|
||||
}
|
||||
if r.SHA256HexString() != "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589" {
|
||||
t.Errorf("Expected sha256hex \"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589\", got %s", r.SHA256HexString())
|
||||
}
|
||||
if r.Size() != 4 {
|
||||
t.Errorf("Expected size 4, got %d", r.Size())
|
||||
}
|
||||
expectedMD5, err := hex.DecodeString("e2fc714c4727ee9395f324cd2e7f331f")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(r.MD5(), expectedMD5) {
|
||||
t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", r.MD5HexString())
|
||||
}
|
||||
expectedSHA256, err := hex.DecodeString("88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589")
if err != nil {
t.Fatal(err)
}
|
||||
if !bytes.Equal(r.SHA256(), expectedSHA256) {
|
||||
t.Errorf("Expected md5hex \"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589\", got %s", r.SHA256HexString())
|
||||
}
|
||||
}
|
||||
|
||||
// Tests hash reader checksum verification.
|
||||
func TestHashReaderVerification(t *testing.T) {
|
||||
testCases := []struct {
|
||||
src io.Reader
|
||||
size int64
|
||||
md5hex, sha256hex string
|
||||
err error
|
||||
}{
|
||||
// Success, no checksum verification provided.
|
||||
{
|
||||
src: bytes.NewReader([]byte("abcd")),
|
||||
size: 4,
|
||||
},
|
||||
// Failure md5 mismatch.
|
||||
{
|
||||
src: bytes.NewReader([]byte("abcd")),
|
||||
size: 4,
|
||||
md5hex: "d41d8cd98f00b204e9800998ecf8427f",
|
||||
err: BadDigest{
|
||||
"d41d8cd98f00b204e9800998ecf8427f",
|
||||
"e2fc714c4727ee9395f324cd2e7f331f",
|
||||
},
|
||||
},
|
||||
// Failure sha256 mismatch.
|
||||
{
|
||||
src: bytes.NewReader([]byte("abcd")),
|
||||
size: 4,
|
||||
sha256hex: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580",
|
||||
err: SHA256Mismatch{
|
||||
"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580",
|
||||
"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589",
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
r, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: Initializing reader failed %s", i+1, err)
|
||||
}
|
||||
_, err = io.Copy(ioutil.Discard, r)
|
||||
if err != nil {
|
||||
if err.Error() != testCase.err.Error() {
|
||||
t.Errorf("Test %d: Expected error %s, got error %s", i+1, testCase.err, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests NewReader() constructor with invalid arguments.
|
||||
func TestHashReaderInvalidArguments(t *testing.T) {
|
||||
testCases := []struct {
|
||||
src io.Reader
|
||||
size int64
|
||||
md5hex, sha256hex string
|
||||
success bool
|
||||
}{
|
||||
// Invalid md5sum NewReader() will fail.
|
||||
{
|
||||
src: bytes.NewReader([]byte("abcd")),
|
||||
size: 4,
|
||||
md5hex: "invalid-md5",
|
||||
success: false,
|
||||
},
|
||||
// Invalid sha256 NewReader() will fail.
|
||||
{
|
||||
src: bytes.NewReader([]byte("abcd")),
|
||||
size: 4,
|
||||
sha256hex: "invalid-sha256",
|
||||
success: false,
|
||||
},
|
||||
// Nested hash reader NewReader() will fail.
|
||||
{
|
||||
src: &Reader{src: bytes.NewReader([]byte("abcd"))},
|
||||
size: 4,
|
||||
success: false,
|
||||
},
|
||||
// Expected inputs, NewReader() will succeed.
|
||||
{
|
||||
src: bytes.NewReader([]byte("abcd")),
|
||||
size: 4,
|
||||
success: true,
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
_, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex)
|
||||
if err != nil && testCase.success {
|
||||
t.Errorf("Test %d: Expected success, but got error %s instead", i+1, err)
|
||||
}
|
||||
if err == nil && !testCase.success {
|
||||
t.Errorf("Test %d: Expected error, but got success", i+1)
|
||||
}
|
||||
}
|
||||
}
|
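The tests elsewhere in this commit call a mustGetHashReader helper that is not part of this section; a plausible sketch of such a helper (its exact definition is an assumption), wrapping hash.NewReader and aborting the test on constructor failure:

// Hypothetical test helper: build a *hash.Reader or fail the test.
func mustGetHashReader(t *testing.T, data io.Reader, size int64, md5hex, sha256hex string) *hash.Reader {
    r, err := hash.NewReader(data, size, md5hex, sha256hex)
    if err != nil {
        t.Fatal(err)
    }
    return r
}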