refactor ObjectLayer PutObject and PutObjectPart (#4925)
This change refactors the ObjectLayer PutObject and PutObjectPart functions. Instead of passing an io.Reader and a size to PUT operations, the ObjectLayer now expects a HashReader. A HashReader verifies the MD5 sum (and the SHA256 sum, if required) of the object. This change updates all PutObject(Part) calls and removes unnecessary code in all ObjectLayer implementations. Fixes #4923
This commit is contained in:
parent: f8024cadbb
commit: 79ba4d3f33
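The HashReader type itself is introduced elsewhere in the commit; the hunks below only show its call sites and the signatures that change. As orientation, here is a minimal, self-contained sketch of the API those call sites imply (NewHashReader, Read, Size, MD5, Verify, and the md5Sum/sha256Sum fields). It is illustrative only, not minio's actual implementation, which returns typed errors such as BadDigest and SHA256Mismatch:

package main

import (
    "bytes"
    "crypto/md5"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "hash"
    "io"
)

// HashReader wraps an io.Reader and hashes bytes as they stream through,
// so a backend can write the data first and call Verify() afterwards.
type HashReader struct {
    src        io.Reader
    size       int64
    md5Sum     string // expected hex-encoded MD5 (empty means "don't check")
    sha256Sum  string // expected hex-encoded SHA256 (empty means "don't check")
    md5Hash    hash.Hash
    sha256Hash hash.Hash
}

func NewHashReader(src io.Reader, size int64, md5Hex, sha256Hex string) *HashReader {
    r := &HashReader{src: src, size: size, md5Sum: md5Hex, sha256Sum: sha256Hex, md5Hash: md5.New()}
    if size > 0 {
        // Guard against clients sending more data than the declared size.
        r.src = io.LimitReader(src, size)
    }
    if sha256Hex != "" {
        r.sha256Hash = sha256.New()
    }
    return r
}

func (r *HashReader) Read(p []byte) (int, error) {
    n, err := r.src.Read(p)
    if n > 0 {
        r.md5Hash.Write(p[:n])
        if r.sha256Hash != nil {
            r.sha256Hash.Write(p[:n])
        }
    }
    return n, err
}

// Size returns the declared object size.
func (r *HashReader) Size() int64 { return r.size }

// MD5 returns the raw MD5 digest of everything read so far.
func (r *HashReader) MD5() []byte { return r.md5Hash.Sum(nil) }

// Verify compares the computed digests against the expected ones, if any.
func (r *HashReader) Verify() error {
    if r.md5Sum != "" && r.md5Sum != hex.EncodeToString(r.md5Hash.Sum(nil)) {
        return fmt.Errorf("md5 mismatch") // minio returns BadDigest here
    }
    if r.sha256Hash != nil && r.sha256Sum != hex.EncodeToString(r.sha256Hash.Sum(nil)) {
        return fmt.Errorf("sha256 mismatch") // minio returns SHA256Mismatch here
    }
    return nil
}

func main() {
    payload := []byte("hello")
    sum := md5.Sum(payload)
    hr := NewHashReader(bytes.NewReader(payload), int64(len(payload)), hex.EncodeToString(sum[:]), "")
    io.Copy(io.Discard, hr)             // stream the object to its destination
    fmt.Println("verify:", hr.Verify()) // verify: <nil>
}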
@@ -943,7 +943,7 @@ func TestHealObjectHandler(t *testing.T) {
     }

     _, err = adminTestBed.objLayer.PutObject(bucketName, objName,
-        int64(len("hello")), bytes.NewReader([]byte("hello")), nil, "")
+        NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""), nil)
     if err != nil {
         t.Fatalf("Failed to create %s - %v", objName, err)
     }

@@ -1083,7 +1083,7 @@ func TestHealUploadHandler(t *testing.T) {
     // Upload a part.
     partID := 1
     _, err = adminTestBed.objLayer.PutObjectPart(bucketName, objName, uploadID,
-        partID, int64(len("hello")), bytes.NewReader([]byte("hello")), "", "")
+        partID, NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "", ""))
     if err != nil {
         t.Fatalf("Failed to upload part %d of %s/%s - %v", partID,
             bucketName, objName, err)
@@ -58,7 +58,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     b.ResetTimer()
     for i := 0; i < b.N; i++ {
         // insert the object.
-        objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
+        objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
         if err != nil {
             b.Fatal(err)
         }

@@ -118,7 +118,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
         metadata := make(map[string]string)
         metadata["etag"] = getMD5Hash([]byte(textPartData))
         var partInfo PartInfo
-        partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, int64(len(textPartData)), bytes.NewBuffer(textPartData), metadata["etag"], sha256sum)
+        partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j, NewHashReader(bytes.NewBuffer(textPartData), int64(len(textPartData)), metadata["etag"], sha256sum))
         if err != nil {
             b.Fatal(err)
         }

@@ -216,7 +216,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
         metadata["etag"] = getMD5Hash(textData)
         // insert the object.
         var objInfo ObjectInfo
-        objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
+        objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
         if err != nil {
             b.Fatal(err)
         }

@@ -329,7 +329,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     i := 0
     for pb.Next() {
         // insert the object.
-        objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
+        objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
         if err != nil {
             b.Fatal(err)
         }

@@ -372,7 +372,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     sha256sum := ""
     // insert the object.
     var objInfo ObjectInfo
-    objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), int64(len(textData)), bytes.NewBuffer(textData), metadata, sha256sum)
+    objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i), NewHashReader(bytes.NewBuffer(textData), int64(len(textData)), metadata["etag"], sha256sum), metadata)
     if err != nil {
         b.Fatal(err)
     }
@@ -562,7 +562,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
     }
     defer objectLock.Unlock()

-    objInfo, err := objectAPI.PutObject(bucket, object, fileSize, fileBody, metadata, sha256sum)
+    objInfo, err := objectAPI.PutObject(bucket, object, NewHashReader(fileBody, fileSize, metadata["etag"], sha256sum), metadata)
     if err != nil {
         errorIf(err, "Unable to create object.")
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -632,8 +632,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
     for i := 0; i < 10; i++ {
         objectName := "test-object-" + strconv.Itoa(i)
         // uploading the object.
-        _, err = obj.PutObject(bucketName, objectName, int64(len(contentBytes)), bytes.NewBuffer(contentBytes),
-            make(map[string]string), sha256sum)
+        _, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
         // if object upload fails stop the test.
         if err != nil {
             t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
@@ -218,7 +218,7 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy *bucketPolicy) err
         return err
     }
     defer objLock.Unlock()
-    if _, err := objAPI.PutObject(minioMetaBucket, policyPath, int64(len(buf)), bytes.NewReader(buf), nil, ""); err != nil {
+    if _, err := objAPI.PutObject(minioMetaBucket, policyPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", ""), nil); err != nil {
         errorIf(err, "Unable to set policy for the bucket %s", bucket)
         return errorCause(err)
     }
@@ -465,7 +465,7 @@ func persistNotificationConfig(bucket string, ncfg *notificationConfig, obj Obje

     // write object to path
     sha256Sum := getSHA256Hash(buf)
-    _, err = obj.PutObject(minioMetaBucket, ncPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum)
+    _, err = obj.PutObject(minioMetaBucket, ncPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", sha256Sum), nil)
     if err != nil {
         errorIf(err, "Unable to write bucket notification configuration.")
         return err
@@ -492,7 +492,7 @@ func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer

     // write object to path
     sha256Sum := getSHA256Hash(buf)
-    _, err = obj.PutObject(minioMetaBucket, lcPath, int64(len(buf)), bytes.NewReader(buf), nil, sha256Sum)
+    _, err = obj.PutObject(minioMetaBucket, lcPath, NewHashReader(bytes.NewReader(buf), int64(len(buf)), "", sha256Sum), nil)
     if err != nil {
         errorIf(err, "Unable to write bucket listener configuration to object layer.")
     }
@@ -62,7 +62,7 @@ func TestInitEventNotifierFaultyDisks(t *testing.T) {
     notificationXML += "</NotificationConfiguration>"
     size := int64(len([]byte(notificationXML)))
     reader := bytes.NewReader([]byte(notificationXML))
-    if _, err := xl.PutObject(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, size, reader, nil, ""); err != nil {
+    if _, err := xl.PutObject(minioMetaBucket, bucketConfigPrefix+"/"+bucketName+"/"+bucketNotificationConfig, NewHashReader(reader, size, "", ""), nil); err != nil {
         t.Fatal("Unexpected error:", err)
     }

@@ -235,9 +235,8 @@ func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {

     bucket := "bucket"
     object := "object"
-    sha256sum := ""

-    _, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
+    _, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
     if err != nil {
         return []StorageAPI{}, err
     }

@@ -326,9 +325,8 @@ func TestFormatXLHealCorruptedDisks(t *testing.T) {

     bucket := "bucket"
     object := "object"
-    sha256sum := ""

-    _, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
+    _, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
     if err != nil {
         t.Fatal(err)
     }

@@ -401,9 +399,8 @@ func TestFormatXLReorderByInspection(t *testing.T) {

     bucket := "bucket"
     object := "object"
-    sha256sum := ""

-    _, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
+    _, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
     if err != nil {
         t.Fatal(err)
     }
@@ -52,9 +52,7 @@ func TestReadFSMetadata(t *testing.T) {
     if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
         t.Fatal("Unexpected err: ", err)
     }
-    sha256sum := ""
-    if _, err := obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
-        map[string]string{"X-Amz-Meta-AppId": "a"}, sha256sum); err != nil {
+    if _, err := obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
         t.Fatal("Unexpected err: ", err)
     }

@@ -89,9 +87,7 @@ func TestWriteFSMetadata(t *testing.T) {
     if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
         t.Fatal("Unexpected err: ", err)
     }
-    sha256sum := ""
-    if _, err := obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")),
-        map[string]string{"X-Amz-Meta-AppId": "a"}, sha256sum); err != nil {
+    if _, err := obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
         t.Fatal("Unexpected err: ", err)
     }

@@ -17,10 +17,8 @@
 package cmd

 import (
-    "crypto/md5"
     "encoding/hex"
     "fmt"
-    "hash"
     "io"
     "os"
     pathutil "path"

@@ -28,7 +26,6 @@ import (
     "time"

     "github.com/minio/minio/pkg/lock"
-    "github.com/minio/sha256-simd"
 )

 const (

@@ -459,7 +456,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
         pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
     }()

-    partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
+    partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, NewHashReader(pipeReader, length, "", ""))
     if err != nil {
         return pi, toObjectErr(err, dstBucket, dstObject)
     }

@@ -474,7 +471,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
 // an ongoing multipart transaction. Internally incoming data is
 // written to '.minio.sys/tmp' location and safely renamed to
 // '.minio.sys/multipart' for reach parts.
-func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
+func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
     if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
         return pi, err
     }

@@ -523,36 +520,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
     partSuffix := fmt.Sprintf("object%d", partID)
     tmpPartPath := uploadID + "." + mustGetUUID() + "." + partSuffix

-    // Initialize md5 writer.
-    md5Writer := md5.New()
-    hashWriters := []io.Writer{md5Writer}
-
-    var sha256Writer hash.Hash
-    if sha256sum != "" {
-        sha256Writer = sha256.New()
-        hashWriters = append(hashWriters, sha256Writer)
-    }
-    multiWriter := io.MultiWriter(hashWriters...)
-
-    // Limit the reader to its provided size if specified.
-    var limitDataReader io.Reader
-    if size > 0 {
-        // This is done so that we can avoid erroneous clients sending more data than the set content size.
-        limitDataReader = io.LimitReader(data, size)
-    } else {
-        // else we read till EOF.
-        limitDataReader = data
-    }
-
-    teeReader := io.TeeReader(limitDataReader, multiWriter)
     bufSize := int64(readSizeV1)
-    if size > 0 && bufSize > size {
+    if size := data.Size(); size > 0 && bufSize > size {
         bufSize = size
     }
-    buf := make([]byte, int(bufSize))
+    buf := make([]byte, bufSize)

     fsPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tmpPartPath)
-    bytesWritten, cErr := fsCreateFile(fsPartPath, teeReader, buf, size)
+    bytesWritten, cErr := fsCreateFile(fsPartPath, data, buf, data.Size())
     if cErr != nil {
         fsRemoveFile(fsPartPath)
         return pi, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)

@@ -560,7 +535,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s

     // Should return IncompleteBody{} error when reader has fewer
     // bytes than specified in request header.
-    if bytesWritten < size {
+    if bytesWritten < data.Size() {
         fsRemoveFile(fsPartPath)
         return pi, traceError(IncompleteBody{})
     }

@@ -570,18 +545,8 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
     // delete.
     defer fsRemoveFile(fsPartPath)

-    newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
-    if md5Hex != "" {
-        if newMD5Hex != md5Hex {
-            return pi, traceError(BadDigest{md5Hex, newMD5Hex})
-        }
-    }
-
-    if sha256sum != "" {
-        newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
-        if newSHA256sum != sha256sum {
-            return pi, traceError(SHA256Mismatch{})
-        }
+    if err = data.Verify(); err != nil {
+        return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath)
     }

     partPath := pathJoin(bucket, object, uploadID, partSuffix)

@@ -599,7 +564,8 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
     }

     // Save the object part info in `fs.json`.
-    fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
+    md5Hex := hex.EncodeToString(data.MD5())
+    fsMeta.AddObjectPart(partID, partSuffix, md5Hex, data.Size())
     if _, err = fsMeta.WriteTo(rwlk); err != nil {
         partLock.Unlock()
         return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)

@@ -625,7 +591,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
     return PartInfo{
         PartNumber:   partID,
         LastModified: fi.ModTime(),
-        ETag:         newMD5Hex,
+        ETag:         md5Hex,
         Size:         fi.Size(),
     }, nil
 }
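The rewritten PutObjectPart above settles into the pattern every backend in this commit repeats: stream the HashReader to a temporary location, check the byte count, then verify the digest and drop the partial write on failure. Below is a standalone sketch of that flow, with illustrative names and plain fmt.Errorf errors rather than minio's actual helpers and typed errors:

package main

import (
    "bytes"
    "crypto/md5"
    "encoding/hex"
    "fmt"
    "io"
    "os"
)

// putPattern mirrors the flow PutObject/PutObjectPart now follow:
// write the streamed bytes to a temp location first, verify the
// digest afterwards, and remove the temp file on any failure.
func putPattern(tmpPath string, src io.Reader, size int64, wantMD5 string) error {
    f, err := os.Create(tmpPath)
    if err != nil {
        return err
    }
    h := md5.New()
    // Tee into the hash while writing, like HashReader does internally.
    n, err := io.Copy(f, io.TeeReader(io.LimitReader(src, size), h))
    f.Close()
    if err != nil || n < size {
        os.Remove(tmpPath) // incomplete body: drop the partial write
        return fmt.Errorf("incomplete body: wrote %d of %d bytes", n, size)
    }
    if got := hex.EncodeToString(h.Sum(nil)); wantMD5 != "" && got != wantMD5 {
        os.Remove(tmpPath) // bad digest: drop the object
        return fmt.Errorf("bad digest: want %s, got %s", wantMD5, got)
    }
    return nil // safe to rename tmpPath into its final location
}

func main() {
    data := []byte("abcd")
    sum := md5.Sum(data)
    err := putPattern("part.tmp", bytes.NewReader(data), int64(len(data)),
        hex.EncodeToString(sum[:]))
    fmt.Println("put:", err)
    os.Remove("part.tmp")
}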
@@ -183,7 +183,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
     sha256sum := ""

     fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-    _, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, dataLen, bytes.NewReader(data), md5Hex, sha256sum)
+    _, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), dataLen, md5Hex, sha256sum))
     if !isSameType(errorCause(err), BucketNotFound{}) {
         t.Fatal("Unexpected error ", err)
     }

@@ -211,9 +211,8 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
     }

     md5Hex := getMD5Hash(data)
-    sha256sum := ""

-    if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil {
+    if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, "")); err != nil {
         t.Fatal("Unexpected error ", err)
     }

@@ -252,7 +251,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
     md5Hex := getMD5Hash(data)
     sha256sum := ""

-    if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, 5, bytes.NewReader(data), md5Hex, sha256sum); err != nil {
+    if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, NewHashReader(bytes.NewReader(data), 5, md5Hex, sha256sum)); err != nil {
         t.Fatal("Unexpected error ", err)
     }

cmd/fs-v1.go (62 changed lines)
@@ -17,10 +17,8 @@
 package cmd

 import (
-    "crypto/md5"
     "encoding/hex"
     "fmt"
-    "hash"
     "io"
     "io/ioutil"
     "os"

@@ -30,7 +28,6 @@ import (
     "syscall"

     "github.com/minio/minio/pkg/lock"
-    "github.com/minio/sha256-simd"
 )

 // fsObjects - Implements fs object layer.

@@ -364,7 +361,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
         pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
     }()

-    objInfo, err := fs.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
+    objInfo, err := fs.PutObject(dstBucket, dstObject, NewHashReader(pipeReader, length, metadata["etag"], ""), metadata)
     if err != nil {
         return oi, toObjectErr(err, dstBucket, dstObject)
     }

@@ -511,7 +508,7 @@ func (fs fsObjects) parentDirIsObject(bucket, parent string) bool {
 // until EOF, writes data directly to configured filesystem path.
 // Additionally writes `fs.json` which carries the necessary metadata
 // for future object operations.
-func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, retErr error) {
+func (fs fsObjects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
     var err error

     // Validate if bucket name is valid and exists.

@@ -522,12 +519,12 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
     // This is a special case with size as '0' and object ends
     // with a slash separator, we treat it like a valid operation
     // and return success.
-    if isObjectDir(object, size) {
+    if isObjectDir(object, data.Size()) {
         // Check if an object is present as one of the parent dir.
         if fs.parentDirIsObject(bucket, path.Dir(object)) {
             return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
         }
-        return dirObjectInfo(bucket, object, size, metadata), nil
+        return dirObjectInfo(bucket, object, data.Size(), metadata), nil
     }

     if err = checkPutObjectArgs(bucket, object, fs); err != nil {

@@ -571,37 +568,14 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
     // so that cleaning it up will be easy if the server goes down.
     tempObj := mustGetUUID()

-    // Initialize md5 writer.
-    md5Writer := md5.New()
-
-    hashWriters := []io.Writer{md5Writer}
-
-    var sha256Writer hash.Hash
-    if sha256sum != "" {
-        sha256Writer = sha256.New()
-        hashWriters = append(hashWriters, sha256Writer)
-    }
-    multiWriter := io.MultiWriter(hashWriters...)
-
-    // Limit the reader to its provided size if specified.
-    var limitDataReader io.Reader
-    if size > 0 {
-        // This is done so that we can avoid erroneous clients sending more data than the set content size.
-        limitDataReader = io.LimitReader(data, size)
-    } else {
-        // else we read till EOF.
-        limitDataReader = data
-    }
-
     // Allocate a buffer to Read() from request body
     bufSize := int64(readSizeV1)
-    if size > 0 && bufSize > size {
+    if size := data.Size(); size > 0 && bufSize > size {
         bufSize = size
     }
     buf := make([]byte, int(bufSize))
-    teeReader := io.TeeReader(limitDataReader, multiWriter)
     fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
-    bytesWritten, err := fsCreateFile(fsTmpObjPath, teeReader, buf, size)
+    bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size())
     if err != nil {
         fsRemoveFile(fsTmpObjPath)
         errorIf(err, "Failed to create object %s/%s", bucket, object)

@@ -610,7 +584,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.

     // Should return IncompleteBody{} error when reader has fewer
     // bytes than specified in request header.
-    if bytesWritten < size {
+    if bytesWritten < data.Size() {
         fsRemoveFile(fsTmpObjPath)
         return ObjectInfo{}, traceError(IncompleteBody{})
     }

@@ -620,27 +594,11 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
     // nothing to delete.
     defer fsRemoveFile(fsTmpObjPath)

-    newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
-    // Update the md5sum if not set with the newly calculated one.
-    if len(metadata["etag"]) == 0 {
-        metadata["etag"] = newMD5Hex
+    if err = data.Verify(); err != nil { // verify MD5 and SHA256
+        return ObjectInfo{}, traceError(err)
     }

-    // md5Hex representation.
-    md5Hex := metadata["etag"]
-    if md5Hex != "" {
-        if newMD5Hex != md5Hex {
-            // Returns md5 mismatch.
-            return ObjectInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
-        }
-    }
-
-    if sha256sum != "" {
-        newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
-        if newSHA256sum != sha256sum {
-            return ObjectInfo{}, traceError(SHA256Mismatch{})
-        }
-    }
+    metadata["etag"] = hex.EncodeToString(data.MD5())

     // Entire object was written to the temp location, now it's safe to rename it to the actual location.
     fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
@@ -64,8 +64,7 @@ func TestFSShutdown(t *testing.T) {
     fs := obj.(*fsObjects)
     objectContent := "12345"
     obj.MakeBucketWithLocation(bucketName, "")
-    sha256sum := ""
-    obj.PutObject(bucketName, objectName, int64(len(objectContent)), bytes.NewReader([]byte(objectContent)), nil, sha256sum)
+    obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
     return fs, disk
 }

@@ -134,10 +133,8 @@ func TestFSPutObject(t *testing.T) {
         t.Fatal(err)
     }

-    sha256sum := ""
-
     // With a regular object.
-    _, err := obj.PutObject(bucketName+"non-existent", objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
+    _, err := obj.PutObject(bucketName+"non-existent", objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
     if err == nil {
         t.Fatal("Unexpected should fail here, bucket doesn't exist")
     }

@@ -146,7 +143,7 @@ func TestFSPutObject(t *testing.T) {
     }

     // With a directory object.
-    _, err = obj.PutObject(bucketName+"non-existent", objectName+"/", int64(0), bytes.NewReader([]byte("")), nil, sha256sum)
+    _, err = obj.PutObject(bucketName+"non-existent", objectName+"/", NewHashReader(bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
     if err == nil {
         t.Fatal("Unexpected should fail here, bucket doesn't exist")
     }

@@ -154,11 +151,11 @@ func TestFSPutObject(t *testing.T) {
         t.Fatalf("Expected error type BucketNotFound, got %#v", err)
     }

-    _, err = obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
+    _, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
     if err != nil {
         t.Fatal(err)
     }
-    _, err = obj.PutObject(bucketName, objectName+"/1", int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
+    _, err = obj.PutObject(bucketName, objectName+"/1", NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
     if err == nil {
         t.Fatal("Unexpected should fail here, backend corruption occurred")
     }

@@ -173,7 +170,7 @@ func TestFSPutObject(t *testing.T) {
         }
     }

-    _, err = obj.PutObject(bucketName, objectName+"/1/", 0, bytes.NewReader([]byte("")), nil, sha256sum)
+    _, err = obj.PutObject(bucketName, objectName+"/1/", NewHashReader(bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
     if err == nil {
         t.Fatal("Unexpected should fail here, backned corruption occurred")
     }

@@ -201,8 +198,7 @@ func TestFSDeleteObject(t *testing.T) {
     objectName := "object"

     obj.MakeBucketWithLocation(bucketName, "")
-    sha256sum := ""
-    obj.PutObject(bucketName, objectName, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, sha256sum)
+    obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)

     // Test with invalid bucket name
     if err := fs.DeleteObject("fo", objectName); !isSameType(errorCause(err), BucketNameInvalid{}) {
@@ -17,11 +17,8 @@
 package cmd

 import (
-    "crypto/md5"
     "encoding/base64"
-    "encoding/hex"
     "fmt"
-    "hash"
     "io"
     "net/http"
     "net/url"

@@ -32,7 +29,6 @@ import (
     "github.com/Azure/azure-sdk-for-go/storage"
     humanize "github.com/dustin/go-humanize"
     "github.com/minio/minio-go/pkg/policy"
-    "github.com/minio/sha256-simd"
 )

 const globalAzureAPIVersion = "2016-05-31"

@@ -408,52 +404,16 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo ObjectInfo,

 // PutObject - Create a new blob with the incoming data,
 // uses Azure equivalent CreateBlockBlobFromReader.
-func (a *azureObjects) PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
-    var sha256Writer hash.Hash
-    var md5sumWriter hash.Hash
-
-    var writers []io.Writer
-
-    md5sum := metadata["etag"]
+func (a *azureObjects) PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
     delete(metadata, "etag")
-
-    teeReader := data
-
-    if sha256sum != "" {
-        sha256Writer = sha256.New()
-        writers = append(writers, sha256Writer)
-    }
-
-    if md5sum != "" {
-        md5sumWriter = md5.New()
-        writers = append(writers, md5sumWriter)
-    }
-
-    if len(writers) > 0 {
-        teeReader = io.TeeReader(data, io.MultiWriter(writers...))
-    }
-
-    err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(size), teeReader, s3ToAzureHeaders(metadata))
+    err = a.client.CreateBlockBlobFromReader(bucket, object, uint64(data.Size()), data, s3ToAzureHeaders(metadata))
     if err != nil {
         return objInfo, azureToObjectError(traceError(err), bucket, object)
     }
-
-    if md5sum != "" {
-        newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
-        if newMD5sum != md5sum {
-            a.client.DeleteBlob(bucket, object, nil)
-            return ObjectInfo{}, azureToObjectError(traceError(BadDigest{md5sum, newMD5sum}))
-        }
-    }
-
-    if sha256sum != "" {
-        newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
-        if newSHA256sum != sha256sum {
-            a.client.DeleteBlob(bucket, object, nil)
-            return ObjectInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
-        }
-    }
-
+    if err = data.Verify(); err != nil {
+        a.client.DeleteBlob(bucket, object, nil)
+        return ObjectInfo{}, azureToObjectError(traceError(err))
+    }
     return a.GetObjectInfo(bucket, object)
 }

@@ -537,39 +497,19 @@ func azureParseBlockID(blockID string) (partID, subPartNumber int, md5Hex string
 }

 // PutObjectPart - Use Azure equivalent PutBlockWithLength.
-func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error) {
+func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (info PartInfo, err error) {
     if meta := a.metaInfo.get(uploadID); meta == nil {
         return info, traceError(InvalidUploadID{})
     }
-    var sha256Writer hash.Hash
-    var md5sumWriter hash.Hash
-    var etag string
-
-    var writers []io.Writer
-
-    if sha256sum != "" {
-        sha256Writer = sha256.New()
-        writers = append(writers, sha256Writer)
-    }
-
-    if md5Hex != "" {
-        md5sumWriter = md5.New()
-        writers = append(writers, md5sumWriter)
-        etag = md5Hex
-    } else {
+    etag := data.md5Sum
+    if etag == "" {
         // Generate random ETag.
         etag = getMD5Hash([]byte(mustGetUUID()))
     }
-
-    teeReader := data
-
-    if len(writers) > 0 {
-        teeReader = io.TeeReader(data, io.MultiWriter(writers...))
-    }
-
-    subPartSize := int64(azureBlockSize)
-    subPartNumber := 1
-    for remainingSize := size; remainingSize >= 0; remainingSize -= subPartSize {
+    subPartSize, subPartNumber := int64(azureBlockSize), 1
+    for remainingSize := data.Size(); remainingSize >= 0; remainingSize -= subPartSize {
         // Allow to create zero sized part.
         if remainingSize == 0 && subPartNumber > 1 {
             break

@@ -580,33 +520,21 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
         }

         id := azureGetBlockID(partID, subPartNumber, etag)
-        err = a.client.PutBlockWithLength(bucket, object, id, uint64(subPartSize), io.LimitReader(teeReader, subPartSize), nil)
+        err = a.client.PutBlockWithLength(bucket, object, id, uint64(subPartSize), io.LimitReader(data, subPartSize), nil)
         if err != nil {
             return info, azureToObjectError(traceError(err), bucket, object)
         }

         subPartNumber++
     }
-
-    if md5Hex != "" {
-        newMD5sum := hex.EncodeToString(md5sumWriter.Sum(nil))
-        if newMD5sum != md5Hex {
-            a.client.DeleteBlob(bucket, object, nil)
-            return PartInfo{}, azureToObjectError(traceError(BadDigest{md5Hex, newMD5sum}))
-        }
-    }
-
-    if sha256sum != "" {
-        newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
-        if newSHA256sum != sha256sum {
-            return PartInfo{}, azureToObjectError(traceError(SHA256Mismatch{}))
-        }
+    if err = data.Verify(); err != nil {
+        a.client.DeleteBlob(bucket, object, nil)
+        return info, azureToObjectError(traceError(err), bucket, object)
     }

     info.PartNumber = partID
     info.ETag = etag
     info.LastModified = UTCNow()
-    info.Size = size
+    info.Size = data.Size()
     return info, nil
 }

@@ -18,12 +18,10 @@ package cmd

 import (
     "context"
-    "crypto/sha256"
     "encoding/base64"
     "encoding/hex"
     "encoding/json"
     "fmt"
-    "hash"
     "io"
     "math"
     "regexp"

@@ -720,8 +718,7 @@ func (l *gcsGateway) GetObjectInfo(bucket string, object string) (ObjectInfo, er
 }

 // PutObject - Create a new object with the incoming data,
-func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Reader,
-    metadata map[string]string, sha256sum string) (ObjectInfo, error) {
+func (l *gcsGateway) PutObject(bucket string, key string, data *HashReader, metadata map[string]string) (ObjectInfo, error) {

     // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
     // otherwise gcs will just return object not exist in case of non-existing bucket

@@ -729,15 +726,9 @@ func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Re
         return ObjectInfo{}, gcsToObjectError(traceError(err), bucket)
     }

-    reader := data
-
-    var sha256Writer hash.Hash
-    if sha256sum != "" {
-        sha256Writer = sha256.New()
-        reader = io.TeeReader(data, sha256Writer)
+    if _, err := hex.DecodeString(metadata["etag"]); err != nil {
+        return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
     }

-    md5sum := metadata["etag"]
     delete(metadata, "etag")

     object := l.client.Bucket(bucket).Object(key)

@@ -747,17 +738,8 @@ func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Re
     w.ContentType = metadata["content-type"]
     w.ContentEncoding = metadata["content-encoding"]
     w.Metadata = metadata
-    if md5sum != "" {
-        var err error
-        w.MD5, err = hex.DecodeString(md5sum)
-        if err != nil {
-            // Close the object writer upon error.
-            w.Close()
-            return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
-        }
-    }
-
-    if _, err := io.CopyN(w, reader, size); err != nil {
+    if _, err := io.Copy(w, data); err != nil {
         // Close the object writer upon error.
         w.Close()
         return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)

@@ -765,12 +747,9 @@ func (l *gcsGateway) PutObject(bucket string, key string, size int64, data io.Re
     // Close the object writer upon success.
     w.Close()

-    // Verify sha256sum after close.
-    if sha256sum != "" {
-        if hex.EncodeToString(sha256Writer.Sum(nil)) != sha256sum {
-            object.Delete(l.ctx)
-            return ObjectInfo{}, traceError(SHA256Mismatch{})
-        }
+    if err := data.Verify(); err != nil { // Verify sha256sum after close.
+        object.Delete(l.ctx)
+        return ObjectInfo{}, gcsToObjectError(traceError(err), bucket, key)
     }

     attrs, err := object.Attrs(l.ctx)

@@ -855,65 +834,37 @@ func (l *gcsGateway) checkUploadIDExists(bucket string, key string, uploadID str
 }

 // PutObjectPart puts a part of object in bucket
-func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
+func (l *gcsGateway) PutObjectPart(bucket string, key string, uploadID string, partNumber int, data *HashReader) (PartInfo, error) {
     if err := l.checkUploadIDExists(bucket, key, uploadID); err != nil {
         return PartInfo{}, err
     }
-
-    var sha256Writer hash.Hash
-
-    var etag string
-    // Honor etag if client did send md5Hex.
-    if md5Hex != "" {
-        etag = md5Hex
-    } else {
+    etag := data.md5Sum
+    if etag == "" {
         // Generate random ETag.
         etag = getMD5Hash([]byte(mustGetUUID()))
     }
-
-    reader := data
-
-    if sha256sum != "" {
-        sha256Writer = sha256.New()
-        reader = io.TeeReader(data, sha256Writer)
-    }
-
     object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag))
     w := object.NewWriter(l.ctx)
     // Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case
     // where it tries to upload 0 bytes in the last chunk and get error from server.
     w.ChunkSize = 0
-    if md5Hex != "" {
-        var err error
-        w.MD5, err = hex.DecodeString(md5Hex)
-        if err != nil {
-            // Make sure to close object writer upon error.
-            w.Close()
-            return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
-        }
-    }
-
-    if _, err := io.CopyN(w, reader, size); err != nil {
+    if _, err := io.Copy(w, data); err != nil {
         // Make sure to close object writer upon error.
         w.Close()
         return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
     }

     // Make sure to close the object writer upon success.
     w.Close()

-    // Verify sha256sum after Close().
-    if sha256sum != "" {
-        if hex.EncodeToString(sha256Writer.Sum(nil)) != sha256sum {
-            object.Delete(l.ctx)
-            return PartInfo{}, traceError(SHA256Mismatch{})
-        }
-    }
+    if err := data.Verify(); err != nil {
+        object.Delete(l.ctx)
+        return PartInfo{}, gcsToObjectError(traceError(err), bucket, key)
+    }

     return PartInfo{
         PartNumber:   partNumber,
         ETag:         etag,
         LastModified: UTCNow(),
-        Size:         size,
+        Size:         data.Size(),
     }, nil

 }
@@ -281,7 +281,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
             writeErrorResponse(w, s3Error, r.URL)
             return
         }
-        objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, "")
+        objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(reader, size, "", ""), metadata)
     case authTypeSignedV2, authTypePresignedV2:
         s3Error := isReqAuthenticatedV2(r)
         if s3Error != ErrNone {

@@ -289,7 +289,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
             writeErrorResponse(w, s3Error, r.URL)
             return
         }
-        objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, "")
+        objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, "", ""), metadata)
     case authTypePresigned, authTypeSigned:
         if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
             errorIf(errSignatureMismatch, "%s", dumpRequest(r))

@@ -303,7 +303,7 @@ func (api gatewayAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Re
         }

         // Create object.
-        objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
+        objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, "", sha256sum), metadata)
     default:
         // For all unknown auth types return error.
         writeErrorResponse(w, ErrAccessDenied, r.URL)
@@ -330,32 +330,18 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
     return fromMinioClientObjectInfo(bucket, oi), nil
 }

-// Decodes hex encoded md5, sha256 into their raw byte representations.
-func getMD5AndSha256SumBytes(md5Hex, sha256Hex string) (md5Bytes, sha256Bytes []byte, err error) {
-    if md5Hex != "" {
-        md5Bytes, err = hex.DecodeString(md5Hex)
-        if err != nil {
-            return nil, nil, err
-        }
-    }
-    if sha256Hex != "" {
-        sha256Bytes, err = hex.DecodeString(sha256Hex)
-        if err != nil {
-            return nil, nil, err
-        }
-    }
-    return md5Bytes, sha256Bytes, nil
-}
-
 // PutObject creates a new object with the incoming data,
-func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
-    md5Bytes, sha256Bytes, err := getMD5AndSha256SumBytes(metadata["etag"], sha256sum)
+func (l *s3Objects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
+    sha256sumBytes, err := hex.DecodeString(data.sha256Sum)
+    if err != nil {
+        return objInfo, s3ToObjectError(traceError(err), bucket, object)
+    }
+    md5sumBytes, err := hex.DecodeString(metadata["etag"])
     if err != nil {
         return objInfo, s3ToObjectError(traceError(err), bucket, object)
     }
     delete(metadata, "etag")
-
-    oi, err := l.Client.PutObject(bucket, object, size, data, md5Bytes, sha256Bytes, toMinioClientMetadata(metadata))
+    oi, err := l.Client.PutObject(bucket, object, data.Size(), data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
     if err != nil {
         return objInfo, s3ToObjectError(traceError(err), bucket, object)
     }

@@ -492,13 +478,18 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
 }

 // PutObjectPart puts a part of object in bucket
-func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
-    md5Bytes, sha256Bytes, err := getMD5AndSha256SumBytes(md5Hex, sha256sum)
+func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
+    md5HexBytes, err := hex.DecodeString(data.md5Sum)
     if err != nil {
-        return pi, s3ToObjectError(traceError(err), bucket, object)
+        return pi, err
     }

-    info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5Bytes, sha256Bytes)
+    sha256sumBytes, err := hex.DecodeString(data.sha256Sum)
+    if err != nil {
+        return pi, err
+    }
+
+    info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data.Size(), data, md5HexBytes, sha256sumBytes)
     if err != nil {
         return pi, err
     }
@@ -1,58 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cmd
-
-import (
-    "testing"
-)
-
-// Tests extracting md5/sha256 bytes.
-func TestGetMD5AndSha256Bytes(t *testing.T) {
-    testCases := []struct {
-        md5Hex    string
-        sha256Hex string
-        success   bool
-    }{
-        // Test 1: Hex encoding failure.
-        {
-            md5Hex:    "a",
-            sha256Hex: "b",
-            success:   false,
-        },
-        // Test 2: Hex encoding success.
-        {
-            md5Hex:    "91be0b892e47ede9de06aac14ca0369e",
-            sha256Hex: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
-            success:   true,
-        },
-        // Test 3: hex values are empty should return success.
-        {
-            md5Hex:    "",
-            sha256Hex: "",
-            success:   true,
-        },
-    }
-    for i, testCase := range testCases {
-        _, _, err := getMD5AndSha256SumBytes(testCase.md5Hex, testCase.sha256Hex)
-        if err != nil && testCase.success {
-            t.Errorf("Test %d: Expected success, but got failure %s", i+1, err)
-        }
-        if err == nil && !testCase.success {
-            t.Errorf("Test %d: Expected failure, but got success", i+1)
-        }
-    }
-}
@ -65,11 +65,10 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -215,11 +214,10 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
{bucketName, "test-object2", int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
{bucketName, "dir/test-object3", int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -329,11 +327,10 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -33,8 +33,7 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
sha256sum := ""
_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", int64(len("asiapics")), bytes.NewBufferString("asiapics"), nil, sha256sum)
_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", NewHashReader(bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -16,7 +16,15 @@

package cmd

import "io"
import (
"bytes"
"crypto/md5"
"encoding/hex"
"hash"
"io"

sha256 "github.com/minio/sha256-simd"
)

// ObjectLayer implements primitives for object API layer.
type ObjectLayer interface {
@ -34,7 +42,7 @@ type ObjectLayer interface {
// Object operations.
GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer) (err error)
GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error)
CopyObject(srcBucket, srcObject, destBucket, destObject string, metadata map[string]string) (objInfo ObjectInfo, err error)
DeleteObject(bucket, object string) error

@ -42,7 +50,7 @@ type ObjectLayer interface {
ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error)
CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64) (info PartInfo, err error)
PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (info PartInfo, err error)
PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (info PartInfo, err error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
AbortMultipartUpload(bucket, object, uploadID string) error
CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []completePart) (objInfo ObjectInfo, err error)
|
|||||||
ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
|
ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
|
||||||
delimiter string, maxUploads int) (ListMultipartsInfo, error)
|
delimiter string, maxUploads int) (ListMultipartsInfo, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HashReader writes what it reads from an io.Reader to an MD5 and SHA256 hash.Hash.
|
||||||
|
// HashReader verifies that the content of the io.Reader matches the expected checksums.
|
||||||
|
type HashReader struct {
|
||||||
|
src io.Reader
|
||||||
|
size int64
|
||||||
|
md5Hash, sha256Hash hash.Hash
|
||||||
|
md5Sum, sha256Sum string // hex representation
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHashReader returns a new HashReader computing the MD5 sum and SHA256 sum
|
||||||
|
// (if set) of the provided io.Reader.
|
||||||
|
func NewHashReader(src io.Reader, size int64, md5Sum, sha256Sum string) *HashReader {
|
||||||
|
var sha256Hash hash.Hash
|
||||||
|
if sha256Sum != "" {
|
||||||
|
sha256Hash = sha256.New()
|
||||||
|
}
|
||||||
|
if size >= 0 {
|
||||||
|
src = io.LimitReader(src, size)
|
||||||
|
} else {
|
||||||
|
size = -1
|
||||||
|
}
|
||||||
|
return &HashReader{
|
||||||
|
src: src,
|
||||||
|
size: size,
|
||||||
|
md5Sum: md5Sum,
|
||||||
|
sha256Sum: sha256Sum,
|
||||||
|
md5Hash: md5.New(),
|
||||||
|
sha256Hash: sha256Hash,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *HashReader) Read(p []byte) (n int, err error) {
|
||||||
|
n, err = r.src.Read(p)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if r.md5Hash != nil {
|
||||||
|
r.md5Hash.Write(p[:n])
|
||||||
|
}
|
||||||
|
if r.sha256Hash != nil {
|
||||||
|
r.sha256Hash.Write(p[:n])
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size returns the absolute number of bytes the HashReader
|
||||||
|
// will return during reading. It returns -1 for unlimited
|
||||||
|
// data.
|
||||||
|
func (r *HashReader) Size() int64 { return r.size }
|
||||||
|
|
||||||
|
// MD5 returns the MD5 sum of the processed data. Any
|
||||||
|
// further reads will change the MD5 sum.
|
||||||
|
func (r *HashReader) MD5() []byte { return r.md5Hash.Sum(nil) }
|
||||||
|
|
||||||
|
// Verify verifies if the computed MD5 sum - and SHA256 sum - are
|
||||||
|
// equal to the ones specified when creating the HashReader.
|
||||||
|
func (r *HashReader) Verify() error {
|
||||||
|
if r.sha256Hash != nil {
|
||||||
|
sha256Sum, err := hex.DecodeString(r.sha256Sum)
|
||||||
|
if err != nil {
|
||||||
|
return SHA256Mismatch{}
|
||||||
|
}
|
||||||
|
if !bytes.Equal(sha256Sum, r.sha256Hash.Sum(nil)) {
|
||||||
|
return errContentSHA256Mismatch
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if r.md5Hash != nil && r.md5Sum != "" {
|
||||||
|
md5Sum, err := hex.DecodeString(r.md5Sum)
|
||||||
|
if err != nil {
|
||||||
|
return BadDigest{r.md5Sum, hex.EncodeToString(r.md5Hash.Sum(nil))}
|
||||||
|
}
|
||||||
|
if sum := r.md5Hash.Sum(nil); !bytes.Equal(md5Sum, sum) {
|
||||||
|
return BadDigest{r.md5Sum, hex.EncodeToString(sum)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
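A minimal sketch, assuming a hypothetical helper living inside package cmd next to the definitions above (imports: bytes, io, io/ioutil), of the verification flow the ObjectLayer implementations perform: drain the HashReader, then call Verify. The digest is the MD5 of "hello", the same value used by the tests further below:

func exampleVerify() error {
	payload := []byte("hello")
	// MD5 of "hello"; passing "" for the SHA256 digest skips that check.
	r := NewHashReader(bytes.NewReader(payload), int64(len(payload)), "5d41402abc4b2a76b9719d911017c592", "")
	// The MD5/SHA256 states update as bytes flow through Read.
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		return err
	}
	// Compares the computed sums against the expected hex digests.
	return r.Verify()
}

Note that Verify only reports on whatever has been read so far, so callers read the stream to completion before invoking it.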
@ -64,9 +64,8 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"obj1", "obj1", nil},
{"obj2", "obj2", nil},
}
sha256sum := ""
for _, object := range testObjects {
_, err = obj.PutObject(testBuckets[0], object.name, int64(len(object.content)), bytes.NewBufferString(object.content), object.meta, sha256sum)
_, err = obj.PutObject(testBuckets[0], object.name, NewHashReader(bytes.NewBufferString(object.content), int64(len(object.content)), object.meta["etag"], ""), object.meta)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -605,11 +604,10 @@ func BenchmarkListObjects(b *testing.B) {
b.Fatal(err)
}

sha256sum := ""
// Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(bucket, key, int64(len(key)), bytes.NewBufferString(key), nil, sha256sum)
_, err = obj.PutObject(bucket, key, NewHashReader(bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
if err != nil {
b.Fatal(err)
}
@ -218,7 +218,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -232,7 +232,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [

// Object part upload should fail with quorum not available.
testCase := createPartCases[len(createPartCases)-1]
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err == nil {
t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
}
@ -347,7 +347,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH

// Validate all the test cases.
for i, testCase := range testCases {
actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, testCase.inputSHA256)
actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
// All the test cases above are expected to fail.
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -481,7 +481,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1336,7 +1336,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1576,7 +1576,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1825,7 +1825,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize, bytes.NewBufferString(part.inputReaderData), part.inputMd5, sha256sum)
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -154,7 +154,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
}

for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta, testCase.inputSHA256)
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.expectedError == nil {
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
@ -228,7 +228,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di

sha256sum := ""
for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta, sha256sum)
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -278,7 +278,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
InsufficientWriteQuorum{},
}

_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, testCase.intputDataSize, bytes.NewReader(testCase.inputData), testCase.inputMeta, sha256sum)
_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, NewHashReader(bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errorCause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
@ -310,9 +310,8 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
}

data := []byte("hello, world")
sha256sum := ""
// Create object.
_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, sha256sum)
_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil {
// Failed to create object, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -357,7 +356,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer.Write(fiveMBBytes)
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
sha256sum := ""
_, err = obj.PutObjectPart(bucket, object, uploadID, 1, int64(len(fiveMBBytes)), bytes.NewReader(fiveMBBytes), etag1, sha256sum)
_, err = obj.PutObjectPart(bucket, object, uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -368,7 +367,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer = md5.New()
md5Writer.Write(data)
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
_, err = obj.PutObjectPart(bucket, object, uploadID, 2, int64(len(data)), bytes.NewReader(data), etag2, sha256sum)
_, err = obj.PutObjectPart(bucket, object, uploadID, 2, NewHashReader(bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -19,7 +19,6 @@ package cmd
import (
"encoding/hex"
"fmt"
"io"
"path"
"runtime"
"strings"
@ -269,27 +268,3 @@ type byBucketName []BucketInfo
func (d byBucketName) Len() int { return len(d) }
func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name }

// rangeReader returns a Reader that reads from r
// but returns error after Max bytes read as errDataTooLarge.
// but returns error if reader exits before reading Min bytes
// errDataTooSmall.
type rangeReader struct {
Reader io.Reader // underlying reader
Min int64 // min bytes remaining
Max int64 // max bytes remaining
}

func (l *rangeReader) Read(p []byte) (n int, err error) {
n, err = l.Reader.Read(p)
l.Max -= int64(n)
l.Min -= int64(n)
if l.Max < 0 {
// If more data is available than what is expected we return error.
return 0, errDataTooLarge
}
if err == io.EOF && l.Min > 0 {
return 0, errDataTooSmall
}
return
}
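With rangeReader gone, the upper size bound on a PUT body is enforced by the io.LimitReader that NewHashReader wraps around its source (see the HashReader hunk above); an undersized body now presumably surfaces as a digest mismatch from Verify rather than an explicit errDataTooSmall. A standard-library sketch of the truncation mechanism:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	// io.LimitReader never reads past the given size; this mirrors the
	// src = io.LimitReader(src, size) line inside NewHashReader.
	limited := io.LimitReader(strings.NewReader("1234567890"), 5)
	data, _ := ioutil.ReadAll(limited)
	fmt.Printf("%q\n", data) // prints "12345"
}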
@ -17,8 +17,6 @@
package cmd

import (
"io/ioutil"
"strings"
"testing"
)

@ -131,34 +129,6 @@ func TestIsValidObjectName(t *testing.T) {
}
}

// Tests rangeReader.
func TestRangeReader(t *testing.T) {
testCases := []struct {
data string
minLen int64
maxLen int64
err error
}{
{"1234567890", 0, 15, nil},
{"1234567890", 0, 10, nil},
{"1234567890", 0, 5, toObjectErr(errDataTooLarge, "test", "test")},
{"123", 5, 10, toObjectErr(errDataTooSmall, "test", "test")},
{"123", 2, 10, nil},
}

for i, test := range testCases {
r := strings.NewReader(test.data)
_, err := ioutil.ReadAll(&rangeReader{
Reader: r,
Min: test.minLen,
Max: test.maxLen,
})
if toObjectErr(err, "test", "test") != test.err {
t.Fatalf("test %d failed: expected %v, got %v", i+1, test.err, err)
}
}
}

// Tests getCompleteMultipartMD5
func TestGetCompleteMultipartMD5(t *testing.T) {
testCases := []struct {
@ -546,7 +546,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}
// Create anonymous object.
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
case authTypeStreamingSigned:
// Initialize stream signature verifier.
reader, s3Error := newSignV4ChunkedReader(r)
@ -555,7 +555,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, reader, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(reader, size, metadata["etag"], sha256sum), metadata)
case authTypeSignedV2, authTypePresignedV2:
s3Error := isReqAuthenticatedV2(r)
if s3Error != ErrNone {
@ -563,7 +563,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
writeErrorResponse(w, s3Error, r.URL)
return
}
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
case authTypePresigned, authTypeSigned:
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
@ -574,7 +574,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
}
// Create object.
objInfo, err = objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
objInfo, err = objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
}
if err != nil {
errorIf(err, "Unable to create an object. %s", r.URL.Path)
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// No need to verify signature, anonymous request access is already allowed.
|
// No need to verify signature, anonymous request access is already allowed.
|
||||||
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
|
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
|
||||||
case authTypeStreamingSigned:
|
case authTypeStreamingSigned:
|
||||||
// Initialize stream signature verifier.
|
// Initialize stream signature verifier.
|
||||||
reader, s3Error := newSignV4ChunkedReader(r)
|
reader, s3Error := newSignV4ChunkedReader(r)
|
||||||
@ -845,7 +845,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
|||||||
writeErrorResponse(w, s3Error, r.URL)
|
writeErrorResponse(w, s3Error, r.URL)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, reader, incomingMD5, sha256sum)
|
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(reader, size, incomingMD5, sha256sum))
|
||||||
case authTypeSignedV2, authTypePresignedV2:
|
case authTypeSignedV2, authTypePresignedV2:
|
||||||
s3Error := isReqAuthenticatedV2(r)
|
s3Error := isReqAuthenticatedV2(r)
|
||||||
if s3Error != ErrNone {
|
if s3Error != ErrNone {
|
||||||
@ -853,7 +853,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
|||||||
writeErrorResponse(w, s3Error, r.URL)
|
writeErrorResponse(w, s3Error, r.URL)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
|
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
|
||||||
case authTypePresigned, authTypeSigned:
|
case authTypePresigned, authTypeSigned:
|
||||||
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
|
if s3Error := reqSignatureV4Verify(r, serverConfig.GetRegion()); s3Error != ErrNone {
|
||||||
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
errorIf(errSignatureMismatch, "%s", dumpRequest(r))
|
||||||
@ -864,7 +864,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
|
|||||||
if !skipContentSha256Cksum(r) {
|
if !skipContentSha256Cksum(r) {
|
||||||
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
|
sha256sum = r.Header.Get("X-Amz-Content-Sha256")
|
||||||
}
|
}
|
||||||
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, size, r.Body, incomingMD5, sha256sum)
|
partInfo, err = objectAPI.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(r.Body, size, incomingMD5, sha256sum))
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errorIf(err, "Unable to create object part.")
|
errorIf(err, "Unable to create object part.")
|
||||||
|
@ -72,11 +72,10 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
}{
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err := obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -220,11 +219,10 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err := obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1054,12 +1052,11 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength,
bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName,
NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1170,11 +1167,10 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// case - 1.
{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1512,11 +1508,10 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// used for anonymous HTTP request test.
{bucketName, anonObject, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
}
sha256sum := ""
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -2158,8 +2153,8 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
}
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize,
bytes.NewBufferString(part.inputReaderData), part.inputMd5, "")
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -2513,8 +2508,8 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
}
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, part.intputDataSize,
bytes.NewBufferString(part.inputReaderData), part.inputMd5, "")
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
NewHashReader(bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -2657,7 +2652,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, "")
_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -3361,8 +3356,8 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
uploadIDCopy := uploadID

// create an object Part, will be used to test list object parts.
_, err = obj.PutObjectPart(bucketName, testObject, uploadID, 1, int64(len("hello")), bytes.NewReader([]byte("hello")),
"5d41402abc4b2a76b9719d911017c592", "")
_, err = obj.PutObjectPart(bucketName, testObject, uploadID, 1, NewHashReader(bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
if err != nil {
t.Fatalf("Minio %s : %s.", instanceType, err)
}
@ -107,7 +107,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, c TestErr
expectedETaghex := getMD5Hash(data)

var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(data)), bytes.NewBuffer(data), expectedETaghex, "")
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, NewHashReader(bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
if err != nil {
c.Errorf("%s: <ERROR> %s", instanceType, err)
}
@ -157,7 +157,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, c TestErrHan

metadata["md5"] = expectedETaghex
var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, int64(len(randomString)), bytes.NewBufferString(randomString), expectedETaghex, "")
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, NewHashReader(bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -198,7 +198,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, c TestErrH
metadata := make(map[string]string)
metadata["etag"] = expectedETaghex
var objInfo ObjectInfo
objInfo, err = obj.PutObject("bucket", key, int64(len(randomString)), bytes.NewBufferString(randomString), metadata, "")
objInfo, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -251,7 +251,7 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
// check before paging occurs.
for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject("bucket", key, int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -271,7 +271,7 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
// check after paging occurs pages work.
for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject("bucket", key, int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", key, NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -288,11 +288,11 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {
}
// check paging with prefix at end returns less objects.
{
_, err = obj.PutObject("bucket", "newPrefix", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "newPrefix", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject("bucket", "newPrefix2", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "newPrefix2", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -330,11 +330,11 @@ func testPaging(obj ObjectLayer, instanceType string, c TestErrHandler) {

// check delimited results with delimiter and prefix.
{
_, err = obj.PutObject("bucket", "this/is/delimited", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "this/is/delimited", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
_, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -443,14 +443,16 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, c TestErrHan
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}

_, err = obj.PutObject("bucket", "object", int64(len("The list of parts was not in ascending order. The parts list must be specified in order by part number.")), bytes.NewBufferString("The list of parts was not in ascending order. The parts list must be specified in order by part number."), nil, "")
uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
length := int64(len(uploadContent))
_, err = obj.PutObject("bucket", "object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}

uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
length := int64(len(uploadContent))
length = int64(len(uploadContent))
_, err = obj.PutObject("bucket", "object", length, bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -472,7 +474,7 @@ func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(c *C) {

// Tests validate that bucket operation on non-existent bucket fails.
func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, c TestErrHandler) {
_, err := obj.PutObject("bucket1", "object", int64(len("one")), bytes.NewBufferString("one"), nil, "")
_, err := obj.PutObject("bucket1", "object", NewHashReader(bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
if err == nil {
c.Fatal("Expected error but found nil")
}
@ -519,7 +521,7 @@ func testPutObject(obj ObjectLayer, instanceType string, c TestErrHandler) {
}

var bytesBuffer1 bytes.Buffer
_, err = obj.PutObject("bucket", "object", length, readerEOF, nil, "")
_, err = obj.PutObject("bucket", "object", NewHashReader(readerEOF, length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -532,7 +534,7 @@ func testPutObject(obj ObjectLayer, instanceType string, c TestErrHandler) {
}

var bytesBuffer2 bytes.Buffer
_, err = obj.PutObject("bucket", "object", length, readerNoEOF, nil, "")
_, err = obj.PutObject("bucket", "object", NewHashReader(readerNoEOF, length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -560,7 +562,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, c TestErrHandle
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
upload might have been aborted or completed.`
length := int64(len(uploadContent))
_, err = obj.PutObject("bucket", "dir1/dir2/object", length, bytes.NewBufferString(uploadContent), nil, "")
_, err = obj.PutObject("bucket", "dir1/dir2/object", NewHashReader(bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil {
c.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@@ -740,10 +742,9 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-
-	_, err = obj.PutObject(bucketName, "dir1/dir3/object",
-		int64(len("The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.")),
-		bytes.NewBufferString("One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."), nil, "")
+	content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
+	length := int64(len(content))
+	_, err = obj.PutObject(bucketName, "dir1/dir3/object", NewHashReader(bytes.NewBufferString(content), length, "", ""), nil)

 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
@@ -787,7 +788,7 @@ func testContentType(obj ObjectLayer, instanceType string, c TestErrHandler) {
 	}
 	uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
 	// Test empty.
-	_, err = obj.PutObject("bucket", "minio.png", int64(len(uploadContent)), bytes.NewBufferString(uploadContent), nil, "")
+	_, err = obj.PutObject("bucket", "minio.png", NewHashReader(bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
 	if err != nil {
 		c.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
|
@@ -542,7 +542,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 	defer objectLock.Unlock()

 	sha256sum := ""
-	objInfo, err := objectAPI.PutObject(bucket, object, size, r.Body, metadata, sha256sum)
+	objInfo, err := objectAPI.PutObject(bucket, object, NewHashReader(r.Body, size, metadata["etag"], sha256sum), metadata)
 	if err != nil {
 		writeWebErrorResponse(w, err)
 		return
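
The Upload handler hunk above shows the call-site discipline the rest of the diff follows: a client-supplied etag rides along as the expected MD5, and no separate checksum arguments remain. A hypothetical round trip against the sketch above (same package; it additionally needs "bytes" and "io/ioutil" imported):

    // Demo only, not commit code: declare the expected digest up front,
    // drain the reader, then ask it to verify itself.
    func demoHashReaderRoundTrip() error {
    	payload := []byte("hello")
    	sum := md5.Sum(payload)
    	etag := hex.EncodeToString(sum[:])

    	r := NewHashReader(bytes.NewReader(payload), int64(len(payload)), etag, "")
    	if _, err := io.Copy(ioutil.Discard, r); err != nil { // stand-in for the erasure/disk write
    		return err
    	}
    	// Verify is only meaningful once the body has been fully consumed.
    	return r.Verify()
    }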
|
@@ -382,8 +382,8 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
 	}

 	data := bytes.Repeat([]byte("a"), objectSize)
-	_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data), map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
+	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
+	_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)

 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
@@ -476,16 +476,15 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
 	}

 	data := bytes.Repeat([]byte("a"), objectSize)
-	_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data),
-		map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
+	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
+	_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}

 	objectName = "a/object"
-	_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data),
-		map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
+	metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
+	_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -865,7 +864,8 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
 	}

 	content := []byte("temporary file's content")
-	_, err = obj.PutObject(bucketName, objectName, int64(len(content)), bytes.NewReader(content), map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}, "")
+	metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}
+	_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -957,9 +957,9 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa
 		t.Fatalf("%s : %s", instanceType, err)
 	}

-	obj.PutObject(bucket, "a/one", int64(len(fileOne)), strings.NewReader(fileOne), nil, "")
-	obj.PutObject(bucket, "a/b/two", int64(len(fileTwo)), strings.NewReader(fileTwo), nil, "")
-	obj.PutObject(bucket, "a/c/three", int64(len(fileThree)), strings.NewReader(fileThree), nil, "")
+	obj.PutObject(bucket, "a/one", NewHashReader(strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
+	obj.PutObject(bucket, "a/b/two", NewHashReader(strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
+	obj.PutObject(bucket, "a/c/three", NewHashReader(strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)

 	test := func(token string) (int, []byte) {
 		rec := httptest.NewRecorder()
@@ -1043,7 +1043,8 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
 	}

 	data := bytes.Repeat([]byte("a"), objectSize)
-	_, err = obj.PutObject(bucketName, objectName, int64(len(data)), bytes.NewReader(data), map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}, "")
+	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
+	_, err = obj.PutObject(bucketName, objectName, NewHashReader(bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
|
@@ -220,7 +220,7 @@ func TestListOnlineDisks(t *testing.T) {
 		t.Fatalf("Failed to make a bucket %v", err)
 	}

-	_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -358,7 +358,7 @@ func TestDisksWithAllParts(t *testing.T) {
 		t.Fatalf("Failed to make a bucket %v", err)
 	}

-	_, err = obj.PutObject(bucket, object, int64(len(data)), bytes.NewReader(data), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""), nil)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
|
@@ -491,7 +491,7 @@ func TestHealObjectXL(t *testing.T) {

 	var uploadedParts []completePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := obj.PutObjectPart(bucket, object, uploadID, partID, int64(len(data)), bytes.NewReader(data), "", "")
+		pInfo, err1 := obj.PutObjectPart(bucket, object, uploadID, partID, NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""))
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
|
@@ -56,14 +56,14 @@ func TestListObjectsHeal(t *testing.T) {

 	// Put 5 objects under sane dir
 	for i := 0; i < 5; i++ {
-		_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+		_, err = xl.PutObject(bucketName, "sane/"+objName+strconv.Itoa(i), NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 		if err != nil {
 			t.Fatalf("XL Object upload failed: <ERROR> %s", err)
 		}
 	}
 	// Put 500 objects under unsane/subdir dir
 	for i := 0; i < 5; i++ {
-		_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+		_, err = xl.PutObject(bucketName, "unsane/subdir/"+objName+strconv.Itoa(i), NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 		if err != nil {
 			t.Fatalf("XL Object upload failed: <ERROR> %s", err)
 		}
@@ -181,7 +181,7 @@ func TestListUploadsHeal(t *testing.T) {
 	// Upload a part.
 	data := bytes.Repeat([]byte("a"), 1024)
 	_, err = xl.PutObjectPart(bucketName, objName, uploadID, 1,
-		int64(len(data)), bytes.NewReader(data), "", "")
+		NewHashReader(bytes.NewReader(data), int64(len(data)), "", ""))
 	if err != nil {
 		t.Fatal(err)
 	}
|
@@ -64,11 +64,10 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
 		// case - 1.
 		{bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
 	}
-	sha256sum := ""
 	// iterate through the above set of inputs and upkoad the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(input.bucketName, input.objectName, input.contentLength, bytes.NewBuffer(input.textData), input.metaData, sha256sum)
+		_, err = obj.PutObject(input.bucketName, input.objectName, NewHashReader(bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -151,7 +150,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, testCase.intputDataSize, bytes.NewBufferString(testCase.inputReaderData), testCase.inputMd5, sha256sum)
+		_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, NewHashReader(bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
 		if perr != nil {
 			t.Fatalf("%s : %s", instanceType, perr)
 		}
|
@@ -17,10 +17,8 @@
 package cmd

 import (
-	"crypto/md5"
 	"encoding/hex"
 	"fmt"
-	"hash"
 	"io"
 	"io/ioutil"
 	"path"
@@ -29,7 +27,6 @@ import (
 	"time"

 	"github.com/minio/minio/pkg/mimedb"
-	"github.com/minio/sha256-simd"
 )

 // updateUploadJSON - add or remove upload ID info in all `uploads.json`.
@@ -558,7 +555,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
 		pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
 	}()

-	partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
+	partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, NewHashReader(pipeReader, length, "", ""))
 	if err != nil {
 		return pi, toObjectErr(err, dstBucket, dstObject)
 	}
@@ -575,7 +572,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
 // of the multipart transaction.
 //
 // Implements S3 compatible Upload Part API.
-func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
+func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (pi PartInfo, e error) {
 	if err := checkPutObjectPartArgs(bucket, object, xl); err != nil {
 		return pi, err
 	}
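
For orientation, the two ObjectLayer methods this commit reshapes end up with the signatures below, copied from this hunk and from the PutObject hunk further down. The wrapper interface is a sketch with a hypothetical name; the real interface is ObjectLayer, and ObjectInfo/PartInfo are the repository's existing types.

    // Sketch of the affected ObjectLayer surface: size and expected
    // checksums now travel inside *HashReader instead of as parameters.
    type objectPutter interface {
    	PutObject(bucket, object string, data *HashReader, metadata map[string]string) (ObjectInfo, error)
    	PutObjectPart(bucket, object, uploadID string, partID int, data *HashReader) (PartInfo, error)
    }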
@@ -623,31 +620,10 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	tmpPart := mustGetUUID()
 	tmpPartPath := path.Join(tmpPart, partSuffix)

-	// Initialize md5 writer.
-	md5Writer := md5.New()
-
-	writers := []io.Writer{md5Writer}
-
-	var sha256Writer hash.Hash
-	if sha256sum != "" {
-		sha256Writer = sha256.New()
-		writers = append(writers, sha256Writer)
-	}
-
-	mw := io.MultiWriter(writers...)
-
-	var lreader = data
-	// Limit the reader to its provided size > 0.
-	if size > 0 {
-		// This is done so that we can avoid erroneous clients sending
-		// more data than the set content size.
-		lreader = io.LimitReader(data, size)
-	}
-
 	// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
 	defer xl.deleteObject(minioMetaTmpBucket, tmpPart)
-	if size > 0 {
-		if pErr := xl.prepareFile(minioMetaTmpBucket, tmpPartPath, size, onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks); err != nil {
+	if data.Size() > 0 {
+		if pErr := xl.prepareFile(minioMetaTmpBucket, tmpPartPath, data.Size(), onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks); err != nil {
 			return pi, toObjectErr(pErr, bucket, object)

 		}
@@ -658,37 +634,19 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 		return pi, toObjectErr(err, bucket, object)
 	}
 	buffer := make([]byte, xlMeta.Erasure.BlockSize, 2*xlMeta.Erasure.BlockSize) // alloc additional space for parity blocks created while erasure coding
-	file, err := storage.CreateFile(io.TeeReader(lreader, mw), minioMetaTmpBucket, tmpPartPath, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
+	file, err := storage.CreateFile(data, minioMetaTmpBucket, tmpPartPath, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
 	if err != nil {
 		return pi, toObjectErr(err, bucket, object)
 	}

 	// Should return IncompleteBody{} error when reader has fewer bytes
 	// than specified in request header.
-	if file.Size < size {
+	if file.Size < data.Size() {
 		return pi, traceError(IncompleteBody{})
 	}

-	// For size == -1, perhaps client is sending in chunked encoding
-	// set the size as size that was actually written.
-	if size == -1 {
-		size = file.Size
-	}
-
-	// Calculate new md5sum.
-	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
-	if md5Hex != "" {
-		if newMD5Hex != md5Hex {
-			// Returns md5 mismatch.
-			return pi, traceError(BadDigest{md5Hex, newMD5Hex})
-		}
-	}
-
-	if sha256sum != "" {
-		newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
-		if newSHA256sum != sha256sum {
-			return pi, traceError(SHA256Mismatch{})
-		}
+	if err = data.Verify(); err != nil {
+		return pi, toObjectErr(err, bucket, object)
 	}

 	// post-upload check (write) lock
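
With the inline checksum plumbing gone, the part upload above reduces to a store-then-verify sequence: stream the HashReader into the erasure layer, compare the bytes written against the declared size, then let the reader validate its own digests. The same ordering in a stand-alone form (hypothetical helper built on the sketch; the real code returns IncompleteBody and toObjectErr instead of plain errors):

    // Continuing the sketch: write everything, size-check, then Verify.
    func storeThenVerify(data *HashReader, sink io.Writer) (int64, error) {
    	written, err := io.Copy(sink, data) // checksums accumulate inside Read
    	if err != nil {
    		return 0, err
    	}
    	if data.Size() > 0 && written < data.Size() {
    		return 0, fmt.Errorf("incomplete body: wrote %d of %d bytes", written, data.Size())
    	}
    	return written, data.Verify() // digest mismatches surface here
    }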
@@ -730,7 +688,8 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	xlMeta.Stat.ModTime = UTCNow()

 	// Add the current part.
-	xlMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
+	md5Hex := hex.EncodeToString(data.MD5())
+	xlMeta.AddObjectPart(partID, partSuffix, md5Hex, file.Size)

 	for i, disk := range onlineDisks {
 		if disk == OfflineDisk {
@@ -762,7 +721,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
 	return PartInfo{
 		PartNumber:   partID,
 		LastModified: fi.ModTime,
-		ETag:         newMD5Hex,
+		ETag:         md5Hex,
 		Size:         fi.Size,
 	}, nil
 }
|
@@ -17,9 +17,7 @@
 package cmd

 import (
-	"crypto/md5"
 	"encoding/hex"
-	"hash"
 	"io"
 	"path"
 	"strconv"
@@ -29,7 +27,6 @@ import (
 	"github.com/minio/minio/pkg/bpool"
 	"github.com/minio/minio/pkg/mimedb"
 	"github.com/minio/minio/pkg/objcache"
-	"github.com/minio/sha256-simd"
 )

 // list all errors which can be ignored in object operations.
@@ -117,7 +114,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 		pipeWriter.Close() // Close writer explicitly signalling we wrote all data.
 	}()

-	objInfo, err := xl.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
+	objInfo, err := xl.PutObject(dstBucket, dstObject, NewHashReader(pipeReader, length, metadata["etag"], ""), metadata)
 	if err != nil {
 		return oi, toObjectErr(err, dstBucket, dstObject)
 	}
@@ -432,18 +429,18 @@ func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject
 // until EOF, erasure codes the data across all disk and additionally
 // writes `xl.json` which carries the necessary metadata for future
 // object operations.
-func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, err error) {
+func (xl xlObjects) PutObject(bucket string, object string, data *HashReader, metadata map[string]string) (objInfo ObjectInfo, err error) {
 	// This is a special case with size as '0' and object ends with
 	// a slash separator, we treat it like a valid operation and
 	// return success.
-	if isObjectDir(object, size) {
+	if isObjectDir(object, data.Size()) {
 		// Check if an object is present as one of the parent dir.
 		// -- FIXME. (needs a new kind of lock).
 		// -- FIXME (this also causes performance issue when disks are down).
 		if xl.parentDirIsObject(bucket, path.Dir(object)) {
 			return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
 		}
-		return dirObjectInfo(bucket, object, size, metadata), nil
+		return dirObjectInfo(bucket, object, data.Size(), metadata), nil
 	}

 	// Validate put object input args.
@@ -466,54 +463,27 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 	uniqueID := mustGetUUID()
 	tempObj := uniqueID

-	// Initialize md5 writer.
-	md5Writer := md5.New()
-
-	writers := []io.Writer{md5Writer}
-
-	var sha256Writer hash.Hash
-	if sha256sum != "" {
-		sha256Writer = sha256.New()
-		writers = append(writers, sha256Writer)
-	}
+	// Limit the reader to its provided size if specified.
+	var reader io.Reader = data

 	// Proceed to set the cache.
 	var newBuffer io.WriteCloser

 	// If caching is enabled, proceed to set the cache.
-	if size > 0 && xl.objCacheEnabled {
+	if data.Size() > 0 && xl.objCacheEnabled {
 		// PutObject invalidates any previously cached object in memory.
 		xl.objCache.Delete(path.Join(bucket, object))

 		// Create a new entry in memory of size.
-		newBuffer, err = xl.objCache.Create(path.Join(bucket, object), size)
-		if err == nil {
-			// Create a multi writer to write to both memory and client response.
-			writers = append(writers, newBuffer)
-		}
+		newBuffer, err = xl.objCache.Create(path.Join(bucket, object), data.Size())
 		// Ignore error if cache is full, proceed to write the object.
 		if err != nil && err != objcache.ErrCacheFull {
 			// For any other error return here.
 			return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
 		}
+		reader = io.TeeReader(data, newBuffer)
 	}

-	mw := io.MultiWriter(writers...)
-
-	// Limit the reader to its provided size if specified.
-	var limitDataReader io.Reader
-	if size > 0 {
-		// This is done so that we can avoid erroneous clients sending
-		// more data than the set content size.
-		limitDataReader = io.LimitReader(data, size)
-	} else {
-		// else we read till EOF.
-		limitDataReader = data
-	}
-
-	// Tee reader combines incoming data stream and md5, data read from input stream is written to md5.
-	teeReader := io.TeeReader(limitDataReader, mw)
-
 	// Initialize parts metadata
 	partsMetadata := make([]xlMetaV1, len(xl.storageDisks))

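
Note how the caching path changed shape in the hunk above: the old code appended the cache buffer to an io.MultiWriter next to the hash writers, while the new code simply tees the HashReader into the cache, so hashing, caching and erasure coding all share a single read pass. The idiom in isolation (a bytes.Buffer standing in for the objcache entry; continuing the sketch, with "bytes" imported):

    // Every byte the erasure writer pulls from the returned reader is
    // mirrored into the cache buffer as a side effect of reading.
    func teeIntoCache(data *HashReader, cache *bytes.Buffer) io.Reader {
    	return io.TeeReader(data, cache)
    }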
@@ -550,7 +520,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 		// Calculate the size of the current part, if size is unknown, curPartSize wil be unknown too.
 		// allowEmptyPart will always be true if this is the first part and false otherwise.
 		var curPartSize int64
-		curPartSize, err = getPartSizeFromIdx(size, globalPutPartSize, partIdx)
+		curPartSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx)
 		if err != nil {
 			return ObjectInfo{}, toObjectErr(err, bucket, object)
 		}
@@ -564,7 +534,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 		}
 	}

-	file, erasureErr := storage.CreateFile(io.LimitReader(teeReader, globalPutPartSize), minioMetaTmpBucket, tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
+	file, erasureErr := storage.CreateFile(io.LimitReader(reader, globalPutPartSize), minioMetaTmpBucket, tempErasureObj, buffer, DefaultBitrotAlgorithm, xl.writeQuorum)
 	if erasureErr != nil {
 		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
 	}
@@ -596,7 +566,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.

 		// Check part size for the next index.
 		var partSize int64
-		partSize, err = getPartSizeFromIdx(size, globalPutPartSize, partIdx+1)
+		partSize, err = getPartSizeFromIdx(data.Size(), globalPutPartSize, partIdx+1)
 		if err != nil {
 			return ObjectInfo{}, toObjectErr(err, bucket, object)
 		}
@@ -605,25 +575,17 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 		}
 	}

-	// For size == -1, perhaps client is sending in chunked encoding
-	// set the size as size that was actually written.
-	if size == -1 {
-		size = sizeWritten
-	} else {
-		// Check if stored data satisfies what is asked
-		if sizeWritten < size {
-			return ObjectInfo{}, traceError(IncompleteBody{})
-		}
+	if size := data.Size(); size > 0 && sizeWritten < data.Size() {
+		return ObjectInfo{}, traceError(IncompleteBody{})
 	}

 	// Save additional erasureMetadata.
 	modTime := UTCNow()

-	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
-	// Update the md5sum if not set with the newly calculated one.
-	if len(metadata["etag"]) == 0 {
-		metadata["etag"] = newMD5Hex
+	if err = data.Verify(); err != nil {
+		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
+	metadata["etag"] = hex.EncodeToString(data.MD5())

 	// Guess content-type from the extension if possible.
 	if metadata["content-type"] == "" {
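
A small semantic shift is worth flagging in the hunk above: the old path kept a client-supplied metadata["etag"] and only filled it in when missing, then compared it against the computed MD5 by hand; the new path delegates that comparison to data.Verify() and always stores the MD5 actually computed over the written bytes as the etag.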
@@ -634,22 +596,6 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 		}
 	}

-	// md5Hex representation.
-	md5Hex := metadata["etag"]
-	if md5Hex != "" {
-		if newMD5Hex != md5Hex {
-			// Returns md5 mismatch.
-			return ObjectInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
-		}
-	}
-
-	if sha256sum != "" {
-		newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
-		if newSHA256sum != sha256sum {
-			return ObjectInfo{}, traceError(SHA256Mismatch{})
-		}
-	}
-
 	if xl.isObject(bucket, object) {
 		// Rename if an object already exists to temporary location.
 		newUniqueID := mustGetUUID()
@@ -670,7 +616,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.
 	// Update `xl.json` content on each disks.
 	for index := range partsMetadata {
 		partsMetadata[index].Meta = metadata
-		partsMetadata[index].Stat.Size = size
+		partsMetadata[index].Stat.Size = sizeWritten
 		partsMetadata[index].Stat.ModTime = modTime
 	}

@@ -686,7 +632,7 @@ func (xl xlObjects) PutObject(bucket string, object string, size int64, data io.

 	// Once we have successfully renamed the object, Close the buffer which would
 	// save the object on cache.
-	if size > 0 && xl.objCacheEnabled && newBuffer != nil {
+	if sizeWritten > 0 && xl.objCacheEnabled && newBuffer != nil {
 		newBuffer.Close()
 	}

|
@@ -53,12 +53,12 @@ func TestRepeatPutObjectPart(t *testing.T) {
 	}
 	fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
 	md5Hex := getMD5Hash(fiveMBBytes)
-	_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
+	_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
 	if err != nil {
 		t.Fatal(err)
 	}
 	// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
-	_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, 5*humanize.MiByte, bytes.NewReader(fiveMBBytes), md5Hex, "")
+	_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, NewHashReader(bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -92,7 +92,7 @@ func TestXLDeleteObjectBasic(t *testing.T) {
 	}

 	// Create object "obj" under bucket "bucket" for Test 7 to pass
-	_, err = xl.PutObject("bucket", "obj", int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+	_, err = xl.PutObject("bucket", "obj", NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 	if err != nil {
 		t.Fatalf("XL Object upload failed: <ERROR> %s", err)
 	}
@@ -128,7 +128,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
 	bucket := "bucket"
 	object := "object"
 	// Create object "obj" under bucket "bucket".
-	_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -143,7 +143,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
 	}

 	// Create "obj" under "bucket".
-	_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -178,7 +178,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
 	bucket := "bucket"
 	object := "object"
 	// Create "object" under "bucket".
-	_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -230,7 +230,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
 	bucket := "bucket"
 	object := "object"
 	// Create "object" under "bucket".
-	_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -253,7 +253,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
 		}
 	}
 	// Upload new content to same object "object"
-	_, err = obj.PutObject(bucket, object, int64(len("abcd")), bytes.NewReader([]byte("abcd")), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
 	err = errorCause(err)
 	if err != toObjectErr(errXLWriteQuorum, bucket, object) {
 		t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
@@ -294,7 +294,7 @@ func TestHealing(t *testing.T) {
 		t.Fatal(err)
 	}

-	_, err = obj.PutObject(bucket, object, length, bytes.NewReader(data), nil, "")
+	_, err = obj.PutObject(bucket, object, NewHashReader(bytes.NewReader(data), length, "", ""), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
|