mirror of https://github.com/minio/minio.git
Fix: Preserve MD5Sum for SSE encrypted objects (#6680)
To conform with the AWS S3 spec on ETags for SSE-S3 encrypted objects, encrypt the client-sent MD5Sum and store it on the backend as the ETag. Extend this behavior to SSE-C encrypted objects.
This commit is contained in:
parent 7e1661f4fa
commit 5f6d717b7a
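The hunks that follow replace `*hash.Reader` arguments with `*PutObjReader` across the object layer, constructing it via `NewPutObjReader(rawReader, encReader, objectEncryptionKey)`. A self-contained Go sketch of what such a wrapper could look like — the field layout and constructor body here are assumptions for illustration, not MinIO's actual definition:

package main

import (
	"fmt"
	"io"
	"strings"
)

// PutObjReader (sketch): wraps the stream a backend should persist. For
// encrypted PUTs it also keeps the raw client stream and the object
// encryption key, so the client-sent MD5 sum can be sealed and stored
// as the backend ETag.
type PutObjReader struct {
	io.Reader           // stream to persist (encrypted when sealKey != nil)
	rawReader io.Reader // original client stream (nil when not encrypting)
	sealKey   []byte    // object encryption key (nil when not encrypting)
}

// NewPutObjReader mirrors the two call shapes seen throughout the diff:
// NewPutObjReader(rawReader, nil, nil) for plain PUTs and
// NewPutObjReader(rawReader, encReader, key) for SSE PUTs.
func NewPutObjReader(raw io.Reader, enc io.Reader, key []byte) *PutObjReader {
	if enc == nil {
		return &PutObjReader{Reader: raw}
	}
	return &PutObjReader{Reader: enc, rawReader: raw, sealKey: key}
}

func main() {
	plain := NewPutObjReader(strings.NewReader("hello"), nil, nil)
	b, _ := io.ReadAll(plain)
	fmt.Printf("%s\n", b)
}

With the key carried alongside both streams, a backend can store the sealed MD5 as the object's ETag and later unseal it for listings, which is what the handler changes below rely on.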
@@ -309,7 +309,7 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		objectName := fmt.Sprintf("%s-%d", objName, i)
 		_, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
-			mustGetHashReader(t, bytes.NewReader([]byte("hello")),
+			mustGetPutObjReader(t, bytes.NewReader([]byte("hello")),
 				int64(len("hello")), "", ""), nil, ObjectOptions{})
 		if err != nil {
 			t.Fatalf("Failed to create %s - %v", objectName,
@@ -328,7 +328,7 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
 	}

 	_, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
-		uploadID, 3, mustGetHashReader(t, bytes.NewReader(
+		uploadID, 3, mustGetPutObjReader(t, bytes.NewReader(
 			[]byte("hello")), int64(len("hello")), "", ""), ObjectOptions{})
 	if err != nil {
 		t.Fatalf("mp put error: %v", err)

@@ -61,7 +61,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 	for i := 0; i < b.N; i++ {
 		// insert the object.
 		objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-			mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
+			mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -123,7 +123,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
 		md5hex = getMD5Hash([]byte(textPartData))
 		var partInfo PartInfo
 		partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
-			mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), opts)
+			mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), opts)
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -204,7 +204,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
 		// insert the object.
 		var objInfo ObjectInfo
 		objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-			mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
+			mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -304,7 +304,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 		for pb.Next() {
 			// insert the object.
 			objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-				mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
+				mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
 			if err != nil {
 				b.Fatal(err)
 			}
@@ -344,7 +344,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
 		// insert the object.
 		var objInfo ObjectInfo
 		objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-			mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
+			mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
 		if err != nil {
 			b.Fatal(err)
 		}

@@ -116,6 +116,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
 			// Set the info.Size to the actualSize.
 			listObjectsV2Info.Objects[i].Size = actualSize
 		} else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
+			listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
 			listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
 			if err != nil {
 				writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
@@ -198,6 +199,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
 			// Set the info.Size to the actualSize.
 			listObjectsInfo.Objects[i].Size = actualSize
 		} else if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
+			listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
 			listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].DecryptedSize()
 			if err != nil {
 				writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)

@@ -613,7 +613,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
+	rawReader := hashReader
+	pReader := NewPutObjReader(rawReader, nil, nil)
+	var objectEncryptionKey []byte
 	if objectAPI.IsEncryptionSupported() {
 		if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
 			var reader io.Reader
@@ -625,7 +627,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 					return
 				}
 			}
-			reader, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
+			reader, objectEncryptionKey, err = newEncryptReader(hashReader, key, bucket, object, metadata, crypto.S3.IsRequested(formValues))
 			if err != nil {
 				writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 				return
@@ -636,10 +638,11 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 				writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 				return
 			}
+			pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
 		}
 	}

-	objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata, ObjectOptions{})
+	objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, metadata, ObjectOptions{})
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return

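The three PostPolicyBucketHandler hunks above establish the handler-side pattern: start with a plain NewPutObjReader(rawReader, nil, nil) and, only when SSE-C or SSE-S3 applies, rebuild the reader with the encrypted stream plus the object key. A compilable distillation of that control flow — the `encrypt` callback is a placeholder for newEncryptReader, and nothing here is MinIO's actual API:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// sealedPut mirrors the handler flow: default to storing the raw stream,
// and only when encryption is requested swap in the encrypted stream plus
// the object encryption key.
func sealedPut(raw io.Reader, encrypting bool,
	encrypt func(io.Reader) (io.Reader, []byte)) (store io.Reader, key []byte) {

	store = raw // pReader := NewPutObjReader(rawReader, nil, nil)
	if encrypting {
		enc, objectEncryptionKey := encrypt(raw)
		// pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
		store, key = enc, objectEncryptionKey
	}
	return store, key
}

func main() {
	toyEncrypt := func(r io.Reader) (io.Reader, []byte) {
		var buf bytes.Buffer
		io.Copy(&buf, r) // placeholder: real code wraps with sio.EncryptReader
		return &buf, []byte("object-key")
	}
	store, key := sealedPut(strings.NewReader("form data"), true, toyEncrypt)
	b, _ := io.ReadAll(store)
	fmt.Printf("stored %d bytes, key=%q\n", len(b), key)
}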
@@ -625,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
 	for i := 0; i < 10; i++ {
 		objectName := "test-object-" + strconv.Itoa(i)
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil, ObjectOptions{})
+		_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)

@@ -70,7 +70,7 @@ func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data
 		return err
 	}

-	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, hashReader, nil, ObjectOptions{})
+	_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), nil, ObjectOptions{})
 	return err
 }

@@ -30,7 +30,6 @@ import (

 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/disk"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/lock"
 )

@@ -258,7 +257,7 @@ func (cfs *cacheFSObjects) IsOnline() bool {
 }

 // Caches the object to disk
-func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) error {
+func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) error {
 	if cfs.diskUsageHigh() {
 		select {
 		case cfs.purgeChan <- struct{}{}:
@@ -301,7 +300,8 @@ func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bo

 // Identical to fs PutObject operation except that it uses ETag in metadata
 // headers.
-func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
+func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
+	data := r.Reader
 	fs := cfs.FSObjects
 	// Lock the object.
 	objectLock := fs.nsMutex.NewNSLock(bucket, object)

@@ -60,16 +60,16 @@ type cacheObjects struct {
 	GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
 	GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
 	GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
-	PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
+	PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	DeleteObjectFn func(ctx context.Context, bucket, object string) error
 	ListObjectsFn func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
 	ListObjectsV2Fn func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
 	ListBucketsFn func(ctx context.Context) (buckets []BucketInfo, err error)
 	GetBucketInfoFn func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
 	NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
-	PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
+	PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
 	AbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string) error
-	CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
+	CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	DeleteBucketFn func(ctx context.Context, bucket string) error
 }

@@ -92,14 +92,14 @@ type CacheObjectLayer interface {
 	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
 	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
 	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
-	PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
+	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	DeleteObject(ctx context.Context, bucket, object string) error

 	// Multipart operations.
 	NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
-	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
+	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
 	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
-	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
+	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)

 	// Storage operations.
 	StorageInfo(ctx context.Context) CacheStorageInfo
@@ -247,7 +247,7 @@ func (c cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string,

 	go func() {
 		opts := ObjectOptions{}
-		putErr := dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(bkReader.ObjInfo), opts)
+		putErr := dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(bkReader.ObjInfo), opts)
 		// close the write end of the pipe, so the error gets
 		// propagated to getObjReader
 		pipeWriter.CloseWithError(putErr)
@@ -320,7 +320,7 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
 		gerr := GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts)
 		pipeWriter.CloseWithError(gerr) // Close writer explicitly signaling we wrote all data.
 	}()
-	err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo), opts)
+	err = dcache.Put(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), c.getMetadata(objInfo), opts)
 	if err != nil {
 		return err
 	}
@@ -644,8 +644,9 @@ func (c cacheObjects) isCacheExclude(bucket, object string) bool {
 }

 // PutObject - caches the uploaded object for single Put operations
-func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	putObjectFn := c.PutObjectFn
+	data := r.Reader
 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
 		// disk cache could not be located,execute backend call.
@@ -666,20 +667,20 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
 	objInfo = ObjectInfo{}
 	// Initialize pipe to stream data to backend
 	pipeReader, pipeWriter := io.Pipe()
-	hashReader, err := hash.NewReader(pipeReader, size, r.MD5HexString(), r.SHA256HexString(), r.ActualSize())
+	hashReader, err := hash.NewReader(pipeReader, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
 	if err != nil {
 		return ObjectInfo{}, err
 	}
 	// Initialize pipe to stream data to cache
 	rPipe, wPipe := io.Pipe()
-	cHashReader, err := hash.NewReader(rPipe, size, r.MD5HexString(), r.SHA256HexString(), r.ActualSize())
+	cHashReader, err := hash.NewReader(rPipe, size, data.MD5HexString(), data.SHA256HexString(), data.ActualSize())
 	if err != nil {
 		return ObjectInfo{}, err
 	}
 	oinfoCh := make(chan ObjectInfo)
 	errCh := make(chan error)
 	go func() {
-		oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata, opts)
+		oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), metadata, opts)
 		if perr != nil {
 			pipeWriter.CloseWithError(perr)
 			wPipe.CloseWithError(perr)
@@ -692,14 +693,14 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
 	}()

 	go func() {
-		if err = dcache.Put(ctx, bucket, object, cHashReader, metadata, opts); err != nil {
+		if err = dcache.Put(ctx, bucket, object, NewPutObjReader(cHashReader, nil, nil), metadata, opts); err != nil {
 			wPipe.CloseWithError(err)
 			return
 		}
 	}()

 	mwriter := io.MultiWriter(pipeWriter, wPipe)
-	_, err = io.Copy(mwriter, r)
+	_, err = io.Copy(mwriter, data)
 	if err != nil {
 		err = <-errCh
 		return objInfo, err
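The cacheObjects.PutObject hunks above fan one upload out to the backend and the disk cache with two io.Pipes and an io.MultiWriter. A self-contained sketch of that tee pattern, using plain io types rather than MinIO's readers:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("object payload")

	backendR, backendW := io.Pipe() // stream to backend
	cacheR, cacheW := io.Pipe()     // stream to cache

	done := make(chan string, 2)
	go func() { // stand-in for putObjectFn(..., NewPutObjReader(hashReader, nil, nil), ...)
		b, _ := io.ReadAll(backendR)
		done <- fmt.Sprintf("backend got %d bytes", len(b))
	}()
	go func() { // stand-in for dcache.Put(..., NewPutObjReader(cHashReader, nil, nil), ...)
		b, _ := io.ReadAll(cacheR)
		done <- fmt.Sprintf("cache got %d bytes", len(b))
	}()

	// One copy feeds both consumers; an error on either side is propagated
	// in the real code via CloseWithError on the corresponding pipe.
	mw := io.MultiWriter(backendW, cacheW)
	_, err := io.Copy(mw, src)
	backendW.CloseWithError(err)
	cacheW.CloseWithError(err)

	fmt.Println(<-done)
	fmt.Println(<-done)
}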
@@ -734,16 +735,17 @@ func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object str
 }

 // PutObjectPart - uploads part to backend and cache simultaneously.
-func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
+func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
+	data := r.Reader
 	putObjectPartFn := c.PutObjectPartFn
 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
 		// disk cache could not be located,execute backend call.
-		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
+		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
 	}

 	if c.isCacheExclude(bucket, object) {
-		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
+		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
 	}

 	// make sure cache has at least size space available
@@ -753,7 +755,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
 		case dcache.purgeChan <- struct{}{}:
 		default:
 		}
-		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
+		return putObjectPartFn(ctx, bucket, object, uploadID, partID, r, opts)
 	}

 	info = PartInfo{}
@@ -772,7 +774,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
 	pinfoCh := make(chan PartInfo)
 	errorCh := make(chan error)
 	go func() {
-		info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader, opts)
+		info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader, nil, nil), opts)
 		if err != nil {
 			close(pinfoCh)
 			pipeWriter.CloseWithError(err)
@@ -784,7 +786,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
 		pinfoCh <- info
 	}()
 	go func() {
-		if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader, opts); perr != nil {
+		if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, NewPutObjReader(cHashReader, nil, nil), opts); perr != nil {
 			wPipe.CloseWithError(perr)
 			return
 		}
@@ -826,25 +828,25 @@ func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
 }

 // CompleteMultipartUpload - completes multipart upload operation on backend and cache.
-func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
+func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	completeMultipartUploadFn := c.CompleteMultipartUploadFn

 	if c.isCacheExclude(bucket, object) {
-		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
+		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
 	}

 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
 		// disk cache could not be located,execute backend call.
-		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
+		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
 	}
 	// perform backend operation
-	objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts)
+	objInfo, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
 	if err != nil {
 		return
 	}
 	// create new multipart upload in cache with same uploadID
-	dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
+	dcache.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
 	return
 }

@@ -969,7 +971,7 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
 		GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
 			return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
 		},
-		PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+		PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 			return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata, opts)
 		},
 		DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
@@ -990,14 +992,14 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
 		NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
 			return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata, opts)
 		},
-		PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
+		PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
 			return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 		},
 		AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
 			return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
 		},
-		CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
-			return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
+		CompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+			return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
 		},
 		DeleteBucketFn: func(ctx context.Context, bucket string) error {
 			return newObjectLayerFn().DeleteBucket(ctx, bucket)

@@ -199,7 +199,7 @@ func TestDiskCache(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
+	err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -275,12 +275,12 @@ func TestDiskCacheMaxUse(t *testing.T) {
 		t.Fatal(err)
 	}
 	if !cache.diskAvailable(int64(size)) {
-		err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
+		err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
 		if err != errDiskFull {
 			t.Fatal("Cache max-use limit violated.")
 		}
 	} else {
-		err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
+		err = cache.Put(ctx, bucketName, objectName, NewPutObjReader(hashReader, nil, nil), httpMeta, opts)
 		if err != nil {
 			t.Fatal(err)
 		}

@@ -21,7 +21,6 @@ import (
 	"io"
 	"net/http"

-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/madmin"
 	"github.com/minio/minio/pkg/policy"
 )
@@ -72,7 +71,7 @@ func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object s
 	return
 }

-func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	return
 }

@@ -96,7 +95,7 @@ func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcO
 	return
 }

-func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
+func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
 	return
 }

@@ -108,7 +107,7 @@ func (api *DummyObjectLayer) AbortMultipartUpload(ctx context.Context, bucket, o
 	return
 }

-func (api *DummyObjectLayer) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
+func (api *DummyObjectLayer) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	return
 }

@@ -22,6 +22,7 @@ import (
 	"crypto/rand"
 	"crypto/subtle"
 	"encoding/binary"
+	"encoding/hex"
 	"errors"
 	"io"
 	"net/http"
@@ -205,21 +206,20 @@ func newEncryptMetadata(key []byte, bucket, object string, metadata map[string]s
 	sealedKey = objectKey.Seal(extKey, crypto.GenerateIV(rand.Reader), crypto.SSEC.String(), bucket, object)
 	crypto.SSEC.CreateMetadata(metadata, sealedKey)
 	return objectKey[:], nil

 }

-func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (io.Reader, error) {
+func newEncryptReader(content io.Reader, key []byte, bucket, object string, metadata map[string]string, sseS3 bool) (r io.Reader, encKey []byte, err error) {
 	objectEncryptionKey, err := newEncryptMetadata(key, bucket, object, metadata, sseS3)
 	if err != nil {
-		return nil, err
+		return nil, encKey, err
 	}

 	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20})
 	if err != nil {
-		return nil, crypto.ErrInvalidCustomerKey
+		return nil, encKey, crypto.ErrInvalidCustomerKey
 	}

-	return reader, nil
+	return reader, objectEncryptionKey, nil
 }

 // set new encryption metadata from http request headers for SSE-C and generated key from KMS in the case of
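newEncryptReader now returns the object encryption key alongside the encrypting reader, so callers can hand the key to NewPutObjReader for ETag sealing. A toy illustration of that (io.Reader, []byte, error) shape — the XOR stream here is only a stand-in for sio's DARE encryption, which this is not:

package main

import (
	"crypto/rand"
	"fmt"
	"io"
	"strings"
)

// xorReader is a stand-in for sio.EncryptReader in this sketch.
type xorReader struct {
	r   io.Reader
	key byte
}

func (x *xorReader) Read(p []byte) (int, error) {
	n, err := x.r.Read(p)
	for i := 0; i < n; i++ {
		p[i] ^= x.key
	}
	return n, err
}

// newEncryptReader (sketch): derive a fresh object key, wrap the content
// stream, and return the key to the caller — mirroring the new signature
// introduced by this commit.
func newEncryptReader(content io.Reader) (io.Reader, []byte, error) {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return nil, nil, err
	}
	return &xorReader{r: content, key: key[0]}, key, nil
}

func main() {
	enc, key, err := newEncryptReader(strings.NewReader("secret"))
	if err != nil {
		panic(err)
	}
	ct, _ := io.ReadAll(enc)
	fmt.Printf("ciphertext=%x keylen=%d\n", ct, len(key))
}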
@@ -241,19 +241,18 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[
 // EncryptRequest takes the client provided content and encrypts the data
 // with the client provided key. It also marks the object as client-side-encrypted
 // and sets the correct headers.
-func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (io.Reader, error) {
+func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, metadata map[string]string) (reader io.Reader, objEncKey []byte, err error) {

 	var (
 		key []byte
-		err error
 	)
 	if crypto.S3.IsRequested(r.Header) && crypto.SSEC.IsRequested(r.Header) {
-		return nil, crypto.ErrIncompatibleEncryptionMethod
+		return nil, objEncKey, crypto.ErrIncompatibleEncryptionMethod
 	}
 	if crypto.SSEC.IsRequested(r.Header) {
 		key, err = ParseSSECustomerRequest(r)
 		if err != nil {
-			return nil, err
+			return nil, objEncKey, err
 		}
 	}
 	return newEncryptReader(content, key, bucket, object, metadata, crypto.S3.IsRequested(r.Header))
@@ -578,7 +577,6 @@ func (d *DecryptBlocksReader) Read(p []byte) (int, error) {

 		d.partDecRelOffset += int64(n1)
 	}
-
 	return len(p), nil
 }

@@ -813,7 +811,6 @@ func DecryptBlocksRequest(client io.Writer, r *http.Request, bucket, object stri

 // getEncryptedMultipartsOffsetLength - fetch sequence number, encrypted start offset and encrypted length.
 func getEncryptedMultipartsOffsetLength(offset, length int64, obj ObjectInfo) (uint32, int64, int64) {
-
 	// Calculate encrypted offset of a multipart object
 	computeEncOffset := func(off int64, obj ObjectInfo) (seqNumber uint32, encryptedOffset int64, err error) {
 		var curPartEndOffset uint64
@@ -909,6 +906,64 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
 	return size, nil
 }

+// For encrypted objects, the ETag sent by client if available
+// is stored in encrypted form in the backend. Decrypt the ETag
+// if ETag was previously encrypted.
+func getDecryptedETag(headers http.Header, objInfo ObjectInfo, copySource bool) (decryptedETag string) {
+	var (
+		key [32]byte
+		err error
+	)
+	// If ETag is contentMD5Sum return it as is.
+	if len(objInfo.ETag) == 32 {
+		return objInfo.ETag
+	}
+
+	if crypto.IsMultiPart(objInfo.UserDefined) {
+		return objInfo.ETag
+	}
+	if crypto.SSECopy.IsRequested(headers) {
+		key, err = crypto.SSECopy.ParseHTTP(headers)
+		if err != nil {
+			return objInfo.ETag
+		}
+	}
+	// As per AWS S3 Spec, ETag for SSE-C encrypted objects need not be MD5Sum of the data.
+	// Since server side copy with same source and dest just replaces the ETag, we save
+	// encrypted content MD5Sum as ETag for both SSE-C and SSE-S3, we standardize the ETag
+	//encryption across SSE-C and SSE-S3, and only return last 32 bytes for SSE-C
+	if crypto.SSEC.IsEncrypted(objInfo.UserDefined) && !copySource {
+		return objInfo.ETag[len(objInfo.ETag)-32:]
+	}
+
+	objectEncryptionKey, err := decryptObjectInfo(key[:], objInfo.Bucket, objInfo.Name, objInfo.UserDefined)
+	if err != nil {
+		return objInfo.ETag
+	}
+	return tryDecryptETag(objectEncryptionKey, objInfo.ETag, false)
+}
+
+// helper to decrypt Etag given object encryption key and encrypted ETag
+func tryDecryptETag(key []byte, encryptedETag string, ssec bool) string {
+	// ETag for SSE-C encrypted objects need not be content MD5Sum.While encrypted
+	// md5sum is stored internally, return just the last 32 bytes of hex-encoded and
+	// encrypted md5sum string for SSE-C
+	if ssec {
+		return encryptedETag[len(encryptedETag)-32:]
+	}
+	var objectKey crypto.ObjectKey
+	copy(objectKey[:], key)
+	encBytes, err := hex.DecodeString(encryptedETag)
+	if err != nil {
+		return encryptedETag
+	}
+	etagBytes, err := objectKey.UnsealETag(encBytes)
+	if err != nil {
+		return encryptedETag
+	}
+	return hex.EncodeToString(etagBytes)
+}
+
 // GetDecryptedRange - To decrypt the range (off, length) of the
 // decrypted object stream, we need to read the range (encOff,
 // encLength) of the encrypted object stream to decrypt it, and
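The decision tree inside getDecryptedETag and tryDecryptETag above can be restated compactly. The following self-contained sketch mirrors those branches; the unseal callback stands in for crypto.ObjectKey.UnsealETag:

package main

import (
	"encoding/hex"
	"fmt"
)

// decryptedETag condenses the branches of getDecryptedETag/tryDecryptETag:
//   - a 32-char ETag is already a plain content MD5 hex string;
//   - multipart ETags are returned as stored;
//   - SSE-C (non copy-source) returns the trailing 32 hex chars;
//   - otherwise the stored ETag is hex-decoded and unsealed with the key.
func decryptedETag(etag string, multipart, ssec bool, unseal func([]byte) ([]byte, error)) string {
	if len(etag) == 32 || multipart {
		return etag
	}
	if ssec {
		return etag[len(etag)-32:]
	}
	encBytes, err := hex.DecodeString(etag)
	if err != nil {
		return etag // fall back to the stored value, as the real helpers do
	}
	etagBytes, err := unseal(encBytes)
	if err != nil {
		return etag
	}
	return hex.EncodeToString(etagBytes)
}

func main() {
	identity := func(b []byte) ([]byte, error) { return b, nil } // toy unseal
	// plain MD5 ETag: returned as is
	fmt.Println(decryptedETag("9a0364b9e99bb480dd25e1f0284c8555", false, false, identity))
	// SSE-C ETag: only the trailing 32 hex chars are returned
	fmt.Println(decryptedETag("deadbeef9a0364b9e99bb480dd25e1f0284c8555", false, true, identity))
}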
@@ -1075,7 +1130,7 @@ func DecryptCopyObjectInfo(info *ObjectInfo, headers http.Header) (apiErr APIErr
 // decryption succeeded.
 //
 // DecryptObjectInfo also returns whether the object is encrypted or not.
-func DecryptObjectInfo(info ObjectInfo, headers http.Header) (encrypted bool, err error) {
+func DecryptObjectInfo(info *ObjectInfo, headers http.Header) (encrypted bool, err error) {
 	// Directories are never encrypted.
 	if info.IsDir {
 		return false, nil
@@ -1094,6 +1149,11 @@ func DecryptObjectInfo(info ObjectInfo, headers http.Header) (encrypted bool, er
 			return
 		}
 		_, err = info.DecryptedSize()
+
+		if crypto.IsEncrypted(info.UserDefined) && !crypto.IsMultiPart(info.UserDefined) {
+			info.ETag = getDecryptedETag(headers, *info, false)
+		}
+
 	}
 	return
 }

@@ -139,7 +139,7 @@ func TestEncryptRequest(t *testing.T) {
 		for k, v := range test.header {
 			req.Header.Set(k, v)
 		}
-		_, err := EncryptRequest(content, req, "bucket", "object", test.metadata)
+		_, _, err := EncryptRequest(content, req, "bucket", "object", test.metadata)

 		if err != nil {
 			t.Fatalf("Test %d: Failed to encrypt request: %v", i, err)
@@ -324,7 +324,7 @@ var decryptObjectInfoTests = []struct {

 func TestDecryptObjectInfo(t *testing.T) {
 	for i, test := range decryptObjectInfoTests {
-		if encrypted, err := DecryptObjectInfo(test.info, test.headers); err != test.expErr {
+		if encrypted, err := DecryptObjectInfo(&test.info, test.headers); err != test.expErr {
 			t.Errorf("Test %d: Decryption returned wrong error code: got %d , want %d", i, err, test.expErr)
 		} else if enc := crypto.IsEncrypted(test.info.UserDefined); encrypted && enc != encrypted {
 			t.Errorf("Test %d: Decryption thinks object is encrypted but it is not", i)

@@ -54,7 +54,7 @@ func TestReadFSMetadata(t *testing.T) {
 	if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
-	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
+	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}

@@ -89,7 +89,7 @@ func TestWriteFSMetadata(t *testing.T) {
 	if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
-	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
+	if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}

@@ -18,7 +18,6 @@ package cmd

 import (
 	"context"
-	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -31,8 +30,6 @@ import (

 	"github.com/minio/minio/cmd/logger"
 	mioutil "github.com/minio/minio/pkg/ioutil"
-
-	"github.com/minio/minio/pkg/hash"
 )

 // Returns EXPORT/.minio.sys/multipart/SHA256/UPLOADID
@@ -259,7 +256,7 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
 		return pi, toObjectErr(err)
 	}

-	partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader, dstOpts)
+	partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
 	if err != nil {
 		return pi, toObjectErr(err, dstBucket, dstObject)
 	}
@@ -271,7 +268,8 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
 // an ongoing multipart transaction. Internally incoming data is
 // written to '.minio.sys/tmp' location and safely renamed to
 // '.minio.sys/multipart' for reach parts.
-func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (pi PartInfo, e error) {
+func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) {
+	data := r.Reader
 	if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
 		return pi, toObjectErr(err, bucket)
 	}
@@ -321,11 +319,12 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
 	// PutObjectPart succeeds then there would be nothing to
 	// delete in which case we just ignore the error.
 	defer fsRemoveFile(ctx, tmpPartPath)
+	etag := r.MD5CurrentHexString()

-	etag := hex.EncodeToString(data.MD5Current())
 	if etag == "" {
 		etag = GenETag()
 	}

 	partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag, data.ActualSize()))

 	if err = fsRenameFile(ctx, tmpPartPath, partPath); err != nil {
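The hunk above replaces hex.EncodeToString(data.MD5Current()) with r.MD5CurrentHexString(), which — per the commit message — lets the running MD5 be sealed with the object key before it becomes the part ETag, with a random ETag generated when no client MD5 exists. A sketch of that empty-ETag fallback (genETag mimics GenETag; the sealing step is elided):

package main

import (
	"crypto/md5"
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// genETag mimics MinIO's GenETag: a random value shaped like an MD5 hex string.
func genETag() string {
	b := make([]byte, md5.Size)
	if _, err := rand.Read(b); err != nil {
		panic(err)
	}
	return hex.EncodeToString(b)
}

// md5CurrentHexString stands in for PutObjReader.MD5CurrentHexString: the
// running MD5 as hex — sealed with the object encryption key first in the
// real SSE path — or "" when the client sent no Content-MD5.
func md5CurrentHexString(r io.Reader, clientSentMD5 bool) string {
	if !clientSentMD5 {
		return ""
	}
	h := md5.New()
	io.Copy(h, r)
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	etag := md5CurrentHexString(strings.NewReader("part payload"), false)
	if etag == "" { // same fallback as in the hunk above
		etag = genETag()
	}
	fmt.Println("part ETag:", etag)
}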
@@ -478,7 +477,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
 // md5sums of all the parts.
 //
 // Implements S3 compatible Complete multipart API.
-func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
+func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {

 	var actualSize int64

@@ -114,7 +114,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
 	sha256sum := ""
 
 	fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-	_, err = fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{})
+	_, err = fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{})
 	if !isSameType(err, BucketNotFound{}) {
 		t.Fatal("Unexpected error ", err)
 	}
@@ -145,7 +145,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
 
 	parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
 	fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-	if _, err := fs.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, parts); err != nil {
+	if _, err := fs.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, parts, ObjectOptions{}); err != nil {
 		if !isSameType(err, BucketNotFound{}) {
 			t.Fatal("Unexpected error ", err)
 		}
@@ -175,13 +175,13 @@ func TestCompleteMultipartUpload(t *testing.T) {
 
 	md5Hex := getMD5Hash(data)
 
-	if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
+	if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
 		t.Fatal("Unexpected error ", err)
 	}
 
 	parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
 
-	if _, err := fs.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, parts); err != nil {
+	if _, err := fs.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, parts, ObjectOptions{}); err != nil {
 		t.Fatal("Unexpected error ", err)
 	}
 }
@@ -209,7 +209,7 @@ func TestAbortMultipartUpload(t *testing.T) {
 
 	md5Hex := getMD5Hash(data)
 
-	if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
+	if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
 		t.Fatal("Unexpected error ", err)
 	}
 	time.Sleep(time.Second) // Without Sleep on windows, the fs.AbortMultipartUpload() fails with "The process cannot access the file because it is being used by another process."
 
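Throughout the tests, mustGetHashReader is replaced by mustGetPutObjReader. The helper's body is outside this diff; one plausible sketch, assuming a NewPutObjReader constructor whose extra arguments carry the encrypted reader and sealing key (nil for the plaintext test path):

// Hypothetical sketch of the test helper; the actual implementation
// lives in the test utilities and is not shown in this diff.
func mustGetPutObjReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *PutObjReader {
	// Reuse the existing helper to build the hash-verifying reader,
	// then wrap it; nil, nil means no server-side encryption in tests.
	hr := mustGetHashReader(t, data, size, md5hex, sha256hex)
	return NewPutObjReader(hr, nil, nil)
}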
cmd/fs-v1.go (18 changes)
@@ -19,7 +19,6 @@ package cmd
 import (
 	"bytes"
 	"context"
-	"encoding/hex"
 	"io"
 	"io/ioutil"
 	"net/http"
@@ -32,7 +31,6 @@ import (
 	"time"
 
 	"github.com/minio/minio/cmd/logger"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/lock"
 	"github.com/minio/minio/pkg/madmin"
 	"github.com/minio/minio/pkg/mimedb"
@@ -461,8 +459,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
 		// Return the new object info.
 		return fsMeta.ToObjectInfo(srcBucket, srcObject, fi), nil
 	}
-	objInfo, err := fs.putObject(ctx, dstBucket, dstObject, srcInfo.Reader, srcInfo.UserDefined)
+	objInfo, err := fs.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, srcInfo.UserDefined, dstOpts)
 	if err != nil {
 		return oi, toObjectErr(err, dstBucket, dstObject)
 	}
@@ -817,8 +814,8 @@ func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent strin
 // until EOF, writes data directly to configured filesystem path.
 // Additionally writes `fs.json` which carries the necessary metadata
 // for future object operations.
-func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
-	if err := checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
+func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
+	if err := checkPutObjectArgs(ctx, bucket, object, fs, r.Size()); err != nil {
 		return ObjectInfo{}, err
 	}
 	// Lock the object.
@@ -828,11 +825,13 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string
 		return objInfo, err
 	}
 	defer objectLock.Unlock()
-	return fs.putObject(ctx, bucket, object, data, metadata)
+	return fs.putObject(ctx, bucket, object, r, metadata, opts)
 }
 
 // putObject - wrapper for PutObject
-func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
+func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
+	data := r.Reader
 
 	// No metadata is set, allocate a new one.
 	meta := make(map[string]string)
 	for k, v := range metadata {
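The `data := r.Reader` unwrapping works because PutObjReader embeds the stream the backend should persist while also carrying the client's original reader. A minimal sketch of the type, with field names assumed from how it is used in this diff:

// PutObjReader (sketch): the embedded hash.Reader is what backends write
// out; rawReader sees the client payload, so its running MD5 matches the
// MD5 the client sent even when the stored bytes are encrypted.
type PutObjReader struct {
	*hash.Reader                      // stream the backend persists
	rawReader *hash.Reader            // client-side stream (pre-encryption)
	sealMD5Fn func(md5 []byte) []byte // non-nil when SSE must seal the MD5
}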
@@ -923,8 +922,7 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
 		fsRemoveFile(ctx, fsTmpObjPath)
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
-	fsMeta.Meta["etag"] = hex.EncodeToString(data.MD5Current())
+	fsMeta.Meta["etag"] = r.MD5CurrentHexString()
 
 	// Should return IncompleteBody{} error when reader has fewer
 	// bytes than specified in request header.
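Replacing hex.EncodeToString(data.MD5Current()) with r.MD5CurrentHexString() is the heart of the change: for plaintext objects the two must agree, while for SSE objects the client-sent MD5 is sealed (encrypted) before being stored as the ETag, per the commit message. A sketch of one plausible implementation, building on the field names assumed above:

// Illustrative sketch only; the real method body is not shown in this hunk.
func (p *PutObjReader) MD5CurrentHexString() string {
	md5sum := p.rawReader.MD5Current()
	if p.sealMD5Fn != nil {
		// SSE path: store the sealed form of the client-sent MD5.
		md5sum = p.sealMD5Fn(md5sum)
	}
	return hex.EncodeToString(md5sum)
}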
@@ -40,7 +40,7 @@ func TestFSParentDirIsObject(t *testing.T) {
 	}
 	objectContent := "12345"
 	objInfo, err := obj.PutObject(context.Background(), bucketName, objectName,
-		mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
+		mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -124,7 +124,7 @@ func TestFSShutdown(t *testing.T) {
 
 	objectContent := "12345"
 	obj.MakeBucketWithLocation(context.Background(), bucketName, "")
-	obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
+	obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
 	return fs, disk
 }
 
@@ -203,7 +203,7 @@ func TestFSPutObject(t *testing.T) {
 	}
 
 	// With a regular object.
-	_, err := obj.PutObject(context.Background(), bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
+	_, err := obj.PutObject(context.Background(), bucketName+"non-existent", objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, bucket doesn't exist")
 	}
@@ -212,7 +212,7 @@ func TestFSPutObject(t *testing.T) {
 	}
 
 	// With a directory object.
-	_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+"/", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, bucket doesn't exist")
 	}
@@ -220,11 +220,11 @@ func TestFSPutObject(t *testing.T) {
 		t.Fatalf("Expected error type BucketNotFound, got %#v", err)
 	}
 
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, backend corruption occurred")
 	}
@@ -239,7 +239,7 @@ func TestFSPutObject(t *testing.T) {
 		}
 	}
 
-	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1/", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, backned corruption occurred")
 	}
@@ -267,7 +267,7 @@ func TestFSDeleteObject(t *testing.T) {
 	objectName := "object"
 
 	obj.MakeBucketWithLocation(context.Background(), bucketName, "")
-	obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
+	obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 
 	// Test with invalid bucket name
 	if err := fs.DeleteObject(context.Background(), "fo", objectName); !isSameType(err, BucketNameInvalid{}) {
@@ -20,7 +20,6 @@ import (
 	"context"
 
 	"github.com/minio/minio/cmd/logger"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/madmin"
 	"github.com/minio/minio/pkg/policy"
 )
@@ -44,7 +43,8 @@ func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcOb
 }
 
 // PutObjectPart puts a part of object in bucket
-func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (pi PartInfo, err error) {
+func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
+	logger.LogIf(ctx, NotImplemented{})
 	return pi, NotImplemented{}
 }
 
@@ -60,7 +60,8 @@ func (a GatewayUnsupported) AbortMultipartUpload(ctx context.Context, bucket str
 }
 
 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
-func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []CompletePart) (oi ObjectInfo, err error) {
+func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
+	logger.LogIf(ctx, NotImplemented{})
 	return oi, NotImplemented{}
 }
 
@@ -38,7 +38,6 @@ import (
 	miniogopolicy "github.com/minio/minio-go/pkg/policy"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/policy"
 	"github.com/minio/minio/pkg/policy/condition"
 	sha256 "github.com/minio/sha256-simd"
@@ -737,7 +736,8 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string,
 
 // PutObject - Create a new blob with the incoming data,
 // uses Azure equivalent CreateBlockBlobFromReader.
-func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	data := r.Reader
 	if data.Size() < azureBlockSize/10 {
 		blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 		blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(ctx, metadata)
@@ -938,7 +938,8 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
 }
 
 // PutObjectPart - Use Azure equivalent PutBlockWithLength.
-func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts minio.ObjectOptions) (info minio.PartInfo, err error) {
+func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (info minio.PartInfo, err error) {
+	data := r.Reader
 	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
 		return info, err
 	}
@@ -1077,7 +1078,7 @@ func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
 }
 
 // CompleteMultipartUpload - Use Azure equivalent PutBlockList.
-func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) {
+func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	metadataObject := getAzureMetadataObjectName(object, uploadID)
 	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
 		return objInfo, err
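Every gateway in this commit follows the same mechanical recipe seen in the Azure hunks above: widen the signature to *minio.PutObjReader, then immediately recover the old variable with data := r.Reader so the rest of the method body is untouched. The recipe in isolation (exampleGateway and its upload method are placeholders, not names from this commit):

// PutObject shows the adaptation pattern shared by each gateway backend.
func (g *exampleGateway) PutObject(ctx context.Context, bucket, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
	data := r.Reader // same *hash.Reader the old signature received
	// upload stands in for the pre-existing, unchanged method body.
	return g.upload(ctx, bucket, object, data, metadata)
}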
@@ -534,7 +534,9 @@ func (nb *Reader) Read(p []byte) (int, error) {
 }
 
 // PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB.
-func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string, data *h2.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	data := r.Reader
+
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
 		return objInfo, err
@@ -653,7 +655,8 @@ func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
 }
 
 // PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
-func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *h2.Reader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
+func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
+	data := r.Reader
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
 		return pi, err
@@ -726,7 +729,7 @@ func (l *b2Objects) AbortMultipartUpload(ctx context.Context, bucket string, obj
 }
 
 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object, uses B2's LargeFile upload API.
-func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, err error) {
+func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, err error) {
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
 		return oi, err
@@ -40,7 +40,6 @@ import (
 	miniogopolicy "github.com/minio/minio-go/pkg/policy"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/policy"
 	"github.com/minio/minio/pkg/policy/condition"
 
@@ -880,7 +879,9 @@ func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object st
 }
 
 // PutObject - Create a new object with the incoming data,
-func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
+func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
+	data := r.Reader
+
 	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
 	// otherwise gcs will just return object not exist in case of non-existing bucket
 	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
@@ -1061,7 +1062,8 @@ func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key
 }
 
 // PutObjectPart puts a part of object in bucket
-func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, data *hash.Reader, opts minio.ObjectOptions) (minio.PartInfo, error) {
+func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.PartInfo, error) {
+	data := r.Reader
 	if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
 		return minio.PartInfo{}, err
 	}
@@ -1216,7 +1218,7 @@ func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, ke
 // Note that there is a limit (currently 32) to the number of components that can
 // be composed in a single operation. There is a per-project rate limit (currently 200)
 // to the number of source objects you can compose per second.
-func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, uploadedParts []minio.CompletePart) (minio.ObjectInfo, error) {
+func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
 	meta := gcsMultipartMetaName(uploadID)
 	object := l.client.Bucket(bucket).Object(meta)
 
@@ -35,7 +35,6 @@ import (
 	minio "github.com/minio/minio/cmd"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/hash"
 )
 
 // stor is a namespace within manta where you store any documents that are deemed as private
@@ -608,7 +607,8 @@ func (d dummySeeker) Seek(offset int64, whence int) (int64, error) {
 // CreateBlockBlobFromReader.
 //
 // https://apidocs.joyent.com/manta/api.html#PutObject
-func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	data := r.Reader
 	if err = t.client.Objects().Put(ctx, &storage.PutObjectInput{
 		ContentLength: uint64(data.Size()),
 		ObjectPath:    path.Join(mantaRoot, bucket, object),
@@ -655,7 +655,9 @@ func ossPutObject(ctx context.Context, client *oss.Client, bucket, object string
 }
 
 // PutObject creates a new object with the incoming data.
-func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	data := r.Reader
+
 	return ossPutObject(ctx, l.Client, bucket, object, data, metadata)
 }
 
@@ -773,7 +775,8 @@ func (l *ossObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
 }
 
 // PutObjectPart puts a part of object in bucket.
-func (l *ossObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
+func (l *ossObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
+	data := r.Reader
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
 		logger.LogIf(ctx, err)
@@ -914,7 +917,7 @@ func (l *ossObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u
 }
 
 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object.
-func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, err error) {
+func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, err error) {
 	client := l.Client
 	bkt, err := client.Bucket(bucket)
 	if err != nil {
@@ -31,7 +31,6 @@ import (
 	"github.com/minio/minio-go/pkg/s3utils"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/policy"
 
 	minio "github.com/minio/minio/cmd"
@@ -406,7 +405,8 @@ func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object str
 }
 
 // PutObject creates a new object with the incoming data,
-func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	data := r.Reader
 	oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata), opts.ServerSideEncryption)
 	if err != nil {
 		return objInfo, minio.ErrorRespToObjectError(err, bucket, object)
@@ -464,7 +464,8 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
 }
 
 // PutObjectPart puts a part of object in bucket
-func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {
+func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {
+	data := r.Reader
 	info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), opts.ServerSideEncryption)
 	if err != nil {
 		return pi, minio.ErrorRespToObjectError(err, bucket, object)
@@ -508,7 +509,7 @@ func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, obj
 }
 
 // CompleteMultipartUpload completes ongoing multipart upload and finalizes object
-func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart) (oi minio.ObjectInfo, e error) {
+func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {
 	err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts))
 	if err != nil {
 		return oi, minio.ErrorRespToObjectError(err, bucket, object)
@@ -38,7 +38,6 @@ import (
 	minio "github.com/minio/minio/cmd"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/auth"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/sha256-simd"
 )
 
@@ -554,7 +553,8 @@ func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object st
 }
 
 // PutObject creates a new object with the incoming data,
-func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
+	data := r.Reader
 	srcFile := path.Join(s.TempDir, minio.MustGetUUID())
 	writer, err := os.Create(srcFile)
 	if err != nil {
@@ -108,6 +108,8 @@ type ObjectInfo struct {
 	// Implements writer and reader used by CopyObject API
 	Writer       io.WriteCloser `json:"-"`
 	Reader       *hash.Reader   `json:"-"`
+	PutObjReader *PutObjReader  `json:"-"`
 
 	metadataOnly bool
 
 	// Date and time when the object was last accessed.
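Storing the wrapped reader on ObjectInfo is what lets the FS CopyObject path (earlier in this diff) forward srcInfo.PutObjReader straight into putObject. A hypothetical sketch of populating both fields so legacy and new consumers keep working (attachReaders and the NewPutObjReader constructor are assumptions, not code from this commit):

func attachReaders(srcInfo *ObjectInfo, hr *hash.Reader) {
	srcInfo.Reader = hr                                   // legacy consumers keep the plain reader
	srcInfo.PutObjReader = NewPutObjReader(hr, nil, nil)  // nil, nil: no SSE on this path
}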
@@ -92,7 +92,7 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 
 	for _, object := range testCase.objectToUploads {
 		md5Bytes := md5.Sum([]byte(object.content))
-		_, err = obj.PutObject(context.Background(), testCase.bucketName, object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
+		_, err = obj.PutObject(context.Background(), testCase.bucketName, object.name, mustGetPutObjReader(t, bytes.NewBufferString(object.content),
 			int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), nil, ObjectOptions{})
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
@@ -75,7 +75,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	// iterate through the above set of inputs and upkoad the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -220,7 +220,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
 	// iterate through the above set of inputs and upkoad the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -333,7 +333,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
 	// iterate through the above set of inputs and upkoad the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -35,13 +35,13 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
 	opts := ObjectOptions{}
-	_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/asiapics.jpg", mustGetPutObjReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
 
 	// Put an empty directory
-	_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/empty-dir/", mustGetHashReader(t, bytes.NewBufferString(""), int64(len("")), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/empty-dir/", mustGetPutObjReader(t, bytes.NewBufferString(""), int64(len("")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
@@ -22,7 +22,6 @@ import (
 	"net/http"
 
 	"github.com/minio/minio-go/pkg/encrypt"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/madmin"
 	"github.com/minio/minio/pkg/policy"
 )
@@ -66,7 +65,7 @@ type ObjectLayer interface {
 	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (reader *GetObjectReader, err error)
 	GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
 	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
-	PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
+	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
 	CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
 	DeleteObject(ctx context.Context, bucket, object string) error
 
@@ -75,10 +74,10 @@ type ObjectLayer interface {
 	NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
 	CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
 		startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error)
-	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
+	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
 	ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
 	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
-	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
+	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
 
 	// Healing operations.
 	ReloadFormat(ctx context.Context, dryRun bool) error
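With the interface widened, every backend must adopt the new PutObject, PutObjectPart, and CompleteMultipartUpload shapes at once, which is why this commit touches the FS layer and every gateway together. A conventional compile-time guard (not necessarily present in the source) turns any missed signature into a build error:

// If FSObjects misses any of the widened ObjectLayer methods,
// this declaration fails to compile.
var _ ObjectLayer = (*FSObjects)(nil)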
@@ -70,7 +70,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}
 	for _, object := range testObjects {
 		md5Bytes := md5.Sum([]byte(object.content))
-		_, err = obj.PutObject(context.Background(), testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
+		_, err = obj.PutObject(context.Background(), testBuckets[0], object.name, mustGetPutObjReader(t, bytes.NewBufferString(object.content),
 			int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), object.meta, ObjectOptions{})
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
@@ -617,7 +617,7 @@ func BenchmarkListObjects(b *testing.B) {
 	// Insert objects to be listed and benchmarked later.
 	for i := 0; i < 20000; i++ {
 		key := "obj" + strconv.Itoa(i)
-		_, err = obj.PutObject(context.Background(), bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil, ObjectOptions{})
+		_, err = obj.PutObject(context.Background(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil, ObjectOptions{})
 		if err != nil {
 			b.Fatal(err)
 		}
@@ -221,7 +221,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), ObjectOptions{})
+		_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), ObjectOptions{})
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
 		}
@@ -235,7 +235,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
 
 	// Object part upload should fail with quorum not available.
 	testCase := createPartCases[len(createPartCases)-1]
-	_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), ObjectOptions{})
+	_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), ObjectOptions{})
 	if err == nil {
 		t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
 	}
@@ -354,7 +354,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
 
 	// Validate all the test cases.
 	for i, testCase := range testCases {
-		actualInfo, actualErr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256), opts)
+		actualInfo, actualErr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256), opts)
 		// All are test cases above are expected to fail.
 		if actualErr != nil && testCase.shouldPass {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@@ -488,7 +488,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
+		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
 		}
@@ -1309,7 +1309,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
+		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
 		}
@@ -1550,7 +1550,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
+		_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err.Error())
 		}
@@ -1801,7 +1801,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, part := range parts {
-		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum), opts)
+		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID, mustGetPutObjReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}
@@ -1904,7 +1904,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 	}
 
 	for i, testCase := range testCases {
-		actualResult, actualErr := obj.CompleteMultipartUpload(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts)
+		actualResult, actualErr := obj.CompleteMultipartUpload(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{})
 		if actualErr != nil && testCase.shouldPass {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr)
 		}
@@ -172,7 +172,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
 	}
 
 	for i, testCase := range testCases {
-		objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta, ObjectOptions{})
+		objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta, ObjectOptions{})
 		if actualErr != nil && testCase.expectedError == nil {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
 		}
@@ -245,7 +245,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
 
 	sha256sum := ""
 	for i, testCase := range testCases {
-		objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta, ObjectOptions{})
+		objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta, ObjectOptions{})
 		if actualErr != nil && testCase.shouldPass {
 			t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
 		}
@@ -294,7 +294,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
 		InsufficientWriteQuorum{},
 	}
 
-	_, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta, ObjectOptions{})
+	_, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetPutObjReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta, ObjectOptions{})
 	if actualErr != nil && testCase.shouldPass {
 		t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
 	}
@@ -326,7 +326,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
 
 	data := []byte("hello, world")
 	// Create object.
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		// Failed to create object, abort.
 		t.Fatalf("%s : %s", instanceType, err.Error())
@ -371,7 +371,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
|
||||||
md5Writer.Write(fiveMBBytes)
|
md5Writer.Write(fiveMBBytes)
|
||||||
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
|
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
|
||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum), opts)
|
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to upload object part, abort.
|
// Failed to upload object part, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
|
@ -382,7 +382,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
|
||||||
md5Writer = md5.New()
|
md5Writer = md5.New()
|
||||||
md5Writer.Write(data)
|
md5Writer.Write(data)
|
||||||
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
|
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
|
||||||
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum), opts)
|
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to upload object part, abort.
|
// Failed to upload object part, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
|
@ -393,7 +393,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
|
||||||
{ETag: etag1, PartNumber: 1},
|
{ETag: etag1, PartNumber: 1},
|
||||||
{ETag: etag2, PartNumber: 2},
|
{ETag: etag2, PartNumber: 2},
|
||||||
}
|
}
|
||||||
_, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, parts)
|
_, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, parts, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to complete multipart upload, abort.
|
// Failed to complete multipart upload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
|
|
|
@@ -17,6 +17,7 @@
 package cmd
 
 import (
+	"bytes"
 	"context"
 	"encoding/hex"
 	"fmt"

@@ -35,6 +36,7 @@ import (
 	"github.com/minio/minio/cmd/crypto"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/dns"
+	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/ioutil"
 	"github.com/minio/minio/pkg/wildcard"
 	"github.com/skyrings/skyring-common/tools/uuid"
@@ -597,3 +599,61 @@ func (g *GetObjectReader) Read(p []byte) (n int, err error) {
 	}
 	return
 }
+
+// SealMD5CurrFn seals md5sum with object encryption key and returns sealed
+// md5sum
+type SealMD5CurrFn func([]byte) []byte
+
+// PutObjReader is a type that wraps sio.EncryptReader and
+// underlying hash.Reader in a struct
+type PutObjReader struct {
+	*hash.Reader              // actual data stream
+	rawReader *hash.Reader    // original data stream
+	sealMD5Fn SealMD5CurrFn
+}
+
+// Size returns the absolute number of bytes the Reader
+// will return during reading. It returns -1 for unlimited
+// data.
+func (p *PutObjReader) Size() int64 {
+	return p.Reader.Size()
+}
+
+// MD5CurrentHexString returns the current MD5Sum or encrypted MD5Sum
+// as a hex encoded string
+func (p *PutObjReader) MD5CurrentHexString() string {
+	md5sumCurr := p.rawReader.MD5Current()
+	if p.sealMD5Fn != nil {
+		md5sumCurr = p.sealMD5Fn(md5sumCurr)
+	}
+	return hex.EncodeToString(md5sumCurr)
+}
+
+// NewPutObjReader returns a new PutObjReader and holds
+// reference to underlying data stream from client and the encrypted
+// data reader
+func NewPutObjReader(rawReader *hash.Reader, encReader *hash.Reader, encKey []byte) *PutObjReader {
+	p := PutObjReader{Reader: rawReader, rawReader: rawReader}
+
+	var objKey crypto.ObjectKey
+	copy(objKey[:], encKey)
+	p.sealMD5Fn = sealETagFn(objKey)
+	if encReader != nil {
+		p.Reader = encReader
+	}
+	return &p
+}
+
+func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
+	var emptyKey [32]byte
+	if bytes.Equal(encKey[:], emptyKey[:]) {
+		return md5CurrSum
+	}
+	return encKey.SealETag(md5CurrSum)
+}
+
+func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
+	fn1 := func(md5sumcurr []byte) []byte {
+		return sealETag(key, md5sumcurr)
+	}
+	return fn1
+}
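A brief illustration of how the new type above is meant to be wired up (not part of the commit; everything except NewPutObjReader, hash.NewReader and MD5CurrentHexString is a hypothetical name): the handler keeps the client's raw stream, optionally wraps it with an encrypted reader plus the object key, and reads the sealed ETag back out for storage. A minimal sketch under those assumptions:

// Sketch only: how a handler might construct a PutObjReader, assuming
// encBody is the SSE-encrypted form of clientBody and objectEncryptionKey
// is the 32-byte object key derived for this object.
func examplePutObjReaderWiring(clientBody io.Reader, size int64,
	encBody io.Reader, encSize int64, objectEncryptionKey []byte) (string, error) {
	// Raw stream from the client; MD5 is computed over this data.
	rawReader, err := hash.NewReader(clientBody, size, "", "", size)
	if err != nil {
		return "", err
	}
	// Unencrypted path: no encrypted reader, no key, so the sealing
	// function is a no-op and the ETag is the plain content MD5.
	p := NewPutObjReader(rawReader, nil, nil)
	// SSE path: the backend persists the encrypted stream, while the
	// stored ETag is the raw MD5 sealed with the object key.
	encReader, err := hash.NewReader(encBody, encSize, "", "", size)
	if err != nil {
		return "", err
	}
	p = NewPutObjReader(rawReader, encReader, objectEncryptionKey)
	return p.MD5CurrentHexString(), nil // sealed ETag stored on the backend
}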
@@ -164,7 +164,8 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
 	getObjectNInfo = api.CacheAPI().GetObjectNInfo
 	}
 
-	gr, err := getObjectNInfo(ctx, bucket, object, nil, r.Header, readLock, ObjectOptions{})
+	var opts ObjectOptions
+	gr, err := getObjectNInfo(ctx, bucket, object, nil, r.Header, readLock, opts)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return

@@ -319,8 +320,11 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
 	object := vars["object"]
-	var opts ObjectOptions
+
+	var (
+		opts ObjectOptions
+		err  error
+	)
 	// Check for auth type to return S3 compatible error.
 	// type to return the correct error (NoSuchKey vs AccessDenied)
 	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {

@@ -349,7 +353,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	getObjectInfo = api.CacheAPI().GetObjectInfo
 	}
 
-	_, err := getObjectInfo(ctx, bucket, object, opts)
+	_, err = getObjectInfo(ctx, bucket, object, opts)
 	if toAPIErrorCode(ctx, err) == ErrNoSuchKey {
 		s3Error = ErrNoSuchKey
 	}

@@ -368,7 +372,6 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	var rs *HTTPRangeSpec
 	rangeHeader := r.Header.Get("Range")
 	if rangeHeader != "" {
-		var err error
 		if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
 			// Handle only errInvalidRange. Ignore other
 			// parse error and treat it as regular Get

@@ -392,7 +395,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
 	objInfo := gr.ObjInfo
 
 	if objectAPI.IsEncryptionSupported() {
-		if _, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
+		if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
 			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 			return
 		}

@@ -491,7 +494,11 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
 	getObjectInfo = api.CacheAPI().GetObjectInfo
 	}
 
-	var opts ObjectOptions
+	var (
+		opts ObjectOptions
+		err  error
+	)
+
 	if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
 		if getRequestAuthType(r) == authTypeAnonymous {
 			// As per "Permission" section in

@@ -513,7 +520,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
 	ConditionValues: getConditionValues(r, ""),
 	IsOwner:         false,
 	}) {
-		_, err := getObjectInfo(ctx, bucket, object, opts)
+		_, err = getObjectInfo(ctx, bucket, object, opts)
 		if toAPIErrorCode(ctx, err) == ErrNoSuchKey {
 			s3Error = ErrNoSuchKey
 		}

@@ -527,7 +534,6 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
 	var rs *HTTPRangeSpec
 	rangeHeader := r.Header.Get("Range")
 	if rangeHeader != "" {
-		var err error
 		if rs, err = parseRequestRangeSpec(rangeHeader); err != nil {
 			// Handle only errInvalidRange. Ignore other
 			// parse error and treat it as regular Get

@@ -548,7 +554,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
 	}
 
 	if objectAPI.IsEncryptionSupported() {
-		if _, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
+		if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
 			writeErrorResponseHeadersOnly(w, toAPIErrorCode(ctx, err))
 			return
 		}
@@ -807,7 +813,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
+	rawReader := srcInfo.Reader
+	pReader := NewPutObjReader(srcInfo.Reader, srcInfo.Reader, nil)
 	var encMetadata = make(map[string]string)
 	if objectAPI.IsEncryptionSupported() && !isCompressed {
 		// Encryption parameters not applicable for this object.

@@ -820,7 +827,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		writeErrorResponse(w, ErrInvalidSSECustomerAlgorithm, r.URL)
 		return
 	}
-	var oldKey, newKey []byte
+	var oldKey, newKey, objEncKey []byte
 	sseCopyS3 := crypto.S3.IsEncrypted(srcInfo.UserDefined)
 	sseCopyC := crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && crypto.SSECopy.IsRequested(r.Header)
 	sseC := crypto.SSEC.IsRequested(r.Header)

@@ -883,9 +891,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	case isSourceEncrypted && !isTargetEncrypted:
 		targetSize, _ = srcInfo.DecryptedSize()
 	}
 
 	if isTargetEncrypted {
-		reader, err = newEncryptReader(reader, newKey, dstBucket, dstObject, encMetadata, sseS3)
+		reader, objEncKey, err = newEncryptReader(srcInfo.Reader, newKey, dstBucket, dstObject, encMetadata, sseS3)
 		if err != nil {
 			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 			return

@@ -903,9 +910,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
+	pReader = NewPutObjReader(rawReader, srcInfo.Reader, objEncKey)
 	}
 	}
 
+	srcInfo.PutObjReader = pReader
+
 	srcInfo.UserDefined, err = getCpObjMetadataFromHeader(ctx, r, srcInfo.UserDefined)
 	if err != nil {
 		writeErrorResponse(w, ErrInternalError, r.URL)

@@ -978,7 +988,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 	}
 	}
 
-	response := generateCopyObjectResponse(objInfo.ETag, objInfo.ModTime)
+	response := generateCopyObjectResponse(getDecryptedETag(r.Header, objInfo, false), objInfo.ModTime)
 	encodedSuccessResponse := encodeResponse(response)
 
 	// Write success response.
@@ -1054,7 +1064,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		writeErrorResponse(w, ErrInvalidDigest, r.URL)
 		return
 	}
-
 	/// if Content-Length is unknown/missing, deny the request
 	size := r.ContentLength
 	rAuthType := getRequestAuthType(r)

@@ -1184,7 +1193,12 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
-	opts := ObjectOptions{}
+	rawReader := hashReader
+	pReader := NewPutObjReader(rawReader, nil, nil)
+
+	var opts ObjectOptions
+
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
 		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {

@@ -1192,10 +1206,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		return
 	}
 	}
+	var objectEncryptionKey []byte
 	if objectAPI.IsEncryptionSupported() {
 		if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests
-			reader, err = EncryptRequest(hashReader, r, bucket, object, metadata)
+			reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
 			if err != nil {
 				writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 				return

@@ -1206,6 +1220,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 			return
 		}
+		pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
 	}
 	}
 

@@ -1217,7 +1232,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	}
 
 	// Create the object..
-	objInfo, err := putObject(ctx, bucket, object, hashReader, metadata, opts)
+	objInfo, err := putObject(ctx, bucket, object, pReader, metadata, opts)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return

@@ -1227,8 +1242,11 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 	// Ignore compressed ETag.
 	objInfo.ETag = objInfo.ETag + "-1"
 	}
+	if hasServerSideEncryptionHeader(r.Header) {
+		w.Header().Set("ETag", "\""+getDecryptedETag(r.Header, objInfo, false)+"\"")
+	} else {
 	w.Header().Set("ETag", "\""+objInfo.ETag+"\"")
+	}
 	if objectAPI.IsEncryptionSupported() {
 		if crypto.IsEncrypted(objInfo.UserDefined) {
 			switch {
@@ -1288,8 +1306,11 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
 	object := vars["object"]
-	opts := ObjectOptions{}
+
+	var (
+		opts ObjectOptions
+		err  error
+	)
 	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
 		writeErrorResponse(w, s3Error, r.URL)
 		return

@@ -1297,7 +1318,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
 
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
+		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}

@@ -1315,7 +1336,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
 
 	if objectAPI.IsEncryptionSupported() {
 		if hasServerSideEncryptionHeader(r.Header) {
-			if err := setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
+			if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
 				writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 				return
 			}
@@ -1421,6 +1442,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
 		return
 	}
 
 	var srcOpts, dstOpts ObjectOptions
+
 	// Deny if WORM is enabled

@@ -1533,6 +1555,11 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		return
 	}
+
+	rawReader := srcInfo.Reader
+	pReader := NewPutObjReader(rawReader, nil, nil)
+
+	isEncrypted := false
+	var objectEncryptionKey []byte
 	if objectAPI.IsEncryptionSupported() && !isCompressed {
 		if crypto.IsEncrypted(li.UserDefined) {
 			if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(li.UserDefined) {

@@ -1543,6 +1570,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
 		return
 	}
+	isEncrypted = true // to detect SSE-S3 encryption
 	var key []byte
 	if crypto.SSEC.IsRequested(r.Header) {
 		key, err = ParseSSECustomerRequest(r)

@@ -1551,7 +1579,6 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		return
 	}
 	}
-	var objectEncryptionKey []byte
 	objectEncryptionKey, err = decryptObjectInfo(key, dstBucket, dstObject, li.UserDefined)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)

@@ -1576,9 +1603,11 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
+	pReader = NewPutObjReader(rawReader, srcInfo.Reader, objectEncryptionKey)
 	}
 	}
 
+	srcInfo.PutObjReader = pReader
 	// Copy source object to destination, if source and destination
 	// object is same then only metadata is updated.
 	partInfo, err := objectAPI.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID,

@@ -1588,6 +1617,10 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		return
 	}
 
+	if isEncrypted {
+		partInfo.ETag = tryDecryptETag(objectEncryptionKey, partInfo.ETag, crypto.SSEC.IsRequested(r.Header))
+	}
+
 	response := generateCopyObjectPartResponse(partInfo.ETag, partInfo.LastModified)
 	encodedSuccessResponse := encodeResponse(response)
 
@@ -1679,7 +1712,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		s3Error APIErrorCode
 	)
 	reader = r.Body
-
 	if s3Error = isPutAllowed(rAuthType, bucket, object, r); s3Error != ErrNone {
 		writeErrorResponse(w, s3Error, r.URL)
 		return

@@ -1755,7 +1787,10 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		return
 	}
 
-	opts := ObjectOptions{}
+	rawReader := hashReader
+	pReader := NewPutObjReader(rawReader, nil, nil)
+	var opts ObjectOptions
+
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
 		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {

@@ -1765,6 +1800,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	}
 
 	isEncrypted := false
+	var objectEncryptionKey []byte
 	if objectAPI.IsEncryptionSupported() && !isCompressed {
 		var li ListPartsInfo
 		li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)

@@ -1773,11 +1809,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		return
 	}
 	if crypto.IsEncrypted(li.UserDefined) {
-		isEncrypted = true
 		if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(li.UserDefined) {
 			writeErrorResponse(w, ErrSSEMultipartEncrypted, r.URL)
 			return
 		}
+		isEncrypted = true // to detect SSE-S3 encryption
 		var key []byte
 		if crypto.SSEC.IsRequested(r.Header) {
 			key, err = ParseSSECustomerRequest(r)

@@ -1788,7 +1824,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	}
 
 	// Calculating object encryption key
-	var objectEncryptionKey []byte
 	objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, li.UserDefined)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)

@@ -1807,13 +1842,13 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
 
 	info := ObjectInfo{Size: size}
 	hashReader, err = hash.NewReader(reader, info.EncryptedSize(), "", "", size) // do not try to verify encrypted content
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
+	pReader = NewPutObjReader(rawReader, hashReader, objectEncryptionKey)
 	}
 	}
 

@@ -1821,7 +1856,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	if api.CacheAPI() != nil && !isEncrypted {
 		putObjectPart = api.CacheAPI().PutObjectPart
 	}
-	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, hashReader, opts)
+	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts)
 	if err != nil {
 		// Verify if the underlying error is signature mismatch.
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)

@@ -1832,9 +1867,14 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 	// Suppress compressed ETag.
 	partInfo.ETag = partInfo.ETag + "-1"
 	}
 
 	if partInfo.ETag != "" {
+		if isEncrypted {
+			w.Header().Set("ETag", "\""+tryDecryptETag(objectEncryptionKey, partInfo.ETag, crypto.SSEC.IsRequested(r.Header))+"\"")
+		} else {
 	w.Header().Set("ETag", "\""+partInfo.ETag+"\"")
 	}
+	}
 
 	writeSuccessResponseHeadersOnly(w)
 }
@@ -1924,6 +1964,39 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
 		writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
 		return
 	}
+
+	var ssec bool
+	if objectAPI.IsEncryptionSupported() {
+		var li ListPartsInfo
+		li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
+		if err != nil {
+			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
+			return
+		}
+		if crypto.IsEncrypted(li.UserDefined) {
+			var key []byte
+			if crypto.SSEC.IsEncrypted(li.UserDefined) {
+				ssec = true
+			}
+			var objectEncryptionKey []byte
+			if crypto.S3.IsEncrypted(li.UserDefined) {
+				// Calculating object encryption key
+				objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, li.UserDefined)
+				if err != nil {
+					writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
+					return
+				}
+			}
+			parts := make([]PartInfo, len(listPartsInfo.Parts))
+			for i, p := range listPartsInfo.Parts {
+				part := p
+				part.ETag = tryDecryptETag(objectEncryptionKey, p.ETag, ssec)
+				parts[i] = part
+			}
+			listPartsInfo.Parts = parts
+		}
+	}
+
 	response := generateListPartsResponse(listPartsInfo)
 	encodedSuccessResponse := encodeResponse(response)
 
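For reference, the listing rewrite above can be read as a small pure function: every backend (sealed) part ETag is mapped back to the client-visible form before the XML response is generated. A sketch under that reading, with decryptPartETags as a hypothetical helper name:

// Sketch only: mirrors the loop above. For SSE-C uploads ssec is true and
// objectEncryptionKey stays nil; tryDecryptETag's exact SSE-C handling is
// defined elsewhere in this change.
func decryptPartETags(listPartsInfo ListPartsInfo, objectEncryptionKey []byte, ssec bool) ListPartsInfo {
	parts := make([]PartInfo, len(listPartsInfo.Parts))
	for i, p := range listPartsInfo.Parts {
		p.ETag = tryDecryptETag(objectEncryptionKey, p.ETag, ssec)
		parts[i] = p
	}
	listPartsInfo.Parts = parts
	return listPartsInfo
}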
@@ -1985,7 +2058,53 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 		writeErrorResponse(w, ErrInvalidPartOrder, r.URL)
 		return
 	}
+	var objectEncryptionKey []byte
+	var opts ObjectOptions
+	var isEncrypted, ssec bool
+
+	if objectAPI.IsEncryptionSupported() {
+		var li ListPartsInfo
+		li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
+		if err != nil {
+			writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
+			return
+		}
+		if crypto.IsEncrypted(li.UserDefined) {
+			isEncrypted = true
+			ssec = crypto.SSEC.IsEncrypted(li.UserDefined)
+			var key []byte
+			if crypto.S3.IsEncrypted(li.UserDefined) {
+				// Calculating object encryption key
+				objectEncryptionKey, err = decryptObjectInfo(key, bucket, object, li.UserDefined)
+				if err != nil {
+					writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
+					return
+				}
+			}
+		}
+	}
+
+	partsMap := make(map[string]PartInfo)
+	var listPartsInfo ListPartsInfo
+
+	if isEncrypted {
+		var partNumberMarker int
+		maxParts := 1000
+		for {
+			listPartsInfo, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
+			if err != nil {
+				writeErrorResponse(w, toAPIErrorCode(ctx, err), r.URL)
+				return
+			}
+			for _, part := range listPartsInfo.Parts {
+				partsMap[strconv.Itoa(part.PartNumber)] = part
+			}
+			partNumberMarker = listPartsInfo.NextPartNumberMarker
+			if !listPartsInfo.IsTruncated {
+				break
+			}
+		}
+	}
 	// Complete parts.
 	var completeParts []CompletePart
 	for _, part := range complMultipartUpload.Parts {

@@ -1995,6 +2114,18 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 		part.ETag = strings.Replace(part.ETag, "-1", "", -1) // For compressed multiparts, We append '-1' for part.ETag.
 	}
 	part.ETag = canonicalizeETag(part.ETag)
+	if isEncrypted {
+		// ETag is stored in the backend in encrypted form. Validate client sent ETag with
+		// decrypted ETag.
+		if bkPartInfo, ok := partsMap[strconv.Itoa(part.PartNumber)]; ok {
+			bkETag := tryDecryptETag(objectEncryptionKey, bkPartInfo.ETag, ssec)
+			if bkETag != part.ETag {
+				writeErrorResponse(w, ErrInvalidPart, r.URL)
+				return
+			}
+			part.ETag = bkPartInfo.ETag
+		}
+	}
 	completeParts = append(completeParts, part)
 	}
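Isolated for clarity: the per-part check introduced above exists because clients send plaintext MD5 ETags while the backend stores sealed ones, so each part is validated by decrypting the stored ETag and, on a match, forwarding the sealed ETag to the backend's CompleteMultipartUpload. A sketch of the same check (validatePartETag and its error value are hypothetical; the commit inlines this):

// Sketch only: returns the backend (sealed) ETag to use for the completed
// part, or an error when the client-sent ETag does not match the decrypted
// backend ETag.
func validatePartETag(partsMap map[string]PartInfo, partNumber int,
	clientETag string, objectEncryptionKey []byte, ssec bool) (string, error) {
	bkPartInfo, ok := partsMap[strconv.Itoa(partNumber)]
	if !ok {
		// No backend record for this part number; leave the ETag as sent.
		return clientETag, nil
	}
	if tryDecryptETag(objectEncryptionKey, bkPartInfo.ETag, ssec) != clientETag {
		// The handler surfaces this condition as ErrInvalidPart.
		return "", fmt.Errorf("part %d: ETag mismatch", partNumber)
	}
	return bkPartInfo.ETag, nil
}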
@@ -2002,7 +2133,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 	if api.CacheAPI() != nil {
 		completeMultiPartUpload = api.CacheAPI().CompleteMultipartUpload
 	}
-	objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, completeParts)
+	objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, completeParts, opts)
 	if err != nil {
 		switch oErr := err.(type) {
 		case PartTooSmall:
@@ -83,7 +83,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, ObjectOptions{})
+		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)

@@ -355,7 +355,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, ObjectOptions{})
+		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)

@@ -1391,7 +1391,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
 	for i, input := range putObjectInputs {
 		// uploading the object.
 		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName,
-			mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
+			mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)

@@ -1449,7 +1449,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
 	})
 	}
 
-	result, err := obj.CompleteMultipartUpload(context.Background(), bucketName, testObject, uploadID, parts)
+	result, err := obj.CompleteMultipartUpload(context.Background(), bucketName, testObject, uploadID, parts, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Test: %s complete multipart upload failed: <ERROR> %v", instanceType, err)
 	}

@@ -1501,7 +1501,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)

@@ -1838,7 +1838,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)

@@ -2462,7 +2462,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, part := range parts {
 		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
-			mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
+			mustGetPutObjReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}

@@ -2813,7 +2813,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
 	// Iterating over createPartCases to generate multipart chunks.
 	for _, part := range parts {
 		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
-			mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
+			mustGetPutObjReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}

@@ -2951,7 +2951,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)

@@ -3657,7 +3657,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
 	uploadIDCopy := uploadID
 
 	// create an object Part, will be used to test list object parts.
-	_, err = obj.PutObjectPart(context.Background(), bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""), opts)
+	_, err = obj.PutObjectPart(context.Background(), bucketName, testObject, uploadID, 1, mustGetPutObjReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""), opts)
 	if err != nil {
 		t.Fatalf("Minio %s : %s.", instanceType, err)
 	}
@ -106,7 +106,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
|
||||||
expectedETaghex := getMD5Hash(data)
|
expectedETaghex := getMD5Hash(data)
|
||||||
|
|
||||||
var calcPartInfo PartInfo
|
var calcPartInfo PartInfo
|
||||||
calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""), opts)
|
calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetPutObjReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("%s: <ERROR> %s", instanceType, err)
|
t.Errorf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -118,7 +118,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
|
||||||
ETag: calcPartInfo.ETag,
|
ETag: calcPartInfo.ETag,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
objInfo, err := obj.CompleteMultipartUpload(context.Background(), "bucket", "key", uploadID, completedParts.Parts)
|
objInfo, err := obj.CompleteMultipartUpload(context.Background(), "bucket", "key", uploadID, completedParts.Parts, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -157,7 +157,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
|
||||||
|
|
||||||
metadata["md5"] = expectedETaghex
|
metadata["md5"] = expectedETaghex
|
||||||
var calcPartInfo PartInfo
|
var calcPartInfo PartInfo
|
||||||
calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""), opts)
|
calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetPutObjReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -199,7 +199,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
|
||||||
metadata := make(map[string]string)
|
metadata := make(map[string]string)
|
||||||
metadata["etag"] = expectedETaghex
|
metadata["etag"] = expectedETaghex
|
||||||
var objInfo ObjectInfo
|
var objInfo ObjectInfo
|
||||||
objInfo, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata, opts)
|
objInfo, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -253,7 +253,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||||
// check before paging occurs.
|
// check before paging occurs.
|
||||||
for i := 0; i < 5; i++ {
|
for i := 0; i < 5; i++ {
|
||||||
key := "obj" + strconv.Itoa(i)
|
key := "obj" + strconv.Itoa(i)
|
||||||
_, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
_, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -273,7 +273,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||||
// check after paging occurs pages work.
|
// check after paging occurs pages work.
|
||||||
for i := 6; i <= 10; i++ {
|
for i := 6; i <= 10; i++ {
|
||||||
key := "obj" + strconv.Itoa(i)
|
key := "obj" + strconv.Itoa(i)
|
||||||
_, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
_, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -290,11 +290,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||||
}
|
}
|
||||||
// check paging with prefix at end returns less objects.
|
// check paging with prefix at end returns less objects.
|
||||||
{
|
{
|
||||||
_, err = obj.PutObject(context.Background(), "bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
_, err = obj.PutObject(context.Background(), "bucket", "newPrefix", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
_, err = obj.PutObject(context.Background(), "bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
_, err = obj.PutObject(context.Background(), "bucket", "newPrefix2", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -332,11 +332,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||||
|
|
||||||
// check delimited results with delimiter and prefix.
|
// check delimited results with delimiter and prefix.
|
||||||
{
|
{
|
||||||
_, err = obj.PutObject(context.Background(), "bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
_, err = obj.PutObject(context.Background(), "bucket", "this/is/delimited", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
_, err = obj.PutObject(context.Background(), "bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
_, err = obj.PutObject(context.Background(), "bucket", "this/is/also/a/delimited/file", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@@ -448,14 +448,14 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	var opts ObjectOptions
 	uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
 	length := int64(len(uploadContent))
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}

 	uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
 	length = int64(len(uploadContent))
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -478,7 +478,7 @@ func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) {
 // Tests validate that bucket operation on non-existent bucket fails.
 func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	var opts ObjectOptions
-	_, err := obj.PutObject(context.Background(), "bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil, opts)
+	_, err := obj.PutObject(context.Background(), "bucket1", "object", mustGetPutObjReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil, opts)
 	if err == nil {
 		t.Fatal("Expected error but found nil")
 	}
@@ -526,7 +526,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {

 	var bytesBuffer1 bytes.Buffer
 	var opts ObjectOptions
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, readerEOF, length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -539,7 +539,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}

 	var bytesBuffer2 bytes.Buffer
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetPutObjReader(t, readerNoEOF, length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -568,7 +568,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
 upload might have been aborted or completed.`
 	length := int64(len(uploadContent))
-	_, err = obj.PutObject(context.Background(), "bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "bucket", "dir1/dir2/object", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -741,7 +741,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
 	length := int64(len(content))
 	var opts ObjectOptions
-	_, err = obj.PutObject(context.Background(), bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), bucketName, "dir1/dir3/object", mustGetPutObjReader(t, bytes.NewBufferString(content), length, "", ""), nil, opts)

 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
@@ -783,7 +783,7 @@ func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	var opts ObjectOptions
 	uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
 	// Test empty.
-	_, err = obj.PutObject(context.Background(), "bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), "bucket", "minio.png", mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -189,7 +189,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string,

 	// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
 	object1 := "object1"
-	_, err = obj.PutObject(context.Background(), bucket, object1, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -200,7 +200,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string,
 	object2 := "object2"
 	metadata2 := make(map[string]string)
 	metadata2["x-amz-storage-class"] = reducedRedundancyStorageClass
-	_, err = obj.PutObject(context.Background(), bucket, object2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2, opts)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -211,7 +211,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string,
 	object3 := "object3"
 	metadata3 := make(map[string]string)
 	metadata3["x-amz-storage-class"] = standardStorageClass
-	_, err = obj.PutObject(context.Background(), bucket, object3, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3, opts)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -227,7 +227,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string,
 		Scheme: "EC",
 	}

-	_, err = obj.PutObject(context.Background(), bucket, object4, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4, opts)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -245,7 +245,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string,
 		Scheme: "EC",
 	}

-	_, err = obj.PutObject(context.Background(), bucket, object5, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5, opts)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -263,7 +263,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string,
 		Scheme: "EC",
 	}

-	_, err = obj.PutObject(context.Background(), bucket, object6, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6, opts)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -281,7 +281,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string,
 		Scheme: "EC",
 	}

-	_, err = obj.PutObject(context.Background(), bucket, object7, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7, opts)
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -136,12 +136,12 @@ func calculateSignedChunkLength(chunkDataSize int64) int64 {
 		2 // CRLF
 }

-func mustGetHashReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *hash.Reader {
+func mustGetPutObjReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *PutObjReader {
 	hr, err := hash.NewReader(data, size, md5hex, sha256hex, size)
 	if err != nil {
 		t.Fatal(err)
 	}
-	return hr
+	return NewPutObjReader(hr, nil, nil)
 }

 // calculateSignedChunkLength - calculates the length of the overall stream (data + metadata)
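The helper above is the one-line shim most of the test hunks in this diff go through: it builds the same hash.Reader as before and wraps it in a PutObjReader with no encrypted counterpart. As a rough mental model (a sketch, not the actual minio type; the field names rawReader and sealedMD5 are assumptions made for illustration), a PutObjReader needs roughly this shape for the rest of the diff to type-check:

package cmd

import (
	"encoding/hex"

	"github.com/minio/minio/pkg/hash"
)

// putObjReaderSketch is an illustrative stand-in for PutObjReader.
type putObjReaderSketch struct {
	*hash.Reader           // stream that is actually persisted (encrypted for SSE)
	rawReader *hash.Reader // client-visible, plain-text stream
	sealedMD5 []byte       // encrypted client MD5; nil when no SSE key is involved
}

// newPutObjReaderSketch mirrors the NewPutObjReader(hr, nil, nil) calls in the
// tests: without an encrypted reader both streams are the same and no sealed
// MD5 exists.
func newPutObjReaderSketch(raw, enc *hash.Reader, sealedMD5 []byte) *putObjReaderSketch {
	r := &putObjReaderSketch{Reader: raw, rawReader: raw}
	if enc != nil {
		r.Reader = enc
		r.sealedMD5 = sealedMD5
	}
	return r
}

// MD5CurrentHexString yields what this commit stores as the backend ETag:
// the sealed MD5 for SSE objects, the running plain-text MD5 otherwise.
func (p *putObjReaderSketch) MD5CurrentHexString() string {
	if p.sealedMD5 != nil {
		return hex.EncodeToString(p.sealedMD5)
	}
	return hex.EncodeToString(p.rawReader.MD5Current())
}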
@@ -807,7 +807,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		writeWebErrorResponse(w, err)
 		return
 	}
-	opts := ObjectOptions{}
+	var opts ObjectOptions
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
 		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
@@ -816,7 +816,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		}
 	}

-	objInfo, err := putObject(ctx, bucket, object, hashReader, metadata, opts)
+	objInfo, err := putObject(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), metadata, opts)
 	if err != nil {
 		writeWebErrorResponse(w, err)
 		return
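The browser-upload path carries no SSE context, so the existing hash.Reader is wrapped with two nils and behaves exactly as before; the wrap exists only because the object-layer signatures now uniformly take *PutObjReader. A minimal sketch of this call pattern (grounded in the hunk above; the commented SSE variant uses assumed names, not identifiers from this commit):

package cmd

import "github.com/minio/minio/pkg/hash"

// uploadReaderSketch shows the unencrypted construction from the hunk above.
// A hypothetical SSE caller would instead pass the encrypted stream and key
// material, e.g. NewPutObjReader(hashReader, encReader, sealKey) (names assumed).
func uploadReaderSketch(hashReader *hash.Reader) *PutObjReader {
	return NewPutObjReader(hashReader, nil, nil) // no encrypted stream, no sealed MD5
}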
@@ -916,7 +916,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 	if objectAPI.IsEncryptionSupported() {
-		if _, err = DecryptObjectInfo(objInfo, r.Header); err != nil {
+		if _, err = DecryptObjectInfo(&objInfo, r.Header); err != nil {
 			writeWebErrorResponse(w, err)
 			return
 		}
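DecryptObjectInfo now takes a pointer because it must rewrite fields of the caller's ObjectInfo in place: with ETags stored encrypted for SSE objects, the handler needs the decrypted, client-visible value before writing response headers. A sketch of that mutation pattern (illustrative only; decryptETagSketch is a hypothetical helper, and the real DecryptObjectInfo does more than this):

package cmd

import "net/http"

// decryptObjectInfoSketch illustrates why the argument became *ObjectInfo:
// the rewrite of info.ETag must be visible to the caller.
func decryptObjectInfoSketch(info *ObjectInfo, h http.Header) error {
	etag, err := decryptETagSketch(info.ETag, h) // hypothetical helper
	if err != nil {
		return err
	}
	info.ETag = etag // in-place mutation: impossible with a value argument
	return nil
}

// decryptETagSketch is a stand-in; assume it unseals the stored ETag using
// request-supplied key material when the object is SSE encrypted.
func decryptETagSketch(storedETag string, h http.Header) (string, error) {
	return storedETag, nil // placeholder pass-through for the sketch
}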
@@ -1113,7 +1113,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
 		}
 		length = info.Size
 		if objectAPI.IsEncryptionSupported() {
-			if _, err = DecryptObjectInfo(info, r.Header); err != nil {
+			if _, err = DecryptObjectInfo(&info, r.Header); err != nil {
 				writeWebErrorResponse(w, err)
 				return err
 			}
@@ -349,7 +349,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	for _, test := range testCases {
 		if test.initWithObject {
 			data := bytes.NewBufferString("hello")
-			_, err = obj.PutObject(context.Background(), test.bucketName, "object", mustGetHashReader(t, data, int64(data.Len()), "", ""), nil, opts)
+			_, err = obj.PutObject(context.Background(), test.bucketName, "object", mustGetPutObjReader(t, data, int64(data.Len()), "", ""), nil, opts)
 			// _, err = obj.PutObject(test.bucketName, "object", int64(data.Len()), data, nil, "")
 			if err != nil {
 				t.Fatalf("could not put object to %s, %s", test.bucketName, err.Error())
@@ -485,7 +485,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {

 	data := bytes.Repeat([]byte("a"), objectSize)
 	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})

 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
@@ -589,14 +589,14 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {

 	data := bytes.Repeat([]byte("a"), objectSize)
 	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}

 	objectName = "a/object"
 	metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -987,7 +987,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {

 	content := []byte("temporary file's content")
 	metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -1090,9 +1090,9 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		t.Fatalf("%s : %s", instanceType, err)
 	}

-	obj.PutObject(context.Background(), bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil, opts)
-	obj.PutObject(context.Background(), bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil, opts)
-	obj.PutObject(context.Background(), bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil, opts)
+	obj.PutObject(context.Background(), bucket, "a/one", mustGetPutObjReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil, opts)
+	obj.PutObject(context.Background(), bucket, "a/b/two", mustGetPutObjReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil, opts)
+	obj.PutObject(context.Background(), bucket, "a/c/three", mustGetPutObjReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil, opts)

 	test := func(token string) (int, []byte) {
 		rec := httptest.NewRecorder()
@@ -1177,7 +1177,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {

 	data := bytes.Repeat([]byte("a"), objectSize)
 	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -29,7 +29,6 @@ import (

 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/pkg/bpool"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/madmin"
 	"github.com/minio/minio/pkg/policy"
 	"github.com/minio/minio/pkg/sync/errgroup"
@@ -598,7 +597,7 @@ func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset
 }

 // PutObject - writes an object to hashedSet based on the object name.
-func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	return s.getHashedSet(object).PutObject(ctx, bucket, object, data, metadata, opts)
 }
@@ -630,8 +629,7 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket,
 		}
 		defer objectDWLock.Unlock()
 	}
-
-	return destSet.putObject(ctx, destBucket, destObject, srcInfo.Reader, srcInfo.UserDefined, dstOpts)
+	return destSet.putObject(ctx, destBucket, destObject, srcInfo.PutObjReader, srcInfo.UserDefined, dstOpts)
 }

 // Returns function "listDir" of the type listDirFunc.
@@ -828,11 +826,11 @@ func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
 	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
 	destSet := s.getHashedSet(destObject)

-	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.Reader, dstOpts)
+	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
 }

 // PutObjectPart - writes part of an object to hashedSet based on the object name.
-func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
+func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
 	return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 }
@@ -847,8 +845,8 @@ func (s *xlSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID
 }

 // CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
-func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
-	return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
+func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+	return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
 }

 /*
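Taken together, the hunks in this file change the ObjectLayer surface in three places: PutObject and PutObjectPart now take *PutObjReader, and CompleteMultipartUpload gains an ObjectOptions argument. A compile-time sketch exercising the post-diff signatures (bucket and object names are placeholders), so any mismatch would fail to build:

package cmd

import "context"

// Sketch: calls each changed method with the signatures this diff leaves behind.
var _ = func(ctx context.Context, o ObjectLayer, r *PutObjReader) error {
	if _, err := o.PutObject(ctx, "bucket", "object", r, nil, ObjectOptions{}); err != nil {
		return err
	}
	if _, err := o.PutObjectPart(ctx, "bucket", "object", "uploadID", 1, r, ObjectOptions{}); err != nil {
		return err
	}
	_, err := o.CompleteMultipartUpload(ctx, "bucket", "object", "uploadID", nil, ObjectOptions{})
	return err
}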
@@ -43,7 +43,7 @@ func TestXLParentDirIsObject(t *testing.T) {
 	}
 	objectContent := "12345"
 	objInfo, err := obj.PutObject(context.Background(), bucketName, objectName,
-		mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
+		mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -200,7 +200,7 @@ func TestListOnlineDisks(t *testing.T) {
 		t.Fatalf("Failed to make a bucket %v", err)
 	}

-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -292,7 +292,7 @@ func TestDisksWithAllParts(t *testing.T) {
 		t.Fatalf("Failed to make a bucket %v", err)
 	}

-	_, err = obj.PutObject(ctx, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -91,7 +91,7 @@ func TestHealObjectXL(t *testing.T) {

 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := obj.PutObjectPart(context.Background(), bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
+		pInfo, err1 := obj.PutObjectPart(context.Background(), bucket, object, uploadID, partID, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
@@ -101,7 +101,7 @@ func TestHealObjectXL(t *testing.T) {
 		})
 	}

-	_, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, uploadedParts)
+	_, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, uploadedParts, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to complete multipart upload - %v", err)
 	}
@@ -69,7 +69,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -153,7 +153,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
 	sha256sum := ""
 	// Iterating over createPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, perr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
+		_, perr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
 		if perr != nil {
 			t.Fatalf("%s : %s", instanceType, perr)
 		}
@@ -18,7 +18,6 @@ package cmd

 import (
 	"context"
-	"encoding/hex"
 	"fmt"
 	"path"
 	"sort"
@@ -28,7 +27,6 @@ import (
 	"time"

 	"github.com/minio/minio/cmd/logger"
-	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/mimedb"
 )
@@ -263,7 +261,7 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket,
 		return pi, err
 	}

-	partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader, dstOpts)
+	partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
 	if err != nil {
 		return pi, toObjectErr(err, dstBucket, dstObject)
 	}
@@ -277,7 +275,8 @@
 // of the multipart transaction.
 //
 // Implements S3 compatible Upload Part API.
-func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (pi PartInfo, e error) {
+func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) {
+	data := r.Reader
 	if err := checkPutObjectPartArgs(ctx, bucket, object, xl); err != nil {
 		return pi, err
 	}
@@ -432,7 +431,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
 	// Once part is successfully committed, proceed with updating XL metadata.
 	xlMeta.Stat.ModTime = UTCNow()

-	md5hex := hex.EncodeToString(data.MD5Current())
+	md5hex := r.MD5CurrentHexString()

 	// Add the current part.
 	xlMeta.AddObjectPart(partID, partSuffix, md5hex, n, data.ActualSize())
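The pattern inside PutObjectPart is worth spelling out: bytes are streamed from r.Reader (the stream that actually gets persisted, encrypted under SSE) while the stored ETag is asked of the PutObjReader itself, so SSE parts record the sealed MD5. A sketch of those two lines under the assumptions of the wrapper type sketched earlier:

package cmd

import "io"

// writePartSketch mirrors what this hunk changes: copy from the wrapped
// reader, then let the reader decide the ETag. Illustrative only.
func writePartSketch(r *putObjReaderSketch, sink io.Writer) (etag string, err error) {
	if _, err = io.Copy(sink, r); err != nil { // r streams the persisted (possibly encrypted) bytes
		return "", err
	}
	return r.MD5CurrentHexString(), nil // sealed MD5 for SSE parts, plain MD5 otherwise
}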
@@ -564,7 +563,7 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID
 // md5sums of all the parts.
 //
 // Implements S3 compatible Complete multipart API.
-func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
+func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {
 	if err := checkCompleteMultipartArgs(ctx, bucket, object, xl); err != nil {
 		return oi, err
 	}
@@ -657,7 +656,6 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
 			return oi, invp
 		}

-		// All parts should have same ETag as previously generated.
 		if currentXLMeta.Parts[partIdx].ETag != part.ETag {
 			invp := InvalidPart{
 				PartNumber: part.PartNumber,
@@ -19,7 +19,6 @@ package cmd

 import (
 	"bytes"
 	"context"
-	"encoding/hex"
 	"io"
 	"net/http"
 	"path"
|
@ -153,7 +152,7 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
|
||||||
return oi, toObjectErr(err, dstBucket, dstObject)
|
return oi, toObjectErr(err, dstBucket, dstObject)
|
||||||
}
|
}
|
||||||
|
|
||||||
objInfo, err := xl.putObject(ctx, dstBucket, dstObject, hashReader, srcInfo.UserDefined, dstOpts)
|
objInfo, err := xl.putObject(ctx, dstBucket, dstObject, NewPutObjReader(hashReader, nil, nil), srcInfo.UserDefined, dstOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return oi, toObjectErr(err, dstBucket, dstObject)
|
return oi, toObjectErr(err, dstBucket, dstObject)
|
||||||
}
|
}
|
||||||
|
@@ -639,7 +638,7 @@ func rename(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket
 // until EOF, erasure codes the data across all disks and additionally
 // writes `xl.json` which carries the necessary metadata for future
 // object operations.
-func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
 	// Validate put object input args.
 	if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
 		return ObjectInfo{}, err
@@ -655,7 +654,9 @@ func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string,
 	}

 // putObject wrapper for xl PutObject
-func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+	data := r.Reader

 	uniqueID := mustGetUUID()
 	tempObj := uniqueID
@@ -841,7 +842,8 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,

 	// Save additional erasureMetadata.
 	modTime := UTCNow()
-	metadata["etag"] = hex.EncodeToString(data.MD5Current())
+
+	metadata["etag"] = r.MD5CurrentHexString()

 	// Guess content-type from the extension if possible.
 	if metadata["content-type"] == "" {
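For unencrypted objects nothing observable changes here: with no sealed MD5, MD5CurrentHexString reduces to the old hex-encoded MD5, so clients keep seeing ETag == MD5 of the content. A quick self-contained check under the earlier sketch's assumptions:

package cmd

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"io"
	"io/ioutil"

	"github.com/minio/minio/pkg/hash"
)

// etagMatchesContentMD5 drives a plain (non-SSE) reader to EOF and compares
// the value that would be persisted as the etag against the content MD5.
func etagMatchesContentMD5(content []byte) bool {
	hr, err := hash.NewReader(bytes.NewReader(content), int64(len(content)), "", "", int64(len(content)))
	if err != nil {
		return false
	}
	r := newPutObjReaderSketch(hr, nil, nil)
	if _, err = io.Copy(ioutil.Discard, r); err != nil { // read everything so MD5Current is final
		return false
	}
	sum := md5.Sum(content)
	return r.MD5CurrentHexString() == hex.EncodeToString(sum[:])
}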
@@ -55,12 +55,12 @@ func TestRepeatPutObjectPart(t *testing.T) {
 	}
 	fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
 	md5Hex := getMD5Hash(fiveMBBytes)
-	_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
+	_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
 	if err != nil {
 		t.Fatal(err)
 	}
 	// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
-	_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
+	_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetPutObjReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -94,7 +94,7 @@ func TestXLDeleteObjectBasic(t *testing.T) {
 	}

 	// Create object "dir/obj" under bucket "bucket" for Test 7 to pass
-	_, err = xl.PutObject(context.Background(), "bucket", "dir/obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
+	_, err = xl.PutObject(context.Background(), "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("XL Object upload failed: <ERROR> %s", err)
 	}
@@ -131,7 +131,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
 	object := "object"
 	opts := ObjectOptions{}
 	// Create object "obj" under bucket "bucket".
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -146,7 +146,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
 	}

 	// Create "obj" under "bucket".
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -182,7 +182,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
 	object := "object"
 	opts := ObjectOptions{}
 	// Create "object" under "bucket".
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -233,7 +233,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
 	object := "object"
 	opts := ObjectOptions{}
 	// Create "object" under "bucket".
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -256,7 +256,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
 		}
 	}
 	// Upload new content to same object "object"
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != toObjectErr(errXLWriteQuorum, bucket, object) {
 		t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
 	}
@@ -290,7 +290,7 @@ func TestHealing(t *testing.T) {
 		t.Fatal(err)
 	}

-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil, ObjectOptions{})
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}