Mirror of https://github.com/minio/minio.git (synced 2025-04-12 07:22:18 -04:00)

Add ObjectOptions to ObjectLayer calls (#6382)

parent 30d4a2cf53
commit 5c0b98abf0
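The change below is mechanical but wide: every ObjectLayer method (and the cache, dummy, and FS implementations that satisfy it) gains a trailing ObjectOptions parameter, and call sites with no special per-request behavior pass the zero value ObjectOptions{}. As a minimal sketch of the pattern — not the actual MinIO code, and assuming a simplified one-method interface, since the fields of ObjectOptions are not shown anywhere in this diff:

    package main

    import (
        "context"
        "fmt"
    )

    // ObjectOptions carries per-request options. Its real fields are not
    // visible in this excerpt, so it is left empty here.
    type ObjectOptions struct{}

    type ObjectInfo struct{ Name string }

    // Before this commit: GetObjectInfo(ctx, bucket, object)
    // After this commit:  GetObjectInfo(ctx, bucket, object, opts)
    type ObjectLayer interface {
        GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
    }

    type demoLayer struct{}

    func (demoLayer) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
        return ObjectInfo{Name: object}, nil
    }

    func main() {
        var obj ObjectLayer = demoLayer{}
        // Callers that need nothing special pass the zero value, exactly as
        // the diff below does with ObjectOptions{}.
        info, err := obj.GetObjectInfo(context.Background(), "bucket", "object", ObjectOptions{})
        fmt.Println(info, err)
    }

Threading an options struct, rather than adding individual parameters, means future per-request settings can be introduced without touching every signature again.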
@@ -121,7 +121,7 @@ func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.
         }
     }

     // Before proceeding validate if object exists.
-    _, err := objAPI.GetObjectInfo(ctx, bucket, object)
+    _, err := objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -284,7 +284,7 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
         objectName := fmt.Sprintf("%s-%d", objName, i)
         _, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
             mustGetHashReader(t, bytes.NewReader([]byte("hello")),
-                int64(len("hello")), "", ""), nil)
+                int64(len("hello")), "", ""), nil, ObjectOptions{})
         if err != nil {
             t.Fatalf("Failed to create %s - %v", objectName,
                 err)
@@ -296,14 +296,14 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
     {
         objName := "mpObject"
         uploadID, err := atb.objLayer.NewMultipartUpload(context.Background(), bucketName,
-            objName, nil)
+            objName, nil, ObjectOptions{})
         if err != nil {
             t.Fatalf("mp new error: %v", err)
         }

         _, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
             uploadID, 3, mustGetHashReader(t, bytes.NewReader(
-                []byte("hello")), int64(len("hello")), "", ""))
+                []byte("hello")), int64(len("hello")), "", ""), ObjectOptions{})
         if err != nil {
             t.Fatalf("mp put error: %v", err)
         }
@@ -610,7 +610,7 @@ func (h *healSequence) healBucket(bucket string) error {
     if h.objPrefix != "" {
         // Check if an object named as the objPrefix exists,
         // and if so heal it.
-        _, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix)
+        _, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
         if err == nil {
             err = h.healObject(bucket, h.objPrefix)
             if err != nil {
@@ -61,7 +61,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     for i := 0; i < b.N; i++ {
         // insert the object.
        objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
+            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
         if err != nil {
             b.Fatal(err)
         }
@@ -97,7 +97,8 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
     // generate md5sum for the generated data.
     // md5sum of the data to written is required as input for NewMultipartUpload.
     metadata := make(map[string]string)
-    uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata)
+    opts := ObjectOptions{}
+    uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata, opts)
     if err != nil {
         b.Fatal(err)
     }
@@ -122,7 +123,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
         md5hex = getMD5Hash([]byte(textPartData))
         var partInfo PartInfo
         partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
-            mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex))
+            mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), opts)
         if err != nil {
             b.Fatal(err)
         }
@@ -203,7 +204,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
         // insert the object.
         var objInfo ObjectInfo
         objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
+            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
         if err != nil {
             b.Fatal(err)
         }
@@ -218,7 +219,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     b.ResetTimer()
     for i := 0; i < b.N; i++ {
         var buffer = new(bytes.Buffer)
-        err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
+        err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
         if err != nil {
             b.Error(err)
         }
@@ -303,7 +304,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
         for pb.Next() {
             // insert the object.
             objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-                mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
+                mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
             if err != nil {
                 b.Fatal(err)
             }
@@ -343,7 +344,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
         // insert the object.
         var objInfo ObjectInfo
         objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
+            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata, ObjectOptions{})
         if err != nil {
             b.Fatal(err)
         }
@@ -359,7 +360,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     b.RunParallel(func(pb *testing.PB) {
         i := 0
         for pb.Next() {
-            err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
+            err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
             if err != nil {
                 b.Error(err)
             }
@@ -622,7 +622,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
         }
     }

-    objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata)
+    objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata, ObjectOptions{})
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -625,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
     for i := 0; i < 10; i++ {
         objectName := "test-object-" + strconv.Itoa(i)
         // uploading the object.
-        _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
+        _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil, ObjectOptions{})
         // if object upload fails stop the test.
         if err != nil {
             t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
@@ -158,7 +158,7 @@ func checkServerConfig(ctx context.Context, objAPI ObjectLayer) error {
         return checkServerConfigEtcd(configFile)
     }

-    if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile); err != nil {
+    if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
         // Convert ObjectNotFound, Quorum errors into errConfigNotFound
         if isErrObjectNotFound(err) || isInsufficientReadQuorum(err) {
             return errConfigNotFound
@@ -176,7 +176,7 @@ func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error {
         return err
     }

-    _, err = objAPI.PutObject(context.Background(), minioMetaBucket, configFile, hashReader, nil)
+    _, err = objAPI.PutObject(context.Background(), minioMetaBucket, configFile, hashReader, nil, ObjectOptions{})
     return err
 }

@@ -185,7 +185,7 @@ var errConfigNotFound = errors.New("config file not found")
 func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
     var buffer bytes.Buffer
     // Read entire content by setting size to -1
-    if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, ""); err != nil {
+    if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
         // Convert ObjectNotFound, IncompleteBody and Quorum errors into errConfigNotFound
         if isErrObjectNotFound(err) || isErrIncompleteBody(err) || isInsufficientReadQuorum(err) {
             return nil, errConfigNotFound
@@ -258,7 +258,7 @@ func (cfs *cacheFSObjects) IsOnline() bool {
 }

 // Caches the object to disk
-func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) error {
+func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) error {
     if cfs.diskUsageHigh() {
         select {
         case cfs.purgeChan <- struct{}{}:
@@ -275,7 +275,7 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
             return pErr
         }
     }
-    _, err := cfs.PutObject(ctx, bucket, object, data, metadata)
+    _, err := cfs.PutObject(ctx, bucket, object, data, metadata, opts)
     // if err is due to disk being offline , mark cache drive as offline
     if IsErr(err, baseErrs...) {
         cfs.setOnline(false)
@@ -284,8 +284,8 @@ func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data
 }

 // Returns the handle for the cached object
-func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
-    return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag)
+func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
+    return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
 }

 // Deletes the cached object
@@ -295,13 +295,13 @@ func (cfs *cacheFSObjects) Delete(ctx context.Context, bucket, object string) (e

 // convenience function to check if object is cached on this cacheFSObjects
 func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bool {
-    _, err := cfs.GetObjectInfo(ctx, bucket, object)
+    _, err := cfs.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
     return err == nil
 }

 // Identical to fs PutObject operation except that it uses ETag in metadata
 // headers.
-func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
+func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
     fs := cfs.FSObjects
     // Lock the object.
     objectLock := fs.nsMutex.NewNSLock(bucket, object)
@@ -438,7 +438,7 @@ func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object
 // Implements S3 compatible initiate multipart API. Operation here is identical
 // to fs backend implementation - with the exception that cache FS uses the uploadID
 // generated on the backend
-func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string) (string, error) {
+func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string, opts ObjectOptions) (string, error) {
     if cfs.diskUsageHigh() {
         select {
         case cfs.purgeChan <- struct{}{}:
@@ -57,16 +57,16 @@ type cacheObjects struct {
     // file path patterns to exclude from cache
     exclude []string
     // Object functions pointing to the corresponding functions of backend implementation.
-    GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
-    GetObjectInfoFn func(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
-    PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
+    GetObjectFn func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
+    GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
+    PutObjectFn func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
     DeleteObjectFn func(ctx context.Context, bucket, object string) error
     ListObjectsFn func(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
     ListObjectsV2Fn func(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
     ListBucketsFn func(ctx context.Context) (buckets []BucketInfo, err error)
     GetBucketInfoFn func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
-    NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
-    PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
+    NewMultipartUploadFn func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
+    PutObjectPartFn func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
     AbortMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string) error
     CompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
     DeleteBucketFn func(ctx context.Context, bucket string) error
@@ -88,14 +88,14 @@ type CacheObjectLayer interface {
     ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
     DeleteBucket(ctx context.Context, bucket string) error
     // Object operations.
-    GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
-    GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
-    PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
+    GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
+    GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
+    PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
     DeleteObject(ctx context.Context, bucket, object string) error

     // Multipart operations.
-    NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
-    PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
+    NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
+    PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
     AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
     CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)

@@ -177,20 +177,20 @@ func (c cacheObjects) getMetadata(objInfo ObjectInfo) map[string]string {

 // Uses cached-object to serve the request. If object is not cached it serves the request from the backend and also
 // stores it in the cache for serving subsequent requests.
-func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
+func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
     GetObjectFn := c.GetObjectFn
     GetObjectInfoFn := c.GetObjectInfoFn

     if c.isCacheExclude(bucket, object) {
-        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
     }
     // fetch cacheFSObjects if object is currently cached or nearest available cache drive
     dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
     if err != nil {
-        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
     }
     // stat object on backend
-    objInfo, err := GetObjectInfoFn(ctx, bucket, object)
+    objInfo, err := GetObjectInfoFn(ctx, bucket, object, opts)
     backendDown := backendDownError(err)
     if err != nil && !backendDown {
         if _, ok := err.(ObjectNotFound); ok {
@@ -201,27 +201,27 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
     }

     if !backendDown && filterFromCache(objInfo.UserDefined) {
-        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
     }

-    cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
+    cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
     if err == nil {
         if backendDown {
             // If the backend is down, serve the request from cache.
-            return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
+            return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
         }
         if cachedObjInfo.ETag == objInfo.ETag && !isStaleCache(objInfo) {
-            return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag)
+            return dcache.Get(ctx, bucket, object, startOffset, length, writer, etag, opts)
         }
         dcache.Delete(ctx, bucket, object)
     }
     if startOffset != 0 || length != objInfo.Size {
         // We don't cache partial objects.
-        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
     }
     if !dcache.diskAvailable(objInfo.Size * cacheSizeMultiplier) {
         // cache only objects < 1/100th of disk capacity
-        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag)
+        return GetObjectFn(ctx, bucket, object, startOffset, length, writer, etag, opts)
     }
     // Initialize pipe.
     pipeReader, pipeWriter := io.Pipe()
@@ -230,13 +230,13 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
         return err
     }
     go func() {
-        if err = GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag); err != nil {
+        if err = GetObjectFn(ctx, bucket, object, 0, objInfo.Size, io.MultiWriter(writer, pipeWriter), etag, opts); err != nil {
             pipeWriter.CloseWithError(err)
             return
         }
         pipeWriter.Close() // Close writer explicitly signaling we wrote all data.
     }()
-    err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo))
+    err = dcache.Put(ctx, bucket, object, hashReader, c.getMetadata(objInfo), opts)
     if err != nil {
         return err
     }
@@ -245,17 +245,17 @@ func (c cacheObjects) GetObject(ctx context.Context, bucket, object string, star
 }

 // Returns ObjectInfo from cache if available.
-func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string) (ObjectInfo, error) {
+func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
     getObjectInfoFn := c.GetObjectInfoFn
     if c.isCacheExclude(bucket, object) {
-        return getObjectInfoFn(ctx, bucket, object)
+        return getObjectInfoFn(ctx, bucket, object, opts)
     }
     // fetch cacheFSObjects if object is currently cached or nearest available cache drive
     dcache, err := c.cache.getCachedFSLoc(ctx, bucket, object)
     if err != nil {
-        return getObjectInfoFn(ctx, bucket, object)
+        return getObjectInfoFn(ctx, bucket, object, opts)
     }
-    objInfo, err := getObjectInfoFn(ctx, bucket, object)
+    objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
     if err != nil {
         if _, ok := err.(ObjectNotFound); ok {
             // Delete the cached entry if backend object was deleted.
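As a standalone sketch of the tee-to-cache pattern used by cacheObjects.GetObject above — the backend read is duplicated into a pipe with io.MultiWriter so the caller and the cache writer consume the same bytes concurrently. Names here are illustrative, not MinIO's:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        backend := strings.NewReader("object bytes from the backend")
        pipeReader, pipeWriter := io.Pipe()

        var caller bytes.Buffer // stands in for the HTTP response writer
        var cache bytes.Buffer  // stands in for the disk cache
        done := make(chan struct{})

        // Cache side: drain the pipe, as dcache.Put does with hashReader.
        go func() {
            io.Copy(&cache, pipeReader)
            close(done)
        }()

        // Backend side: a single read feeds both destinations.
        if _, err := io.Copy(io.MultiWriter(&caller, pipeWriter), backend); err != nil {
            pipeWriter.CloseWithError(err) // propagate the failure to the cache side
        } else {
            pipeWriter.Close() // signal EOF so the cache side finishes
        }
        <-done

        fmt.Println("caller got:", caller.String())
        fmt.Println("cache got: ", cache.String())
    }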
@@ -266,14 +266,14 @@ func (c cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string)
             return ObjectInfo{}, err
         }
         // when backend is down, serve from cache.
-        cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object)
+        cachedObjInfo, cerr := dcache.GetObjectInfo(ctx, bucket, object, opts)
         if cerr == nil {
             return cachedObjInfo, nil
         }
         return ObjectInfo{}, BackendDown{}
     }
     // when backend is up, do a sanity check on cached object
-    cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object)
+    cachedObjInfo, err := dcache.GetObjectInfo(ctx, bucket, object, opts)
     if err != nil {
         return objInfo, nil
     }
@@ -560,24 +560,24 @@ func (c cacheObjects) isCacheExclude(bucket, object string) bool {
 }

 // PutObject - caches the uploaded object for single Put operations
-func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
+func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
     putObjectFn := c.PutObjectFn
     dcache, err := c.cache.getCacheFS(ctx, bucket, object)
     if err != nil {
         // disk cache could not be located,execute backend call.
-        return putObjectFn(ctx, bucket, object, r, metadata)
+        return putObjectFn(ctx, bucket, object, r, metadata, opts)
     }
     size := r.Size()

     // fetch from backend if there is no space on cache drive
     if !dcache.diskAvailable(size * cacheSizeMultiplier) {
-        return putObjectFn(ctx, bucket, object, r, metadata)
+        return putObjectFn(ctx, bucket, object, r, metadata, opts)
     }
     // fetch from backend if cache exclude pattern or cache-control
     // directive set to exclude
     if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
         dcache.Delete(ctx, bucket, object)
-        return putObjectFn(ctx, bucket, object, r, metadata)
+        return putObjectFn(ctx, bucket, object, r, metadata, opts)
     }
     objInfo = ObjectInfo{}
     // Initialize pipe to stream data to backend
@@ -595,7 +595,7 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
     oinfoCh := make(chan ObjectInfo)
     errCh := make(chan error)
     go func() {
-        oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata)
+        oinfo, perr := putObjectFn(ctx, bucket, object, hashReader, metadata, opts)
         if perr != nil {
             pipeWriter.CloseWithError(perr)
             wPipe.CloseWithError(perr)
@@ -608,7 +608,7 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
     }()

     go func() {
-        if err = dcache.Put(ctx, bucket, object, cHashReader, metadata); err != nil {
+        if err = dcache.Put(ctx, bucket, object, cHashReader, metadata, opts); err != nil {
             wPipe.CloseWithError(err)
             return
         }
@@ -627,39 +627,39 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
 }

 // NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
-func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
+func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
     newMultipartUploadFn := c.NewMultipartUploadFn

     if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
-        return newMultipartUploadFn(ctx, bucket, object, metadata)
+        return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
     }

     dcache, err := c.cache.getCacheFS(ctx, bucket, object)
     if err != nil {
         // disk cache could not be located,execute backend call.
-        return newMultipartUploadFn(ctx, bucket, object, metadata)
+        return newMultipartUploadFn(ctx, bucket, object, metadata, opts)
     }

-    uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata)
+    uploadID, err = newMultipartUploadFn(ctx, bucket, object, metadata, opts)
     if err != nil {
         return
     }
     // create new multipart upload in cache with same uploadID
-    dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID)
+    dcache.NewMultipartUpload(ctx, bucket, object, metadata, uploadID, opts)
     return uploadID, err
 }

 // PutObjectPart - uploads part to backend and cache simultaneously.
-func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
+func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
     putObjectPartFn := c.PutObjectPartFn
     dcache, err := c.cache.getCacheFS(ctx, bucket, object)
     if err != nil {
         // disk cache could not be located,execute backend call.
-        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
+        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
     }

     if c.isCacheExclude(bucket, object) {
-        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
+        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
     }

     // make sure cache has at least cacheSizeMultiplier * size available
@@ -669,7 +669,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
         case dcache.purgeChan <- struct{}{}:
         default:
         }
-        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data)
+        return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
     }

     info = PartInfo{}
@@ -688,7 +688,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
     pinfoCh := make(chan PartInfo)
     errorCh := make(chan error)
     go func() {
-        info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader)
+        info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, hashReader, opts)
         if err != nil {
             close(pinfoCh)
             pipeWriter.CloseWithError(err)
@@ -700,7 +700,7 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
         pinfoCh <- info
     }()
     go func() {
-        if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader); perr != nil {
+        if _, perr := dcache.PutObjectPart(ctx, bucket, object, uploadID, partID, cHashReader, opts); perr != nil {
             wPipe.CloseWithError(perr)
             return
         }
@@ -876,14 +876,14 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
         cache: dcache,
         exclude: config.Exclude,
         listPool: newTreeWalkPool(globalLookupTimeout),
-        GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
-            return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag)
+        GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
+            return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
         },
-        GetObjectInfoFn: func(ctx context.Context, bucket, object string) (ObjectInfo, error) {
-            return newObjectLayerFn().GetObjectInfo(ctx, bucket, object)
+        GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
+            return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
         },
-        PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
-            return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata)
+        PutObjectFn: func(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+            return newObjectLayerFn().PutObject(ctx, bucket, object, data, metadata, opts)
         },
         DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
             return newObjectLayerFn().DeleteObject(ctx, bucket, object)
@@ -900,11 +900,11 @@ func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
         GetBucketInfoFn: func(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
             return newObjectLayerFn().GetBucketInfo(ctx, bucket)
         },
-        NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
-            return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata)
+        NewMultipartUploadFn: func(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
+            return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, metadata, opts)
         },
-        PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
-            return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data)
+        PutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
+            return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
         },
         AbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string) error {
             return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID)
@@ -192,17 +192,18 @@ func TestDiskCache(t *testing.T) {
     objInfo.ContentType = contentType
     objInfo.ETag = etag
     objInfo.UserDefined = httpMeta
+    opts := ObjectOptions{}

     byteReader := bytes.NewReader([]byte(content))
     hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
     if err != nil {
         t.Fatal(err)
     }
-    err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
+    err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
     if err != nil {
         t.Fatal(err)
     }
-    cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
+    cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
     if err != nil {
         t.Fatal(err)
     }
@@ -219,7 +220,7 @@ func TestDiskCache(t *testing.T) {
         t.Fatal("Cached content-type does not match")
     }
     writer := bytes.NewBuffer(nil)
-    err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
+    err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
     if err != nil {
         t.Fatal(err)
     }
@@ -266,6 +267,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
     objInfo.ContentType = contentType
     objInfo.ETag = etag
     objInfo.UserDefined = httpMeta
+    opts := ObjectOptions{}

     byteReader := bytes.NewReader([]byte(content))
     hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
@@ -273,16 +275,16 @@ func TestDiskCacheMaxUse(t *testing.T) {
         t.Fatal(err)
     }
     if !cache.diskAvailable(int64(size)) {
-        err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
+        err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
         if err != errDiskFull {
             t.Fatal("Cache max-use limit violated.")
         }
     } else {
-        err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
+        err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta, opts)
         if err != nil {
             t.Fatal(err)
         }
-        cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
+        cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName, opts)
         if err != nil {
             t.Fatal(err)
         }
@@ -299,7 +301,7 @@ func TestDiskCacheMaxUse(t *testing.T) {
            t.Fatal("Cached content-type does not match")
         }
         writer := bytes.NewBuffer(nil)
-        err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
+        err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "", opts)
         if err != nil {
             t.Fatal(err)
         }
@@ -59,19 +59,19 @@ func (api *DummyObjectLayer) ListObjectsV2(ctx context.Context, bucket, prefix,
     return
 }

-func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
+func (api *DummyObjectLayer) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
     return
 }

-func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error) {
+func (api *DummyObjectLayer) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
     return
 }

-func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
+func (api *DummyObjectLayer) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
     return
 }

-func (api *DummyObjectLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
+func (api *DummyObjectLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
     return
 }

@@ -83,15 +83,15 @@ func (api *DummyObjectLayer) ListMultipartUploads(ctx context.Context, bucket, p
     return
 }

-func (api *DummyObjectLayer) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
+func (api *DummyObjectLayer) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
     return
 }

-func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo) (info PartInfo, err error) {
+func (api *DummyObjectLayer) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error) {
     return
 }

-func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
+func (api *DummyObjectLayer) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
     return
 }

@@ -54,7 +54,7 @@ func TestReadFSMetadata(t *testing.T) {
     if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
         t.Fatal("Unexpected err: ", err)
     }
-    if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
+    if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
         t.Fatal("Unexpected err: ", err)
     }

@@ -89,7 +89,7 @@ func TestWriteFSMetadata(t *testing.T) {
     if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
         t.Fatal("Unexpected err: ", err)
     }
-    if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
+    if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{}); err != nil {
         t.Fatal("Unexpected err: ", err)
     }

@@ -209,7 +209,7 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
 // subsequent request each UUID is unique.
 //
 // Implements S3 compatible initiate multipart API.
-func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
+func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, opts ObjectOptions) (string, error) {
     if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil {
         return "", toObjectErr(err, bucket)
     }
@@ -249,7 +249,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
 // object. Internally incoming data is written to '.minio.sys/tmp' location
 // and safely renamed to '.minio.sys/multipart' for reach parts.
 func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
-    startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {
+    startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {

     if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil {
         return pi, toObjectErr(err)
@@ -257,7 +257,7 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d

     // Initialize pipe.
     go func() {
-        if gerr := fs.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
+        if gerr := fs.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag, srcOpts); gerr != nil {
            if gerr = srcInfo.Writer.Close(); gerr != nil {
                 logger.LogIf(ctx, gerr)
                 return
@@ -271,7 +271,7 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
         }
     }()

-    partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader)
+    partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader, dstOpts)
     if err != nil {
         return pi, toObjectErr(err, dstBucket, dstObject)
     }
@@ -283,7 +283,7 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
 // an ongoing multipart transaction. Internally incoming data is
 // written to '.minio.sys/tmp' location and safely renamed to
 // '.minio.sys/multipart' for reach parts.
-func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
+func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (pi PartInfo, e error) {
     if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
         return pi, toObjectErr(err, bucket)
     }
@@ -42,7 +42,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
     objectName := "object"

     obj.MakeBucketWithLocation(context.Background(), bucketName, "")
-    uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
+    uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil, ObjectOptions{})
     if err != nil {
         t.Fatal("Unexpected err: ", err)
     }
@@ -81,7 +81,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {

     // Test with disk removed.
     os.RemoveAll(disk)
-    if _, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil {
+    if _, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}, ObjectOptions{}); err != nil {
         if !isSameType(err, BucketNotFound{}) {
             t.Fatal("Unexpected error ", err)
         }
@@ -105,7 +105,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
         t.Fatal("Cannot create bucket, err: ", err)
     }

-    uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+    uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}, ObjectOptions{})
     if err != nil {
         t.Fatal("Unexpected error ", err)
     }
@@ -114,7 +114,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
     sha256sum := ""

     fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-    _, err = fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
+    _, err = fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{})
     if !isSameType(err, BucketNotFound{}) {
         t.Fatal("Unexpected error ", err)
     }
@@ -136,7 +136,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
         t.Fatal("Cannot create bucket, err: ", err)
     }

-    uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+    uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}, ObjectOptions{})
     if err != nil {
         t.Fatal("Unexpected error ", err)
     }
@ -168,14 +168,14 @@ func TestCompleteMultipartUpload(t *testing.T) {
|
|||||||
t.Fatal("Cannot create bucket, err: ", err)
|
t.Fatal("Cannot create bucket, err: ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
|
uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Unexpected error ", err)
|
t.Fatal("Unexpected error ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
md5Hex := getMD5Hash(data)
|
md5Hex := getMD5Hash(data)
|
||||||
|
|
||||||
if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
|
if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
|
||||||
t.Fatal("Unexpected error ", err)
|
t.Fatal("Unexpected error ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -202,14 +202,14 @@ func TestAbortMultipartUpload(t *testing.T) {
|
|||||||
t.Fatal("Cannot create bucket, err: ", err)
|
t.Fatal("Cannot create bucket, err: ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
|
uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Unexpected error ", err)
|
t.Fatal("Unexpected error ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
md5Hex := getMD5Hash(data)
|
md5Hex := getMD5Hash(data)
|
||||||
|
|
||||||
if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
|
if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, ""), ObjectOptions{}); err != nil {
|
||||||
t.Fatal("Unexpected error ", err)
|
t.Fatal("Unexpected error ", err)
|
||||||
}
|
}
|
||||||
time.Sleep(time.Second) // Without Sleep on windows, the fs.AbortMultipartUpload() fails with "The process cannot access the file because it is being used by another process."
|
time.Sleep(time.Second) // Without Sleep on windows, the fs.AbortMultipartUpload() fails with "The process cannot access the file because it is being used by another process."
|
||||||
@ -234,7 +234,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
|
|||||||
t.Fatal("Cannot create bucket, err: ", err)
|
t.Fatal("Cannot create bucket, err: ", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
|
_, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal("Unexpected error ", err)
|
t.Fatal("Unexpected error ", err)
|
||||||
}
|
}
|
||||||
|
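All of the test updates above follow one calling convention; condensed into a sketch (identifiers as in the tests above, test data elided):

opts := ObjectOptions{} // zero value keeps the pre-change behavior
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil, opts)
if err != nil {
	t.Fatal("Unexpected error ", err)
}
if _, err = obj.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1,
	mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts); err != nil {
	t.Fatal("Unexpected error ", err)
}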
@@ -414,7 +414,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string) error {
 // CopyObject - copy object source object to destination object.
 // if source object and destination object are same we only
 // update metadata.
-func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo) (oi ObjectInfo, e error) {
+func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 	// Hold write lock on destination since in both cases
 	// - if source and destination are same
@@ -504,7 +504,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string) (err error) {
+func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
 	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
 		return err
 	}
@@ -710,7 +710,7 @@ func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object s
 }

 // GetObjectInfo - reads object metadata and replies back ObjectInfo.
-func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
+func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
 	oi, err := fs.getObjectInfoWithLock(ctx, bucket, object)
 	if err == errCorruptedFormat || err == io.EOF {
 		objectLock := fs.nsMutex.NewNSLock(bucket, object)
@@ -754,7 +754,7 @@ func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent strin
 // until EOF, writes data directly to configured filesystem path.
 // Additionally writes `fs.json` which carries the necessary metadata
 // for future object operations.
-func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
+func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
 	if err := checkPutObjectArgs(ctx, bucket, object, fs, data.Size()); err != nil {
 		return ObjectInfo{}, err
 	}
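Taken together, the FS hunks above imply this widened contract (a fragment reconstructed from the signatures in this diff, not the full ObjectLayer interface):

type objectLayerFragment interface {
	GetObject(ctx context.Context, bucket, object string, offset, length int64, writer io.Writer, etag string, opts ObjectOptions) error
	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
	PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (ObjectInfo, error)
	CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (ObjectInfo, error)
}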
@@ -40,7 +40,7 @@ func TestFSParentDirIsObject(t *testing.T) {
 	}
 	objectContent := "12345"
 	objInfo, err := obj.PutObject(context.Background(), bucketName, objectName,
-		mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
+		mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -124,7 +124,7 @@ func TestFSShutdown(t *testing.T) {

 	objectContent := "12345"
 	obj.MakeBucketWithLocation(context.Background(), bucketName, "")
-	obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
+	obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
 	return fs, disk
 }

@@ -203,7 +203,7 @@ func TestFSPutObject(t *testing.T) {
 	}

 	// With a regular object.
-	_, err := obj.PutObject(context.Background(), bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	_, err := obj.PutObject(context.Background(), bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, bucket doesn't exist")
 	}
@@ -212,7 +212,7 @@ func TestFSPutObject(t *testing.T) {
 	}

 	// With a directory object.
-	_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, bucket doesn't exist")
 	}
@@ -220,11 +220,11 @@ func TestFSPutObject(t *testing.T) {
 		t.Fatalf("Expected error type BucketNotFound, got %#v", err)
 	}

-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, backend corruption occurred")
 	}
@@ -239,7 +239,7 @@ func TestFSPutObject(t *testing.T) {
 		}
 	}

-	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil, ObjectOptions{})
 	if err == nil {
 		t.Fatal("Unexpected should fail here, backned corruption occurred")
 	}
@@ -267,7 +267,7 @@ func TestFSDeleteObject(t *testing.T) {
 	objectName := "object"

 	obj.MakeBucketWithLocation(context.Background(), bucketName, "")
-	obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})

 	// Test with invalid bucket name
 	if err := fs.DeleteObject(context.Background(), "fo", objectName); !isSameType(err, BucketNameInvalid{}) {
@@ -35,19 +35,19 @@ func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket str
 }

 // NewMultipartUpload upload object in multiple parts
-func (a GatewayUnsupported) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string) (uploadID string, err error) {
+func (a GatewayUnsupported) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
 	logger.LogIf(ctx, NotImplemented{})
 	return "", NotImplemented{}
 }

 // CopyObjectPart copy part of object to uploadID for another object
-func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo) (pi PartInfo, err error) {
+func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, err error) {
 	logger.LogIf(ctx, NotImplemented{})
 	return pi, NotImplemented{}
 }

 // PutObjectPart puts a part of object in bucket
-func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi PartInfo, err error) {
+func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (pi PartInfo, err error) {
 	logger.LogIf(ctx, NotImplemented{})
 	return pi, NotImplemented{}
 }
@@ -131,7 +131,7 @@ func (a GatewayUnsupported) ListObjectsHeal(ctx context.Context, bucket, prefix,

 // CopyObject copies a blob from source container to destination container.
 func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
-	srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
+	srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
 	logger.LogIf(ctx, NotImplemented{})
 	return objInfo, NotImplemented{}
 }
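GatewayUnsupported exists to be embedded: a gateway struct inherits these NotImplemented stubs and overrides only what it supports, so widening the stub signatures here is what keeps every gateway compiling against the new interface. A sketch of that pattern (myGateway is hypothetical):

type myGateway struct {
	GatewayUnsupported // supplies NotImplemented stubs with the new signatures
}

// An override must match the widened signature, ObjectOptions included,
// or the type no longer satisfies the interface.
func (g *myGateway) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	return ObjectInfo{Bucket: bucket, Name: object}, nil
}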
@@ -621,7 +621,7 @@ func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
+func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
 	// startOffset cannot be negative.
 	if startOffset < 0 {
 		return azureToObjectError(minio.InvalidRange{}, bucket, object)
@@ -653,7 +653,7 @@ func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, sta

 // GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo,
 // uses zure equivalent GetBlobProperties.
-func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo minio.ObjectInfo, err error) {
+func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 	err = blob.GetProperties(nil)
 	if err != nil {
@@ -674,7 +674,7 @@ func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string)

 // PutObject - Create a new blob with the incoming data,
 // uses Azure equivalent CreateBlockBlobFromReader.
-func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
+func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
 	blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(ctx, metadata)
 	if err != nil {
@@ -684,12 +684,12 @@ func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, dat
 	if err != nil {
 		return objInfo, azureToObjectError(err, bucket, object)
 	}
-	return a.GetObjectInfo(ctx, bucket, object)
+	return a.GetObjectInfo(ctx, bucket, object, opts)
 }

 // CopyObject - Copies a blob from source container to destination container.
 // Uses Azure equivalent CopyBlob API.
-func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
+func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL()
 	destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject)
 	azureMeta, props, err := s3MetaToAzureProperties(ctx, srcInfo.UserDefined)
@@ -716,7 +716,7 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
 	if err != nil {
 		return objInfo, azureToObjectError(err, srcBucket, srcObject)
 	}
-	return a.GetObjectInfo(ctx, destBucket, destObject)
+	return a.GetObjectInfo(ctx, destBucket, destObject, dstOpts)
 }

 // DeleteObject - Deletes a blob on azure container, uses Azure
@@ -763,7 +763,7 @@ func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, obje
 }

 // NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
-func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
+func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts minio.ObjectOptions) (uploadID string, err error) {
 	uploadID, err = getAzureUploadID()
 	if err != nil {
 		logger.LogIf(ctx, err)
@@ -787,7 +787,7 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
 }

 // PutObjectPart - Use Azure equivalent PutBlockWithLength.
-func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) {
+func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts minio.ObjectOptions) (info minio.PartInfo, err error) {
 	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
 		return info, err
 	}
@@ -1035,7 +1035,7 @@ func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 			return objInfo, azureToObjectError(err, bucket, object)
 		}
 	}
-	return a.GetObjectInfo(ctx, bucket, object)
+	return a.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
 }

 // SetBucketPolicy - Azure supports three types of container policies:
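Two pass-through styles show up in the Azure hunks: PutObject and CopyObject re-stat the blob with the options they were handed (opts, dstOpts), while CompleteMultipartUpload has no request options in scope and falls back to the zero value. Reduced to a sketch over the gateway interface (helper name hypothetical):

func writeThenStat(ctx context.Context, layer minio.ObjectLayer, bucket, object string,
	data *hash.Reader, meta map[string]string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
	if _, err := layer.PutObject(ctx, bucket, object, data, meta, opts); err != nil {
		return minio.ObjectInfo{}, err
	}
	// Describe with the same options used for the write, as azureObjects.PutObject does.
	return layer.GetObjectInfo(ctx, bucket, object, opts)
}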
@@ -400,7 +400,7 @@ func (l *b2Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
+func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
 		return err
@@ -417,7 +417,7 @@ func (l *b2Objects) GetObject(ctx context.Context, bucket string, object string,
 }

 // GetObjectInfo reads object info and replies back ObjectInfo
-func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object string) (objInfo minio.ObjectInfo, err error) {
+func (l *b2Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
 		return objInfo, err
@@ -508,7 +508,7 @@ func (nb *Reader) Read(p []byte) (int, error) {
 }

 // PutObject uploads the single upload to B2 backend by using *b2_upload_file* API, uploads upto 5GiB.
-func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string, data *h2.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
+func (l *b2Objects) PutObject(ctx context.Context, bucket string, object string, data *h2.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
 		return objInfo, err
@@ -608,7 +608,7 @@ func (l *b2Objects) ListMultipartUploads(ctx context.Context, bucket string, pre
 // Each large file must consist of at least 2 parts, and all of the parts except the
 // last one must be at least 5MB in size. The last part must contain at least one byte.
 // For more information - https://www.backblaze.com/b2/docs/large_files.html
-func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string) (string, error) {
+func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string, o minio.ObjectOptions) (string, error) {
 	var uploadID string
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
@@ -627,7 +627,7 @@ func (l *b2Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
 }

 // PutObjectPart puts a part of object in bucket, uses B2's LargeFile upload API.
-func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *h2.Reader) (pi minio.PartInfo, err error) {
+func (l *b2Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *h2.Reader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
 	bkt, err := l.Bucket(ctx, bucket)
 	if err != nil {
 		return pi, err
@@ -723,7 +723,7 @@ func (l *b2Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
 		return oi, b2ToObjectError(err, bucket, object, uploadID)
 	}

-	return l.GetObjectInfo(ctx, bucket, object)
+	return l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
 }

 // SetBucketPolicy - B2 supports 2 types of bucket policies:
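In the B2 hunks the options parameter is accepted purely for interface compatibility; nothing in the bodies shown consumes it, and CompleteMultipartUpload passes the zero value explicitly. A caller sketch (hypothetical helper; length -1 requests the whole object, per the range check in the s3 hunks below):

func b2GetWhole(ctx context.Context, layer minio.ObjectLayer, bucket, object string, w io.Writer) error {
	// Options are threaded through but inert on this backend in this changeset.
	return layer.GetObject(ctx, bucket, object, 0, -1, w, "", minio.ObjectOptions{})
}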
@@ -742,7 +742,7 @@ func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continua
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error {
+func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
 	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
 	// otherwise gcs will just return object not exist in case of non-existing bucket
 	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
@@ -831,7 +831,7 @@ func applyMetadataToGCSAttrs(metadata map[string]string, attrs *storage.ObjectAt
 }

 // GetObjectInfo - reads object info and replies back ObjectInfo
-func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object string) (minio.ObjectInfo, error) {
+func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
 	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
 	// otherwise gcs will just return object not exist in case of non-existing bucket
 	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
@@ -849,7 +849,7 @@ func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object st
 }

 // PutObject - Create a new object with the incoming data,
-func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, data *hash.Reader, metadata map[string]string) (minio.ObjectInfo, error) {
+func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
 	// if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
 	// otherwise gcs will just return object not exist in case of non-existing bucket
 	if _, err := l.client.Bucket(bucket).Attrs(l.ctx); err != nil {
@@ -888,7 +888,7 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, d

 // CopyObject - Copies a blob from source container to destination container.
 func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
-	srcInfo minio.ObjectInfo) (minio.ObjectInfo, error) {
+	srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {

 	src := l.client.Bucket(srcBucket).Object(srcObject)
 	dst := l.client.Bucket(destBucket).Object(destObject)
@@ -917,7 +917,7 @@ func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object str
 }

 // NewMultipartUpload - upload object in multiple parts
-func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key string, metadata map[string]string) (uploadID string, err error) {
+func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key string, metadata map[string]string, o minio.ObjectOptions) (uploadID string, err error) {
 	// generate new uploadid
 	uploadID = minio.MustGetUUID()

@@ -1030,7 +1030,7 @@ func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key
 }

 // PutObjectPart puts a part of object in bucket
-func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, data *hash.Reader) (minio.PartInfo, error) {
+func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, data *hash.Reader, opts minio.ObjectOptions) (minio.PartInfo, error) {
 	if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
 		return minio.PartInfo{}, err
 	}
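The GCS changes mirror B2's: signatures widen, and zero values flow wherever no request options exist. A condensed multipart sketch against the widened interface (layer, part1 and the surrounding names are placeholders):

uploadID, err := layer.NewMultipartUpload(ctx, bucket, object, nil, minio.ObjectOptions{})
if err != nil {
	return err
}
if _, err = layer.PutObjectPart(ctx, bucket, object, uploadID, 1, part1, minio.ObjectOptions{}); err != nil {
	return err
}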
@@ -513,7 +513,7 @@ func (t *tritonObjects) ListObjectsV2(ctx context.Context, bucket, prefix, conti
 // indicates the total length of the object.
 //
 // https://apidocs.joyent.com/manta/api.html#GetObject
-func (t *tritonObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
+func (t *tritonObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
 	// Start offset cannot be negative.
 	if startOffset < 0 {
 		logger.LogIf(ctx, fmt.Errorf("Unexpected error"))
@@ -545,7 +545,7 @@ func (t *tritonObjects) GetObject(ctx context.Context, bucket, object string, st
 // uses Triton equivalent GetBlobProperties.
 //
 // https://apidocs.joyent.com/manta/api.html#GetObject
-func (t *tritonObjects) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo minio.ObjectInfo, err error) {
+func (t *tritonObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	info, err := t.client.Objects().GetInfo(ctx, &storage.GetInfoInput{
 		ObjectPath: path.Join(mantaRoot, bucket, object),
 	})
@@ -583,7 +583,7 @@ func (d dummySeeker) Seek(offset int64, whence int) (int64, error) {
 // CreateBlockBlobFromReader.
 //
 // https://apidocs.joyent.com/manta/api.html#PutObject
-func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
+func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	if err = t.client.Objects().Put(ctx, &storage.PutObjectInput{
 		ContentLength: uint64(data.Size()),
 		ObjectPath:    path.Join(mantaRoot, bucket, object),
@@ -602,14 +602,14 @@ func (t *tritonObjects) PutObject(ctx context.Context, bucket, object string, da
 		return objInfo, err
 	}

-	return t.GetObjectInfo(ctx, bucket, object)
+	return t.GetObjectInfo(ctx, bucket, object, opts)
 }

 // CopyObject - Copies a blob from source container to destination container.
 // Uses Manta Snaplinks API.
 //
 // https://apidocs.joyent.com/manta/api.html#PutSnapLink
-func (t *tritonObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
+func (t *tritonObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	if err = t.client.SnapLinks().Put(ctx, &storage.PutSnapLinkInput{
 		SourcePath: path.Join(mantaRoot, srcBucket, srcObject),
 		LinkPath:   path.Join(mantaRoot, destBucket, destObject),
@@ -618,7 +618,7 @@ func (t *tritonObjects) CopyObject(ctx context.Context, srcBucket, srcObject, de
 		return objInfo, err
 	}

-	return t.GetObjectInfo(ctx, destBucket, destObject)
+	return t.GetObjectInfo(ctx, destBucket, destObject, dstOpts)
 }

 // DeleteObject - Delete a blob in Manta, uses Triton equivalent DeleteBlob API.
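The Manta gateway shows the copy-path convention most clearly: source and destination carry separate options, and the post-copy stat uses the destination's. As a sketch (hypothetical helper over the widened interface):

func copyBlob(ctx context.Context, layer minio.ObjectLayer, srcBucket, srcObject,
	dstBucket, dstObject string, srcInfo minio.ObjectInfo) (minio.ObjectInfo, error) {
	srcOpts, dstOpts := minio.ObjectOptions{}, minio.ObjectOptions{}
	return layer.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
}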
@@ -552,7 +552,7 @@ func ossGetObject(ctx context.Context, client *oss.Client, bucket, key string, s
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (l *ossObjects) GetObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string) error {
+func (l *ossObjects) GetObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
 	return ossGetObject(ctx, l.Client, bucket, key, startOffset, length, writer, etag)
 }

@@ -602,7 +602,7 @@ func ossGetObjectInfo(ctx context.Context, client *oss.Client, bucket, object st
 }

 // GetObjectInfo reads object info and replies back ObjectInfo.
-func (l *ossObjects) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo minio.ObjectInfo, err error) {
+func (l *ossObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	return ossGetObjectInfo(ctx, l.Client, bucket, object)
 }

@@ -630,12 +630,12 @@ func ossPutObject(ctx context.Context, client *oss.Client, bucket, object string
 }

 // PutObject creates a new object with the incoming data.
-func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
+func (l *ossObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	return ossPutObject(ctx, l.Client, bucket, object, data, metadata)
 }

 // CopyObject copies an object from source bucket to a destination bucket.
-func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
+func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
 	bkt, err := l.Client.Bucket(srcBucket)
 	if err != nil {
 		logger.LogIf(ctx, err)
@@ -659,7 +659,7 @@ func (l *ossObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
 		logger.LogIf(ctx, err)
 		return objInfo, ossToObjectError(err, srcBucket, srcObject)
 	}
-	return l.GetObjectInfo(ctx, dstBucket, dstObject)
+	return l.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts)
 }

 // DeleteObject deletes a blob in bucket.
@@ -725,7 +725,7 @@ func (l *ossObjects) ListMultipartUploads(ctx context.Context, bucket, prefix, k
 }

 // NewMultipartUpload upload object in multiple parts.
-func (l *ossObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
+func (l *ossObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, o minio.ObjectOptions) (uploadID string, err error) {
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
 		logger.LogIf(ctx, err)
@@ -748,7 +748,7 @@ func (l *ossObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
 }

 // PutObjectPart puts a part of object in bucket.
-func (l *ossObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, err error) {
+func (l *ossObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts minio.ObjectOptions) (pi minio.PartInfo, err error) {
 	bkt, err := l.Client.Bucket(bucket)
 	if err != nil {
 		logger.LogIf(ctx, err)
@@ -828,7 +828,7 @@ func ossListObjectParts(client *oss.Client, bucket, object, uploadID string, par
 // CopyObjectPart creates a part in a multipart upload by copying
 // existing object or a part of it.
 func (l *ossObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
-	partID int, startOffset, length int64, srcInfo minio.ObjectInfo) (p minio.PartInfo, err error) {
+	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {

 	bkt, err := l.Client.Bucket(destBucket)
 	if err != nil {
@@ -954,7 +954,7 @@ func (l *ossObjects) CompleteMultipartUpload(ctx context.Context, bucket, object
 		return oi, ossToObjectError(err, bucket, object)
 	}

-	return l.GetObjectInfo(ctx, bucket, object)
+	return l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
 }

 // SetBucketPolicy sets policy on bucket.
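OSS follows the same shape, with one wrinkle visible above: the exported wrappers take opts while the unexported oss* helpers (ossGetObject, ossGetObjectInfo, ossPutObject) keep their old signatures, so the pass-through stops at the gateway boundary here. Part-copy under the widened interface, as a sketch (placeholders throughout):

p, err := layer.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject,
	uploadID, partID, 0, srcInfo.Size, srcInfo, minio.ObjectOptions{}, minio.ObjectOptions{})
if err != nil {
	return err
}
_ = p.ETag // PartInfo feeds CompleteMultipartUpload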
@ -307,12 +307,14 @@ func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuat
|
|||||||
//
|
//
|
||||||
// startOffset indicates the starting read location of the object.
|
// startOffset indicates the starting read location of the object.
|
||||||
// length indicates the total length of the object.
|
// length indicates the total length of the object.
|
||||||
func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string) error {
|
func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, o minio.ObjectOptions) error {
|
||||||
if length < 0 && length != -1 {
|
if length < 0 && length != -1 {
|
||||||
return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key)
|
return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key)
|
||||||
}
|
}
|
||||||
|
|
||||||
opts := miniogo.GetObjectOptions{}
|
opts := miniogo.GetObjectOptions{}
|
||||||
|
opts.ServerSideEncryption = o.ServerSideEncryption
|
||||||
|
|
||||||
if startOffset >= 0 && length >= 0 {
|
if startOffset >= 0 && length >= 0 {
|
||||||
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
|
if err := opts.SetRange(startOffset, startOffset+length-1); err != nil {
|
||||||
logger.LogIf(ctx, err)
|
logger.LogIf(ctx, err)
|
||||||
@ -333,8 +335,8 @@ func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, st
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectInfo reads object info and replies back ObjectInfo
|
// GetObjectInfo reads object info and replies back ObjectInfo
|
||||||
func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string) (objInfo minio.ObjectInfo, err error) {
|
func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||||
oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{})
|
oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{miniogo.GetObjectOptions{ServerSideEncryption: opts.ServerSideEncryption}})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
|
return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
|
||||||
}
|
}
|
||||||
@ -343,8 +345,8 @@ func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object str
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PutObject creates a new object with the incoming data,
|
// PutObject creates a new object with the incoming data,
|
||||||
func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
|
func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||||
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata))
|
oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), minio.ToMinioClientMetadata(metadata), opts.ServerSideEncryption)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return objInfo, minio.ErrorRespToObjectError(err, bucket, object)
|
return objInfo, minio.ErrorRespToObjectError(err, bucket, object)
|
||||||
}
|
}
|
||||||
@ -353,7 +355,7 @@ func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CopyObject copies an object from source bucket to a destination bucket.
|
// CopyObject copies an object from source bucket to a destination bucket.
|
||||||
func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
|
func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||||
// Set this header such that following CopyObject() always sets the right metadata on the destination.
|
// Set this header such that following CopyObject() always sets the right metadata on the destination.
|
||||||
// metadata input is already a trickled down value from interpreting x-amz-metadata-directive at
|
// metadata input is already a trickled down value from interpreting x-amz-metadata-directive at
|
||||||
// handler layer. So what we have right now is supposed to be applied on the destination object anyways.
|
// handler layer. So what we have right now is supposed to be applied on the destination object anyways.
|
||||||
@ -363,7 +365,7 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
|
|||||||
if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil {
|
if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil {
|
||||||
return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
|
return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
|
||||||
}
|
}
|
||||||
return l.GetObjectInfo(ctx, dstBucket, dstObject)
|
return l.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObject deletes a blob in bucket
|
// DeleteObject deletes a blob in bucket
|
||||||
@ -387,9 +389,9 @@ func (l *s3Objects) ListMultipartUploads(ctx context.Context, bucket string, pre
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewMultipartUpload upload object in multiple parts
|
// NewMultipartUpload upload object in multiple parts
|
||||||
func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string) (uploadID string, err error) {
|
func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, metadata map[string]string, o minio.ObjectOptions) (uploadID string, err error) {
|
||||||
// Create PutObject options
|
// Create PutObject options
|
||||||
opts := miniogo.PutObjectOptions{UserMetadata: metadata}
|
opts := miniogo.PutObjectOptions{UserMetadata: metadata, ServerSideEncryption: o.ServerSideEncryption}
|
||||||
uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
|
uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
|
return uploadID, minio.ErrorRespToObjectError(err, bucket, object)
|
||||||
@ -398,8 +400,8 @@ func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, objec
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PutObjectPart puts a part of object in bucket
|
// PutObjectPart puts a part of object in bucket
|
||||||
func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader) (pi minio.PartInfo, e error) {
|
func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *hash.Reader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {
|
||||||
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString())
|
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), opts.ServerSideEncryption)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return pi, minio.ErrorRespToObjectError(err, bucket, object)
|
return pi, minio.ErrorRespToObjectError(err, bucket, object)
|
||||||
}
|
}
|
||||||
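
The two hunks above show the S3 gateway threading the caller's encryption setting straight through to minio-go. A minimal sketch of the same forwarding shape, assuming the usual gateway aliases miniogo (for minio-go) and minio (for the cmd package), and an already-initialized core client:

// Sketch only, not part of the commit: the options value is copied into
// minio-go's request options unmodified, so the gateway itself never
// inspects or stores any key material.
func newUploadWithSSE(client *miniogo.Core, bucket, object string,
	meta map[string]string, o minio.ObjectOptions) (string, error) {
	opts := miniogo.PutObjectOptions{
		UserMetadata:         meta,
		ServerSideEncryption: o.ServerSideEncryption,
	}
	return client.NewMultipartUpload(bucket, object, opts)
}
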
@ -410,7 +412,7 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str
|
|||||||
// CopyObjectPart creates a part in a multipart upload by copying
|
// CopyObjectPart creates a part in a multipart upload by copying
|
||||||
// existing object or a part of it.
|
// existing object or a part of it.
|
||||||
func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
|
func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
|
||||||
partID int, startOffset, length int64, srcInfo minio.ObjectInfo) (p minio.PartInfo, err error) {
|
partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
|
||||||
|
|
||||||
srcInfo.UserDefined = map[string]string{
|
srcInfo.UserDefined = map[string]string{
|
||||||
"x-amz-copy-source-if-match": srcInfo.ETag,
|
"x-amz-copy-source-if-match": srcInfo.ETag,
|
||||||
@ -448,7 +450,7 @@ func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string,
|
|||||||
return oi, minio.ErrorRespToObjectError(err, bucket, object)
|
return oi, minio.ErrorRespToObjectError(err, bucket, object)
|
||||||
}
|
}
|
||||||
|
|
||||||
return l.GetObjectInfo(ctx, bucket, object)
|
return l.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetBucketPolicy sets policy on bucket
|
// SetBucketPolicy sets policy on bucket
|
||||||
|
@ -431,7 +431,7 @@ func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix stri
|
|||||||
return loi, nil
|
return loi, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
|
func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
|
||||||
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
|
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
|
||||||
defer os.Remove(dstFile)
|
defer os.Remove(dstFile)
|
||||||
|
|
||||||
@ -512,7 +512,7 @@ func (s *siaObjects) findSiaObject(ctx context.Context, bucket, object string) (
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectInfo reads object info and replies back ObjectInfo
|
// GetObjectInfo reads object info and replies back ObjectInfo
|
||||||
func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string) (minio.ObjectInfo, error) {
|
func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
|
||||||
so, err := s.findSiaObject(ctx, bucket, object)
|
so, err := s.findSiaObject(ctx, bucket, object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return minio.ObjectInfo{}, err
|
return minio.ObjectInfo{}, err
|
||||||
@ -529,7 +529,7 @@ func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object st
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PutObject creates a new object with the incoming data,
|
// PutObject creates a new object with the incoming data,
|
||||||
func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
|
func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
|
||||||
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
|
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
|
||||||
writer, err := os.Create(srcFile)
|
writer, err := os.Create(srcFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
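
Not every backend can honor the new options: the Sia gateway above accepts opts only to satisfy the widened interface and never reads it. A sketch of a caller passing a zero value through such a gateway (minio is again assumed to alias the cmd package):

// Illustrative only: a zero ObjectOptions is always valid, and SSE-unaware
// backends such as Sia simply disregard it.
func statThroughGateway(ctx context.Context, layer minio.ObjectLayer,
	bucket, object string) (minio.ObjectInfo, error) {
	return layer.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
}
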
@ -93,7 +93,7 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
|||||||
for _, object := range testCase.objectToUploads {
|
for _, object := range testCase.objectToUploads {
|
||||||
md5Bytes := md5.Sum([]byte(object.content))
|
md5Bytes := md5.Sum([]byte(object.content))
|
||||||
_, err = obj.PutObject(context.Background(), testCase.bucketName, object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
|
_, err = obj.PutObject(context.Background(), testCase.bucketName, object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
|
||||||
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), nil)
|
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), nil, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
@ -75,7 +75,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
|||||||
// iterate through the above set of inputs and upload the object.
|
// iterate through the above set of inputs and upload the object.
|
||||||
for i, input := range putObjectInputs {
|
for i, input := range putObjectInputs {
|
||||||
// uploading the object.
|
// uploading the object.
|
||||||
_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
|
_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
|
||||||
// if object upload fails stop the test.
|
// if object upload fails stop the test.
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
||||||
@ -155,7 +155,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
|
err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "", ObjectOptions{})
|
||||||
if err != nil && testCase.shouldPass {
|
if err != nil && testCase.shouldPass {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -220,7 +220,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
|
|||||||
// iterate through the above set of inputs and upload the object.
|
// iterate through the above set of inputs and upload the object.
|
||||||
for i, input := range putObjectInputs {
|
for i, input := range putObjectInputs {
|
||||||
// uploading the object.
|
// uploading the object.
|
||||||
_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
|
_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
|
||||||
// if object upload fails stop the test.
|
// if object upload fails stop the test.
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
||||||
@ -265,7 +265,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
|
err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "", ObjectOptions{})
|
||||||
if err != nil && testCase.shouldPass {
|
if err != nil && testCase.shouldPass {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -333,7 +333,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
|
|||||||
// iterate through the above set of inputs and upload the object.
|
// iterate through the above set of inputs and upload the object.
|
||||||
for i, input := range putObjectInputs {
|
for i, input := range putObjectInputs {
|
||||||
// uploading the object.
|
// uploading the object.
|
||||||
_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
|
_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
|
||||||
// if object upload fails stop the test.
|
// if object upload fails stop the test.
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
|
||||||
@ -418,7 +418,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
|
err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "", ObjectOptions{})
|
||||||
if err != nil && testCase.shouldPass {
|
if err != nil && testCase.shouldPass {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
@ -34,15 +34,14 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
opts := ObjectOptions{}
|
||||||
// Put a regular object
|
// Put a regular object
|
||||||
_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
|
_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put an empty directory
|
// Put an empty directory
|
||||||
_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/empty-dir/", mustGetHashReader(t, bytes.NewBufferString(""), int64(len("")), "", ""), nil)
|
_, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/empty-dir/", mustGetHashReader(t, bytes.NewBufferString(""), int64(len("")), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -83,7 +82,7 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
|||||||
{"test-getobjectinfo", "Asia/empty-dir/", resultCases[1], nil, true},
|
{"test-getobjectinfo", "Asia/empty-dir/", resultCases[1], nil, true},
|
||||||
}
|
}
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
result, err := obj.GetObjectInfo(context.Background(), testCase.bucketName, testCase.objectName)
|
result, err := obj.GetObjectInfo(context.Background(), testCase.bucketName, testCase.objectName, opts)
|
||||||
if err != nil && testCase.shouldPass {
|
if err != nil && testCase.shouldPass {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
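
Across the object-layer tests in this commit the same mechanical change repeats: declare one opts := ObjectOptions{} near the top and thread it through every call. A hypothetical helper showing the resulting shape, written as it would sit in the cmd test files (mustGetHashReader is the existing test utility; bucket creation is assumed done):

// putAndStat is a sketch, not part of the commit: it uploads content and
// stats it back, threading the same zero-value ObjectOptions through both calls.
func putAndStat(t *testing.T, obj ObjectLayer, bucket, object, content string) ObjectInfo {
	opts := ObjectOptions{}
	_, err := obj.PutObject(context.Background(), bucket, object,
		mustGetHashReader(t, bytes.NewBufferString(content), int64(len(content)), "", ""), nil, opts)
	if err != nil {
		t.Fatalf("put: %v", err)
	}
	oi, err := obj.GetObjectInfo(context.Background(), bucket, object, opts)
	if err != nil {
		t.Fatalf("stat: %v", err)
	}
	return oi
}
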
@ -20,11 +20,17 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
|
"github.com/minio/minio-go/pkg/encrypt"
|
||||||
"github.com/minio/minio/pkg/hash"
|
"github.com/minio/minio/pkg/hash"
|
||||||
"github.com/minio/minio/pkg/madmin"
|
"github.com/minio/minio/pkg/madmin"
|
||||||
"github.com/minio/minio/pkg/policy"
|
"github.com/minio/minio/pkg/policy"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ObjectOptions represents object options for ObjectLayer operations
|
||||||
|
type ObjectOptions struct {
|
||||||
|
ServerSideEncryption encrypt.ServerSide
|
||||||
|
}
|
||||||
|
|
||||||
// ObjectLayer implements primitives for object API layer.
|
// ObjectLayer implements primitives for object API layer.
|
||||||
type ObjectLayer interface {
|
type ObjectLayer interface {
|
||||||
// Storage operations.
|
// Storage operations.
|
||||||
@ -40,18 +46,19 @@ type ObjectLayer interface {
|
|||||||
ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
|
ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
|
||||||
|
|
||||||
// Object operations.
|
// Object operations.
|
||||||
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
|
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
|
||||||
GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
|
GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||||
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
|
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||||
CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error)
|
CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
|
||||||
DeleteObject(ctx context.Context, bucket, object string) error
|
DeleteObject(ctx context.Context, bucket, object string) error
|
||||||
|
|
||||||
// Multipart operations.
|
// Multipart operations.
|
||||||
ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
|
ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
|
||||||
NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
|
NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)
|
||||||
CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
|
CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
|
||||||
startOffset int64, length int64, srcInfo ObjectInfo) (info PartInfo, err error)
|
startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (info PartInfo, err error)
|
||||||
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
|
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error)
|
||||||
ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
|
ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
|
||||||
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
|
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
|
||||||
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
|
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
|
||||||
|
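
The interface change above is the heart of the commit: every read, write, copy, and multipart entry point now carries an ObjectOptions, whose only field so far is minio-go's encrypt.ServerSide. A minimal caller-side sketch, assuming an already-initialized ObjectLayer and hash.Reader, and assuming minio-go's encrypt.NewSSEC constructor; the all-zero key is a placeholder, not a usable secret:

// Sketch only: this would live in the cmd package, next to the interface above,
// with "context", "github.com/minio/minio-go/pkg/encrypt" and
// "github.com/minio/minio/pkg/hash" imported.
func putEncrypted(ctx context.Context, layer ObjectLayer, data *hash.Reader) (ObjectInfo, error) {
	// Placeholder 32-byte SSE-C key; a real caller derives this from the request.
	sse, err := encrypt.NewSSEC(make([]byte, 32))
	if err != nil {
		return ObjectInfo{}, err
	}
	opts := ObjectOptions{ServerSideEncryption: sse}
	if _, err = layer.PutObject(ctx, "bucket", "object", data, nil, opts); err != nil {
		return ObjectInfo{}, err
	}
	// The same options value travels with every subsequent call on the object.
	return layer.GetObjectInfo(ctx, "bucket", "object", opts)
}
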
@ -71,7 +71,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
|||||||
for _, object := range testObjects {
|
for _, object := range testObjects {
|
||||||
md5Bytes := md5.Sum([]byte(object.content))
|
md5Bytes := md5.Sum([]byte(object.content))
|
||||||
_, err = obj.PutObject(context.Background(), testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
|
_, err = obj.PutObject(context.Background(), testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
|
||||||
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), object.meta)
|
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), object.meta, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -617,7 +617,7 @@ func BenchmarkListObjects(b *testing.B) {
|
|||||||
// Insert objects to be listed and benchmarked later.
|
// Insert objects to be listed and benchmarked later.
|
||||||
for i := 0; i < 20000; i++ {
|
for i := 0; i < 20000; i++ {
|
||||||
key := "obj" + strconv.Itoa(i)
|
key := "obj" + strconv.Itoa(i)
|
||||||
_, err = obj.PutObject(context.Background(), bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
|
_, err = obj.PutObject(context.Background(), bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal(err)
|
b.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -40,15 +40,15 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
|
|||||||
|
|
||||||
bucket := "minio-bucket"
|
bucket := "minio-bucket"
|
||||||
object := "minio-object"
|
object := "minio-object"
|
||||||
|
opts := ObjectOptions{}
|
||||||
_, err := obj.NewMultipartUpload(context.Background(), "--", object, nil)
|
_, err := obj.NewMultipartUpload(context.Background(), "--", object, nil, opts)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("%s: Expected to fail since bucket name is invalid.", instanceType)
|
t.Fatalf("%s: Expected to fail since bucket name is invalid.", instanceType)
|
||||||
}
|
}
|
||||||
|
|
||||||
errMsg := "Bucket not found: minio-bucket"
|
errMsg := "Bucket not found: minio-bucket"
|
||||||
// operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
|
// operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
|
||||||
_, err = obj.NewMultipartUpload(context.Background(), bucket, object, nil)
|
_, err = obj.NewMultipartUpload(context.Background(), bucket, object, nil, opts)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType)
|
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType)
|
||||||
}
|
}
|
||||||
@ -63,12 +63,12 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
|
|||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = obj.NewMultipartUpload(context.Background(), bucket, "\\", nil)
|
_, err = obj.NewMultipartUpload(context.Background(), bucket, "\\", nil, opts)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("%s: Expected to fail since object name is invalid.", instanceType)
|
t.Fatalf("%s: Expected to fail since object name is invalid.", instanceType)
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -94,7 +94,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
|
|||||||
|
|
||||||
bucket := "minio-bucket"
|
bucket := "minio-bucket"
|
||||||
object := "minio-object"
|
object := "minio-object"
|
||||||
|
opts := ObjectOptions{}
|
||||||
// Create bucket before initiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -102,7 +102,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
|
|||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -148,7 +148,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
|
|||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = obj.NewMultipartUpload(context.Background(), bucket, object, nil)
|
_, err = obj.NewMultipartUpload(context.Background(), bucket, object, nil, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -184,7 +184,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Initiate Multipart Upload on the above created bucket.
|
// Initiate Multipart Upload on the above created bucket.
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -221,7 +221,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
|
|||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
// Iterating over createPartCases to generate multipart chunks.
|
// Iterating over createPartCases to generate multipart chunks.
|
||||||
for _, testCase := range createPartCases {
|
for _, testCase := range createPartCases {
|
||||||
_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -235,7 +235,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
|
|||||||
|
|
||||||
// Object part upload should fail with quorum not available.
|
// Object part upload should fail with quorum not available.
|
||||||
testCase := createPartCases[len(createPartCases)-1]
|
testCase := createPartCases[len(createPartCases)-1]
|
||||||
_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
_, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), ObjectOptions{})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
|
t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
|
||||||
}
|
}
|
||||||
@ -256,7 +256,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
|
|||||||
// Generating cases for which the PutObjectPart fails.
|
// Generating cases for which the PutObjectPart fails.
|
||||||
bucket := "minio-bucket"
|
bucket := "minio-bucket"
|
||||||
object := "minio-object"
|
object := "minio-object"
|
||||||
|
opts := ObjectOptions{}
|
||||||
// Create bucket before initiating NewMultipartUpload.
|
// Create bucket before initiating NewMultipartUpload.
|
||||||
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -264,7 +264,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
|
|||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
// Initiate Multipart Upload on the above created bucket.
|
// Initiate Multipart Upload on the above created bucket.
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -354,7 +354,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
|
|||||||
|
|
||||||
// Validate all the test cases.
|
// Validate all the test cases.
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
actualInfo, actualErr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
|
actualInfo, actualErr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256), opts)
|
||||||
// All the test cases above are expected to fail.
|
// All the test cases above are expected to fail.
|
||||||
if actualErr != nil && testCase.shouldPass {
|
if actualErr != nil && testCase.shouldPass {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
|
||||||
@ -389,7 +389,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
bucketNames := []string{"minio-bucket", "minio-2-bucket", "minio-3-bucket"}
|
bucketNames := []string{"minio-bucket", "minio-2-bucket", "minio-3-bucket"}
|
||||||
objectNames := []string{"minio-object-1.txt", "minio-object.txt", "neymar-1.jpeg", "neymar.jpeg", "parrot-1.png", "parrot.png"}
|
objectNames := []string{"minio-object-1.txt", "minio-object.txt", "neymar-1.jpeg", "neymar.jpeg", "parrot-1.png", "parrot.png"}
|
||||||
uploadIDs := []string{}
|
uploadIDs := []string{}
|
||||||
|
opts := ObjectOptions{}
|
||||||
// bucketNames[0].
|
// bucketNames[0].
|
||||||
// objectNames[0].
|
// objectNames[0].
|
||||||
// uploadIDs[0].
|
// uploadIDs[0].
|
||||||
@ -400,7 +400,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
// Initiate Multipart Upload on the above created bucket.
|
// Initiate Multipart Upload on the above created bucket.
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -420,7 +420,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
for i := 0; i < 3; i++ {
|
for i := 0; i < 3; i++ {
|
||||||
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
|
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
|
||||||
// Used to test the listing for the case of multiple uploadIDs for a given object.
|
// Used to test the listing for the case of multiple uploadIDs for a given object.
|
||||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], nil)
|
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -442,7 +442,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
// Used to test the listing for the case of multiple objects for a given bucket.
|
// Used to test the listing for the case of multiple objects for a given bucket.
|
||||||
for i := 0; i < 6; i++ {
|
for i := 0; i < 6; i++ {
|
||||||
var uploadID string
|
var uploadID string
|
||||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], nil)
|
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -488,7 +488,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
// Iterating over createPartCases to generate multipart chunks.
|
// Iterating over createPartCases to generate multipart chunks.
|
||||||
for _, testCase := range createPartCases {
|
for _, testCase := range createPartCases {
|
||||||
_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -1273,8 +1273,9 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
|
|||||||
// Failed to create newbucket, abort.
|
// Failed to create newbucket, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
opts := ObjectOptions{}
|
||||||
// Initiate Multipart Upload on the above created bucket.
|
// Initiate Multipart Upload on the above created bucket.
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -1308,7 +1309,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
|
|||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
// Iterating over createPartCases to generate multipart chunks.
|
// Iterating over createPartCases to generate multipart chunks.
|
||||||
for _, testCase := range createPartCases {
|
for _, testCase := range createPartCases {
|
||||||
_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -1507,7 +1508,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
|
|||||||
bucketNames := []string{"minio-bucket", "minio-2-bucket"}
|
bucketNames := []string{"minio-bucket", "minio-2-bucket"}
|
||||||
objectNames := []string{"minio-object-1.txt"}
|
objectNames := []string{"minio-object-1.txt"}
|
||||||
uploadIDs := []string{}
|
uploadIDs := []string{}
|
||||||
|
opts := ObjectOptions{}
|
||||||
// bucketNames[0].
|
// bucketNames[0].
|
||||||
// objectNames[0].
|
// objectNames[0].
|
||||||
// uploadIDs[0].
|
// uploadIDs[0].
|
||||||
@ -1518,7 +1519,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
|
|||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
// Initiate Multipart Upload on the above created bucket.
|
// Initiate Multipart Upload on the above created bucket.
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -1549,7 +1550,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
|
|||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
// Iterating over createPartCases to generate multipart chunks.
|
// Iterating over createPartCases to generate multipart chunks.
|
||||||
for _, testCase := range createPartCases {
|
for _, testCase := range createPartCases {
|
||||||
_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
_, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
@ -1763,8 +1764,9 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
|
|||||||
// Failed to create newbucket, abort.
|
// Failed to create newbucket, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err)
|
t.Fatalf("%s : %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
opts := ObjectOptions{}
|
||||||
// Initiate Multipart Upload on the above created bucket.
|
// Initiate Multipart Upload on the above created bucket.
|
||||||
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], map[string]string{"X-Amz-Meta-Id": "id"})
|
uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], map[string]string{"X-Amz-Meta-Id": "id"}, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err)
|
t.Fatalf("%s : %s", instanceType, err)
|
||||||
@ -1799,7 +1801,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
|
|||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
// Iterating over parts to generate multipart chunks.
|
// Iterating over parts to generate multipart chunks.
|
||||||
for _, part := range parts {
|
for _, part := range parts {
|
||||||
_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
|
_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s : %s", instanceType, err)
|
t.Fatalf("%s : %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
@ -162,7 +162,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
|
objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta, ObjectOptions{})
|
||||||
if actualErr != nil && testCase.expectedError == nil {
|
if actualErr != nil && testCase.expectedError == nil {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
|
||||||
}
|
}
|
||||||
@ -235,7 +235,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
|
|||||||
|
|
||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
for i, testCase := range testCases {
|
for i, testCase := range testCases {
|
||||||
objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
|
objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta, ObjectOptions{})
|
||||||
if actualErr != nil && testCase.shouldPass {
|
if actualErr != nil && testCase.shouldPass {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
|
||||||
}
|
}
|
||||||
@ -284,7 +284,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
|
|||||||
InsufficientWriteQuorum{},
|
InsufficientWriteQuorum{},
|
||||||
}
|
}
|
||||||
|
|
||||||
_, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
|
_, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta, ObjectOptions{})
|
||||||
if actualErr != nil && testCase.shouldPass {
|
if actualErr != nil && testCase.shouldPass {
|
||||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
|
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
|
||||||
}
|
}
|
||||||
@ -316,7 +316,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
|
|||||||
|
|
||||||
data := []byte("hello, world")
|
data := []byte("hello, world")
|
||||||
// Create object.
|
// Create object.
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create object, abort.
|
// Failed to create object, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -347,9 +347,9 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
|
|||||||
// Failed to create newbucket, abort.
|
// Failed to create newbucket, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
}
|
}
|
||||||
|
opts := ObjectOptions{}
|
||||||
// Initiate Multipart Upload on the above created bucket.
|
// Initiate Multipart Upload on the above created bucket.
|
||||||
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
|
uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create NewMultipartUpload, abort.
|
// Failed to create NewMultipartUpload, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -361,7 +361,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
|
|||||||
md5Writer.Write(fiveMBBytes)
|
md5Writer.Write(fiveMBBytes)
|
||||||
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
|
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
|
||||||
sha256sum := ""
|
sha256sum := ""
|
||||||
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
|
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to upload object part, abort.
|
// Failed to upload object part, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
@ -372,7 +372,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
|
|||||||
md5Writer = md5.New()
|
md5Writer = md5.New()
|
||||||
md5Writer.Write(data)
|
md5Writer.Write(data)
|
||||||
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
|
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
|
||||||
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
|
_, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to upload object part, abort.
|
// Failed to upload object part, abort.
|
||||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||||
|
@ -89,7 +89,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
|
|||||||
if api.CacheAPI() != nil {
|
if api.CacheAPI() != nil {
|
||||||
getObjectInfo = api.CacheAPI().GetObjectInfo
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
||||||
}
|
}
|
||||||
|
opts := ObjectOptions{}
|
||||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
|
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
|
||||||
if getRequestAuthType(r) == authTypeAnonymous {
|
if getRequestAuthType(r) == authTypeAnonymous {
|
||||||
// As per "Permission" section in
|
// As per "Permission" section in
|
||||||
@ -106,7 +106,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
|
|||||||
ConditionValues: getConditionValues(r, ""),
|
ConditionValues: getConditionValues(r, ""),
|
||||||
IsOwner: false,
|
IsOwner: false,
|
||||||
}) {
|
}) {
|
||||||
_, err := getObjectInfo(ctx, bucket, object)
|
_, err := getObjectInfo(ctx, bucket, object, opts)
|
||||||
if toAPIErrorCode(err) == ErrNoSuchKey {
|
if toAPIErrorCode(err) == ErrNoSuchKey {
|
||||||
s3Error = ErrNoSuchKey
|
s3Error = ErrNoSuchKey
|
||||||
}
|
}
|
||||||
@ -125,7 +125,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
objInfo, err := getObjectInfo(ctx, bucket, object)
|
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||||
return
|
return
|
||||||
@ -187,7 +187,6 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
|
|||||||
if api.CacheAPI() != nil && !crypto.SSEC.IsRequested(r.Header) {
|
if api.CacheAPI() != nil && !crypto.SSEC.IsRequested(r.Header) {
|
||||||
getObject = api.CacheAPI().GetObject
|
getObject = api.CacheAPI().GetObject
|
||||||
}
|
}
|
||||||
|
|
||||||
reader, pipewriter := io.Pipe()
|
reader, pipewriter := io.Pipe()
|
||||||
|
|
||||||
// Get the object.
|
// Get the object.
|
||||||
@ -212,7 +211,7 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r
|
|||||||
}
|
}
|
||||||
go func() {
|
go func() {
|
||||||
defer reader.Close()
|
defer reader.Close()
|
||||||
if gerr := getObject(ctx, bucket, object, 0, objInfo.Size, writer, objInfo.ETag); gerr != nil {
|
if gerr := getObject(ctx, bucket, object, 0, objInfo.Size, writer, objInfo.ETag, opts); gerr != nil {
|
||||||
pipewriter.CloseWithError(gerr)
|
pipewriter.CloseWithError(gerr)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
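
The SelectObjectContent changes above follow the pattern repeated in the handlers below: resolve the cached or direct implementation once, then hand the same ObjectOptions value to both the stat and the read. A condensed sketch using the names from the surrounding diff (it omits the SSE-C cache exclusion and assumes writer is already set up):

getObjectInfo := objectAPI.GetObjectInfo
getObject := objectAPI.GetObject
if api.CacheAPI() != nil {
	getObjectInfo = api.CacheAPI().GetObjectInfo
	getObject = api.CacheAPI().GetObject
}
opts := ObjectOptions{}
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
if err == nil {
	// Passing the identical opts to the read keeps the cached and
	// direct paths in agreement about how the object is decrypted.
	err = getObject(ctx, bucket, object, 0, objInfo.Size, writer, objInfo.ETag, opts)
}
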
@ -279,6 +278,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
|
|||||||
vars := mux.Vars(r)
|
vars := mux.Vars(r)
|
||||||
bucket := vars["bucket"]
|
bucket := vars["bucket"]
|
||||||
object := vars["object"]
|
object := vars["object"]
|
||||||
|
opts := ObjectOptions{}
|
||||||
|
|
||||||
getObjectInfo := objectAPI.GetObjectInfo
|
getObjectInfo := objectAPI.GetObjectInfo
|
||||||
if api.CacheAPI() != nil {
|
if api.CacheAPI() != nil {
|
||||||
@ -297,7 +297,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
|
|||||||
ConditionValues: getConditionValues(r, ""),
|
ConditionValues: getConditionValues(r, ""),
|
||||||
IsOwner: false,
|
IsOwner: false,
|
||||||
}) {
|
}) {
|
||||||
_, err := getObjectInfo(ctx, bucket, object)
|
_, err := getObjectInfo(ctx, bucket, object, opts)
|
||||||
if toAPIErrorCode(err) == ErrNoSuchKey {
|
if toAPIErrorCode(err) == ErrNoSuchKey {
|
||||||
s3Error = ErrNoSuchKey
|
s3Error = ErrNoSuchKey
|
||||||
}
|
}
|
||||||
@ -307,7 +307,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
objInfo, err := getObjectInfo(ctx, bucket, object)
|
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||||
return
|
return
|
||||||
@ -390,7 +390,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Reads the object at startOffset and writes to mw.
|
// Reads the object at startOffset and writes to mw.
|
||||||
if err = getObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
|
if err = getObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag, opts); err != nil {
|
||||||
if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers has been written to client yet
|
if !httpWriter.HasWritten() && !statusCodeWritten { // write error response only if no data or headers has been written to client yet
|
||||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||||
}
|
}
|
||||||
@ -449,6 +449,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
|
|||||||
getObjectInfo = api.CacheAPI().GetObjectInfo
|
getObjectInfo = api.CacheAPI().GetObjectInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
|
opts := ObjectOptions{}
|
||||||
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
|
if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone {
|
||||||
if getRequestAuthType(r) == authTypeAnonymous {
|
if getRequestAuthType(r) == authTypeAnonymous {
|
||||||
// As per "Permission" section in https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
|
// As per "Permission" section in https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html
|
||||||
@ -461,7 +462,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
|
|||||||
ConditionValues: getConditionValues(r, ""),
|
ConditionValues: getConditionValues(r, ""),
|
||||||
IsOwner: false,
|
IsOwner: false,
|
||||||
}) {
|
}) {
|
||||||
_, err := getObjectInfo(ctx, bucket, object)
|
_, err := getObjectInfo(ctx, bucket, object, opts)
|
||||||
if toAPIErrorCode(err) == ErrNoSuchKey {
|
if toAPIErrorCode(err) == ErrNoSuchKey {
|
||||||
s3Error = ErrNoSuchKey
|
s3Error = ErrNoSuchKey
|
||||||
}
|
}
|
||||||
@ -471,7 +472,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
objInfo, err := getObjectInfo(ctx, bucket, object)
|
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
|
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
|
||||||
return
|
return
|
||||||
@ -603,8 +604,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var srcOpts, dstOpts ObjectOptions
|
||||||
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
|
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
|
||||||
srcInfo, err := objectAPI.GetObjectInfo(ctx, srcBucket, srcObject)
|
srcInfo, err := objectAPI.GetObjectInfo(ctx, srcBucket, srcObject, srcOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
|
||||||
return
|
return
|
||||||
@@ -612,7 +614,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {

 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err = objectAPI.GetObjectInfo(ctx, dstBucket, dstObject); err == nil {
+		if _, err = objectAPI.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}
@@ -799,7 +801,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 	var dstRecords []dns.SrvRecord
 	if dstRecords, err = globalDNSConfig.Get(dstBucket); err == nil {
 		go func() {
-			if gerr := objectAPI.GetObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag); gerr != nil {
+			if gerr := objectAPI.GetObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag, srcOpts); gerr != nil {
 				pipeWriter.CloseWithError(gerr)
 				writeErrorResponse(w, ErrInternalError, r.URL)
 				return
@@ -816,7 +818,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 			writeErrorResponse(w, ErrInternalError, r.URL)
 			return
 		}
-		remoteObjInfo, rerr := client.PutObject(dstBucket, dstObject, srcInfo.Reader, srcInfo.Size, "", "", srcInfo.UserDefined)
+		remoteObjInfo, rerr := client.PutObject(dstBucket, dstObject, srcInfo.Reader, srcInfo.Size, "", "", srcInfo.UserDefined, dstOpts.ServerSideEncryption)
 		if rerr != nil {
 			pipeWriter.CloseWithError(rerr)
 			writeErrorResponse(w, ErrInternalError, r.URL)
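dstOpts.ServerSideEncryption is handed straight to the remote client above, so ObjectOptions must at least carry an SSE field. A minimal sketch under that assumption — the encrypt.ServerSide type is the minio-go representation and is itself an assumption here, since the struct definition is not part of this diff:

	package cmd

	import "github.com/minio/minio-go/pkg/encrypt"

	// ObjectOptions carries per-request options through ObjectLayer calls.
	// Only the field visible in this diff is sketched; anything else is unknown.
	type ObjectOptions struct {
		ServerSideEncryption encrypt.ServerSide // forwarded to the remote client.PutObject above
	}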
@@ -828,7 +830,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 	} else {
 		// Copy source object to destination, if source and destination
 		// object is same then only metadata is updated.
-		objInfo, err = objectAPI.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo)
+		objInfo, err = objectAPI.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
 		if err != nil {
 			pipeWriter.CloseWithError(err)
 			writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -1009,10 +1011,10 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
 	}
-
+	opts := ObjectOptions{}
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object); err == nil {
+		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}
@@ -1039,7 +1041,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}

 	// Create the object..
-	objInfo, err := putObject(ctx, bucket, object, hashReader, metadata)
+	objInfo, err := putObject(ctx, bucket, object, hashReader, metadata, opts)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
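The write path picks up the same trailing parameter; this handler constructs a zero-valued ObjectOptions{} and passes it through unchanged. The resulting shape, reconstructed from the call site (the *hash.Reader type is inferred from the mustGetHashReader helper used in the tests below, and is an assumption):

	PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (ObjectInfo, error)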
@@ -1095,6 +1097,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 	vars := mux.Vars(r)
 	bucket := vars["bucket"]
 	object := vars["object"]
+	opts := ObjectOptions{}

 	if s3Error := checkRequestAuthType(ctx, r, policy.PutObjectAction, bucket, object); s3Error != ErrNone {
 		writeErrorResponse(w, s3Error, r.URL)
@@ -1103,7 +1106,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {

 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object); err == nil {
+		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}
@@ -1148,7 +1151,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
 	if api.CacheAPI() != nil && !hasServerSideEncryptionHeader(r.Header) {
 		newMultipartUpload = api.CacheAPI().NewMultipartUpload
 	}
-	uploadID, err := newMultipartUpload(ctx, bucket, object, metadata)
+	uploadID, err := newMultipartUpload(ctx, bucket, object, metadata, opts)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
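Multipart initiation gains the parameter as well, including the cache-backed variant selected above. Reconstructed shape (a sketch; names are illustrative):

	NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error)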
@@ -1212,8 +1215,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
 		writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
 		return
 	}
-
-	srcInfo, err := objectAPI.GetObjectInfo(ctx, srcBucket, srcObject)
+	var srcOpts, dstOpts ObjectOptions
+	srcInfo, err := objectAPI.GetObjectInfo(ctx, srcBucket, srcObject, srcOpts)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
@@ -1221,7 +1224,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {

 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err = objectAPI.GetObjectInfo(ctx, dstBucket, dstObject); err == nil {
+		if _, err = objectAPI.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}
@@ -1346,11 +1349,10 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
 		}
 	}
 	srcInfo.Writer = writer

 	// Copy source object to destination, if source and destination
 	// object is same then only metadata is updated.
 	partInfo, err := objectAPI.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket,
-		dstObject, uploadID, partID, startOffset, getLength, srcInfo)
+		dstObject, uploadID, partID, startOffset, getLength, srcInfo, srcOpts, dstOpts)
 	if err != nil {
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
 		return
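As with whole-object copy, part copy distinguishes source and destination options. Reconstructed shape from the call site above (a sketch):

	CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (PartInfo, error)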
@@ -1496,9 +1498,10 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	opts := ObjectOptions{}
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object); err == nil {
+		if _, err = objectAPI.GetObjectInfo(ctx, bucket, object, opts); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}
@@ -1559,7 +1562,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
 	if api.CacheAPI() != nil && !hasServerSideEncryptionHeader(r.Header) {
 		putObjectPart = api.CacheAPI().PutObjectPart
 	}
-	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, hashReader)
+	partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, hashReader, opts)
 	if err != nil {
 		// Verify if the underlying error is signature mismatch.
 		writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -1597,7 +1600,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {

 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object); err == nil {
+		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}
@@ -1672,7 +1675,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {

 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object); err == nil {
+		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
 			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
 			return
 		}
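One pattern repeats across the Put, PutPart, NewMultipart, Abort and Complete handlers in this diff: with WORM enabled, an existing object must not be overwritten, and the existence probe now carries options too. Consolidated, the guard reads:

	// Deny the write if WORM is enabled and the object already exists.
	if globalWORMEnabled {
		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
			writeErrorResponse(w, ErrMethodNotAllowed, r.URL)
			return
		}
	}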

@@ -77,7 +77,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
+		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -224,7 +224,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
+		_, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
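The test churn from here on is mechanical: every direct ObjectLayer call appends an options argument, and the zero value is used throughout since these tests exercise default, unencrypted behavior. The recurring pattern (bucketName, objectName and data stand in for the per-case inputs):

	_, err := obj.PutObject(context.Background(), bucketName, objectName,
		mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), "", ""),
		nil, ObjectOptions{})
	if err != nil {
		t.Fatalf("PutObject failed: %v", err)
	}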
@@ -746,13 +746,14 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketName string,
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
 		}
+		opts := ObjectOptions{}
 		if testCase.shouldPass {
 			// Verify whether the bucket obtained object is same as the one created.
 			if !bytes.Equal(testCase.expectedContent, actualContent) {
 				t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent))
 				continue
 			}
-			objInfo, err := obj.GetObjectInfo(context.Background(), testCase.bucketName, testCase.objectName)
+			objInfo, err := obj.GetObjectInfo(context.Background(), testCase.bucketName, testCase.objectName, opts)
 			if err != nil {
 				t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
 			}
@@ -764,7 +765,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketName string,
 			t.Fatalf("Test %d: %s: ContentEncoding is set to \"%s\" which is unexpected, expected \"%s\"", i+1, instanceType, objInfo.ContentEncoding, expectedContentEncoding)
 		}
 		buffer := new(bytes.Buffer)
-		err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(testCase.dataLen), buffer, objInfo.ETag)
+		err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(testCase.dataLen), buffer, objInfo.ETag, opts)
 		if err != nil {
 			t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
 		}
@@ -787,6 +788,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,

 	var err error
 	objectName := "test-object"
+	opts := ObjectOptions{}
 	// byte data for PutObject.
 	bytesData := generateBytesData(6 * humanize.KiByte)

@@ -936,7 +938,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
 			buffer := new(bytes.Buffer)

 			// Fetch the object to check whether the content is same as the one uploaded via PutObject.
-			err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
+			err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "", opts)
 			if err != nil {
 				t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
 			}
@@ -979,7 +981,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
 		if testCase.expectedRespStatus == http.StatusOK {
 			buffer := new(bytes.Buffer)
 			// Fetch the object to check whether the content is same as the one uploaded via PutObject.
-			err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
+			err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "", opts)
 			if err != nil {
 				t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
 			}
@@ -1035,7 +1037,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string,

 	objectName := "test-object"
 	var err error
-
+	opts := ObjectOptions{}
 	// set of byte data for PutObject.
 	// object has to be created before running tests for Copy Object.
 	// this is required even to assert the copied object,
@@ -1060,7 +1062,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string,
 	for i, input := range putObjectInputs {
 		// uploading the object.
 		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName,
-			mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
+			mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -1073,7 +1075,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string,
 	// PutObjectPart API HTTP Handler has to be tested in isolation,
 	// that is without any other handler being registered,
 	// That's why NewMultipartUpload is initiated using ObjectLayer.
-	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil, opts)
 	if err != nil {
 		// Failed to create NewMultipartUpload, abort.
 		t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@@ -1127,7 +1129,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string,
 	}

 	var buf bytes.Buffer
-	if err = obj.GetObject(context.Background(), bucketName, testObject, 0, int64(len(bytesData[0].byteData)), &buf, ""); err != nil {
+	if err = obj.GetObject(context.Background(), bucketName, testObject, 0, int64(len(bytesData[0].byteData)), &buf, "", opts); err != nil {
 		t.Fatalf("Test: %s reading completed file failed: <ERROR> %v", instanceType, err)
 	}
 	if !bytes.Equal(buf.Bytes(), bytesData[0].byteData) {
@@ -1146,7 +1148,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName string,

 	objectName := "test-object"
 	var err error
-
+	opts := ObjectOptions{}
 	// set of byte data for PutObject.
 	// object has to be created before running tests for Copy Object.
 	// this is required even to assert the copied object,
@@ -1170,7 +1172,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName string,
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -1183,7 +1185,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName string,
 	// PutObjectPart API HTTP Handler has to be tested in isolation,
 	// that is without any other handler being registered,
 	// That's why NewMultipartUpload is initiated using ObjectLayer.
-	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil, opts)
 	if err != nil {
 		// Failed to create NewMultipartUpload, abort.
 		t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@@ -1474,7 +1476,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	// object used for anonymous HTTP request test.
 	anonObject := "anon-object"
 	var err error
-
+	opts := ObjectOptions{}
 	// set of byte data for PutObject.
 	// object has to be created before running tests for Copy Object.
 	// this is required even to assert the copied object,
@@ -1507,7 +1509,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -1776,7 +1778,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 		if rec.Code == http.StatusOK {
 			// See if the new object is formed.
 			// testing whether the copy was successful.
-			err = obj.GetObject(context.Background(), testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "")
+			err = obj.GetObject(context.Background(), testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "", opts)
 			if err != nil {
 				t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
 			}
@@ -2092,6 +2094,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName string,

 	var err error

+	var opts ObjectOptions
 	// object used for the test.
 	objectName := "test-object-new-multipart"

@@ -2102,7 +2105,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName string,

 	for i := 0; i < 2; i++ {
 		// initiate new multipart uploadID.
-		uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
+		uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil, opts)
 		if err != nil {
 			// Failed to create NewMultipartUpload, abort.
 			t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@@ -2144,7 +2147,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName string,
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, part := range parts {
 		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
-			mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
+			mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}
@@ -2440,7 +2443,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName string,
 	credentials auth.Credentials, t *testing.T) {

 	var err error
-
+	opts := ObjectOptions{}
 	// object used for the test.
 	objectName := "test-object-new-multipart"

@@ -2451,7 +2454,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName string,

 	for i := 0; i < 2; i++ {
 		// initiate new multipart uploadID.
-		uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
+		uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil, opts)
 		if err != nil {
 			// Failed to create NewMultipartUpload, abort.
 			t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@@ -2493,7 +2496,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName string,
 	// Iterating over createPartCases to generate multipart chunks.
 	for _, part := range parts {
 		_, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
-			mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
+			mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""), opts)
 		if err != nil {
 			t.Fatalf("%s : %s", instanceType, err)
 		}
@@ -2603,7 +2606,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	credentials auth.Credentials, t *testing.T) {

 	var err error
-
+	var opts ObjectOptions
 	objectName := "test-object"
 	// Object used for anonymous API request test.
 	anonObjectName := "test-anon-obj"
@@ -2631,7 +2634,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string,
 	// iterate through the above set of inputs and upload the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData, opts)
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -2921,11 +2924,11 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName string,

 	// Initiate Multipart upload for testing PutObjectPartHandler.
 	testObject := "testobject"
-
+	var opts ObjectOptions
 	// PutObjectPart API HTTP Handler has to be tested in isolation,
 	// that is without any other handler being registered,
 	// That's why NewMultipartUpload is initiated using ObjectLayer.
-	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil, opts)
 	if err != nil {
 		// Failed to create NewMultipartUpload, abort.
 		t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@@ -3324,11 +3327,11 @@ func TestAPIListObjectPartsHandler(t *testing.T) {
 func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
 	credentials auth.Credentials, t *testing.T) {
 	testObject := "testobject"
-
+	var opts ObjectOptions
 	// PutObjectPart API HTTP Handler has to be tested in isolation,
 	// that is without any other handler being registered,
 	// That's why NewMultipartUpload is initiated using ObjectLayer.
-	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil, opts)
 	if err != nil {
 		// Failed to create NewMultipartUpload, abort.
 		t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@@ -3337,7 +3340,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
 	uploadIDCopy := uploadID

 	// create an object Part, will be used to test list object parts.
-	_, err = obj.PutObjectPart(context.Background(), bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
+	_, err = obj.PutObjectPart(context.Background(), bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""), opts)
 	if err != nil {
 		t.Fatalf("Minio %s : %s.", instanceType, err)
 	}

@@ -90,11 +90,12 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) {

 // Tests validate creation of part files during Multipart operation.
 func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
+	var opts ObjectOptions
 	err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
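Note the two spellings used across these tests, opts := ObjectOptions{} in the handler tests above and var opts ObjectOptions here; in Go both produce the same zero value, so the choice is purely stylistic:

	var a ObjectOptions  // zero value via declaration
	b := ObjectOptions{} // zero value via composite literal; identical to a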
@@ -105,7 +106,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		expectedETaghex := getMD5Hash(data)

 		var calcPartInfo PartInfo
-		calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
+		calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""), opts)
 		if err != nil {
 			t.Errorf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -133,11 +134,12 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) {

 // Tests validate abortion of Multipart operation.
 func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) {
+	var opts ObjectOptions
 	err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -155,7 +157,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) {

 		metadata["md5"] = expectedETaghex
 		var calcPartInfo PartInfo
-		calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
+		calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""), opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -179,6 +181,7 @@ func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) {
 func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	objects := make(map[string][]byte)
 	err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
+	var opts ObjectOptions
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -196,7 +199,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		metadata := make(map[string]string)
 		metadata["etag"] = expectedETaghex
 		var objInfo ObjectInfo
-		objInfo, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
+		objInfo, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -207,7 +210,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {

 	for key, value := range objects {
 		var byteBuffer bytes.Buffer
-		err = obj.GetObject(context.Background(), "bucket", key, 0, int64(len(value)), &byteBuffer, "")
+		err = obj.GetObject(context.Background(), "bucket", key, 0, int64(len(value)), &byteBuffer, "", opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -215,7 +218,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
 			t.Errorf("%s: Mismatch of GetObject data with the expected one.", instanceType)
 		}

-		objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", key)
+		objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", key, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -246,10 +249,10 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}

 	uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
-
+	var opts ObjectOptions
 	// check before paging occurs.
 	for i := 0; i < 5; i++ {
 		key := "obj" + strconv.Itoa(i)
-		_, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
+		_, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -269,7 +273,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	// check after paging occurs pages work.
 	for i := 6; i <= 10; i++ {
 		key := "obj" + strconv.Itoa(i)
-		_, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
+		_, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -286,11 +290,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}
 	// check paging with prefix at end returns less objects.
 	{
-		_, err = obj.PutObject(context.Background(), "bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
+		_, err = obj.PutObject(context.Background(), "bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
-		_, err = obj.PutObject(context.Background(), "bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
+		_, err = obj.PutObject(context.Background(), "bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -328,11 +332,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {

 	// check delimited results with delimiter and prefix.
 	{
-		_, err = obj.PutObject(context.Background(), "bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
+		_, err = obj.PutObject(context.Background(), "bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
-		_, err = obj.PutObject(context.Background(), "bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
+		_, err = obj.PutObject(context.Background(), "bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 		if err != nil {
 			t.Fatalf("%s: <ERROR> %s", instanceType, err)
 		}
@@ -441,22 +445,23 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}

+	var opts ObjectOptions
 	uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
 	length := int64(len(uploadContent))
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}

 	uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
 	length = int64(len(uploadContent))
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}

 	var bytesBuffer bytes.Buffer
-	err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer, "")
+	err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer, "", opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -472,7 +477,8 @@ func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) {

 // Tests validate that bucket operation on non-existent bucket fails.
 func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
-	_, err := obj.PutObject(context.Background(), "bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
+	var opts ObjectOptions
+	_, err := obj.PutObject(context.Background(), "bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil, opts)
 	if err == nil {
 		t.Fatal("Expected error but found nil")
 	}
@@ -519,11 +525,12 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}

 	var bytesBuffer1 bytes.Buffer
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil)
+	var opts ObjectOptions
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer1, "")
+	err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer1, "", opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
@@ -532,11 +539,11 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}

 	var bytesBuffer2 bytes.Buffer
-	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil)
+	_, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer2, "")
+	err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer2, "", opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
||||||
@ -557,16 +564,17 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle
|
|||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var opts ObjectOptions
|
||||||
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
|
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
|
||||||
upload might have been aborted or completed.`
|
upload might have been aborted or completed.`
|
||||||
length := int64(len(uploadContent))
|
length := int64(len(uploadContent))
|
||||||
_, err = obj.PutObject(context.Background(), "bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
|
_, err = obj.PutObject(context.Background(), "bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var bytesBuffer bytes.Buffer
|
var bytesBuffer bytes.Buffer
|
||||||
err = obj.GetObject(context.Background(), "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "")
|
err = obj.GetObject(context.Background(), "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "", opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
@ -703,7 +711,7 @@ func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestE
|
|||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = obj.GetObjectInfo(context.Background(), "bucket", "dir1")
|
_, err = obj.GetObjectInfo(context.Background(), "bucket", "dir1", ObjectOptions{})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("%s: Expected error but found nil", instanceType)
|
t.Fatalf("%s: Expected error but found nil", instanceType)
|
||||||
}
|
}
|
||||||
@ -732,7 +740,8 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
|
|||||||
}
|
}
|
||||||
content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
|
content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
|
||||||
length := int64(len(content))
|
length := int64(len(content))
|
||||||
_, err = obj.PutObject(context.Background(), bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil)
|
var opts ObjectOptions
|
||||||
|
_, err = obj.PutObject(context.Background(), bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil, opts)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||||
@@ -753,7 +762,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}

 	for i, testCase := range testCases {
-		_, expectedErr := obj.GetObjectInfo(context.Background(), bucketName, testCase.dir)
+		_, expectedErr := obj.GetObjectInfo(context.Background(), bucketName, testCase.dir, opts)
 		if expectedErr != nil && expectedErr.Error() != testCase.err.Error() {
 			t.Errorf("Test %d, %s: Expected error %s, got %s", i+1, instanceType, testCase.err, expectedErr)
 		}
@@ -771,13 +780,14 @@ func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
+	var opts ObjectOptions
 	uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
 	// Test empty.
-	_, err = obj.PutObject(context.Background(), "bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), "bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil, opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}
-	objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", "minio.png")
+	objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", "minio.png", opts)
 	if err != nil {
 		t.Fatalf("%s: <ERROR> %s", instanceType, err)
 	}

@@ -124,6 +124,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	// get random bucket name.
 	bucketName := getRandomBucketName()

+	var opts ObjectOptions
 	// Register the API end points with XL/FS object layer.
 	apiRouter := initTestAPIEndPoints(obj, []string{"PostPolicy"})

@@ -227,7 +228,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}
 	// When the operation is successful, check if sending metadata is successful too
 	if rec.Code == http.StatusNoContent {
-		objInfo, err := obj.GetObjectInfo(context.Background(), bucketName, testCase.objectName+"/upload.txt")
+		objInfo, err := obj.GetObjectInfo(context.Background(), bucketName, testCase.objectName+"/upload.txt", opts)
 		if err != nil {
 			t.Error("Unexpected error: ", err)
 		}
@@ -427,6 +428,8 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	// Key specified in Form data
 	keyName := "test/object"

+	var opts ObjectOptions
+
 	// The final name of the upload object
 	targetObj := keyName + "/upload.txt"

@@ -478,7 +481,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	}

 	// Get the uploaded object info
-	info, err := obj.GetObjectInfo(context.Background(), bucketName, targetObj)
+	info, err := obj.GetObjectInfo(context.Background(), bucketName, targetObj, opts)
 	if err != nil {
 		t.Error("Unexpected error: ", err)
 	}
|
@ -175,6 +175,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
resetGlobalStorageEnvs()
|
resetGlobalStorageEnvs()
|
||||||
bucket := getRandomBucketName()
|
bucket := getRandomBucketName()
|
||||||
|
|
||||||
|
var opts ObjectOptions
|
||||||
// make data with more than one part
|
// make data with more than one part
|
||||||
partCount := 3
|
partCount := 3
|
||||||
data := bytes.Repeat([]byte("a"), int(globalPutPartSize)*partCount)
|
data := bytes.Repeat([]byte("a"), int(globalPutPartSize)*partCount)
|
||||||
@ -188,7 +189,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
|
|
||||||
// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
|
// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
|
||||||
object1 := "object1"
|
object1 := "object1"
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object1, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
_, err = obj.PutObject(context.Background(), bucket, object1, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to putObject %v", err)
|
t.Fatalf("Failed to putObject %v", err)
|
||||||
}
|
}
|
||||||
@ -199,7 +200,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
object2 := "object2"
|
object2 := "object2"
|
||||||
metadata2 := make(map[string]string)
|
metadata2 := make(map[string]string)
|
||||||
metadata2["x-amz-storage-class"] = reducedRedundancyStorageClass
|
metadata2["x-amz-storage-class"] = reducedRedundancyStorageClass
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2)
|
_, err = obj.PutObject(context.Background(), bucket, object2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to putObject %v", err)
|
t.Fatalf("Failed to putObject %v", err)
|
||||||
}
|
}
|
||||||
@ -210,7 +211,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
object3 := "object3"
|
object3 := "object3"
|
||||||
metadata3 := make(map[string]string)
|
metadata3 := make(map[string]string)
|
||||||
metadata3["x-amz-storage-class"] = standardStorageClass
|
metadata3["x-amz-storage-class"] = standardStorageClass
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object3, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3)
|
_, err = obj.PutObject(context.Background(), bucket, object3, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to putObject %v", err)
|
t.Fatalf("Failed to putObject %v", err)
|
||||||
}
|
}
|
||||||
@ -226,7 +227,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
Scheme: "EC",
|
Scheme: "EC",
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object4, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4)
|
_, err = obj.PutObject(context.Background(), bucket, object4, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to putObject %v", err)
|
t.Fatalf("Failed to putObject %v", err)
|
||||||
}
|
}
|
||||||
@ -244,7 +245,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
Scheme: "EC",
|
Scheme: "EC",
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object5, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5)
|
_, err = obj.PutObject(context.Background(), bucket, object5, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to putObject %v", err)
|
t.Fatalf("Failed to putObject %v", err)
|
||||||
}
|
}
|
||||||
@ -262,7 +263,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
Scheme: "EC",
|
Scheme: "EC",
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object6, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6)
|
_, err = obj.PutObject(context.Background(), bucket, object6, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to putObject %v", err)
|
t.Fatalf("Failed to putObject %v", err)
|
||||||
}
|
}
|
||||||
@ -280,7 +281,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
|||||||
Scheme: "EC",
|
Scheme: "EC",
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object7, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7)
|
_, err = obj.PutObject(context.Background(), bucket, object7, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to putObject %v", err)
|
t.Fatalf("Failed to putObject %v", err)
|
||||||
}
|
}
|
||||||
|
@ -411,7 +411,7 @@ next:
|
|||||||
if !hasSuffix(objectName, slashSeparator) && objectName != "" {
|
if !hasSuffix(objectName, slashSeparator) && objectName != "" {
|
||||||
// Deny if WORM is enabled
|
// Deny if WORM is enabled
|
||||||
if globalWORMEnabled {
|
if globalWORMEnabled {
|
||||||
if _, err = objectAPI.GetObjectInfo(context.Background(), args.BucketName, objectName); err == nil {
|
if _, err = objectAPI.GetObjectInfo(context.Background(), args.BucketName, objectName, ObjectOptions{}); err == nil {
|
||||||
return toJSONError(errMethodNotAllowed)
|
return toJSONError(errMethodNotAllowed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
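The browser RPC handlers repeat one guard: when WORM mode is on, an object that already exists must not be overwritten or removed, so existence is probed with a zero-value `ObjectOptions`. A condensed sketch of that guard (a hypothetical extraction for illustration; the handlers above and below inline it):

    // denyWORMOverwrite mirrors the inline check: with WORM enabled, a
    // successful stat of the target means the write must be rejected.
    func denyWORMOverwrite(ctx context.Context, objectAPI ObjectLayer, bucket, object string) error {
    	if globalWORMEnabled {
    		if _, err := objectAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
    			return errMethodNotAllowed
    		}
    	}
    	return nil
    }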
@@ -653,16 +653,16 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
 		writeWebErrorResponse(w, err)
 		return
 	}
-
+	opts := ObjectOptions{}
 	// Deny if WORM is enabled
 	if globalWORMEnabled {
-		if _, err = objectAPI.GetObjectInfo(context.Background(), bucket, object); err == nil {
+		if _, err = objectAPI.GetObjectInfo(context.Background(), bucket, object, opts); err == nil {
 			writeWebErrorResponse(w, errMethodNotAllowed)
 			return
 		}
 	}
 
-	objInfo, err := putObject(context.Background(), bucket, object, hashReader, metadata)
+	objInfo, err := putObject(context.Background(), bucket, object, hashReader, metadata, opts)
 	if err != nil {
 		writeWebErrorResponse(w, err)
 		return
@@ -703,7 +703,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 	}
-
+	opts := ObjectOptions{}
 	getObject := objectAPI.GetObject
 	if web.CacheAPI() != nil {
 		getObject = web.CacheAPI().GetObject
@@ -712,7 +712,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
 	if web.CacheAPI() != nil {
 		getObjectInfo = web.CacheAPI().GetObjectInfo
 	}
-	objInfo, err := getObjectInfo(context.Background(), bucket, object)
+	objInfo, err := getObjectInfo(context.Background(), bucket, object, opts)
 	if err != nil {
 		writeWebErrorResponse(w, err)
 		return
@@ -745,7 +745,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
 	// Add content disposition.
 	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object)))
 
-	if err = getObject(context.Background(), bucket, object, 0, -1, httpWriter, ""); err != nil {
+	if err = getObject(context.Background(), bucket, object, 0, -1, httpWriter, "", opts); err != nil {
 		/// No need to print error, response writer already written to.
 		return
 	}
@@ -814,10 +814,11 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
 	if web.CacheAPI() != nil {
 		getObjectInfo = web.CacheAPI().GetObjectInfo
 	}
+	opts := ObjectOptions{}
 	for _, object := range args.Objects {
 		// Writes compressed object file to the response.
 		zipit := func(objectName string) error {
-			info, err := getObjectInfo(context.Background(), args.BucketName, objectName)
+			info, err := getObjectInfo(context.Background(), args.BucketName, objectName, opts)
 			if err != nil {
 				return err
 			}
@@ -853,7 +854,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
 			}
 		}
 		httpWriter := ioutil.WriteOnClose(writer)
-		if err = getObject(context.Background(), args.BucketName, objectName, 0, length, httpWriter, ""); err != nil {
+		if err = getObject(context.Background(), args.BucketName, objectName, 0, length, httpWriter, "", opts); err != nil {
 			return err
 		}
 		if err = httpWriter.Close(); err != nil {
@@ -318,6 +318,8 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
 	}
 
 	bucketName := getRandomBucketName()
+	var opts ObjectOptions
+
 	err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
 	if err != nil {
 		t.Fatalf("failed to create bucket: %s (%s)", err.Error(), instanceType)
@@ -347,7 +349,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
 	for _, test := range testCases {
 		if test.initWithObject {
 			data := bytes.NewBufferString("hello")
-			_, err = obj.PutObject(context.Background(), test.bucketName, "object", mustGetHashReader(t, data, int64(data.Len()), "", ""), nil)
+			_, err = obj.PutObject(context.Background(), test.bucketName, "object", mustGetHashReader(t, data, int64(data.Len()), "", ""), nil, opts)
 			// _, err = obj.PutObject(test.bucketName, "object", int64(data.Len()), data, nil, "")
 			if err != nil {
 				t.Fatalf("could not put object to %s, %s", test.bucketName, err.Error())
@@ -483,7 +485,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
 
 	data := bytes.Repeat([]byte("a"), objectSize)
 	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
 
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
@@ -587,14 +589,14 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
 
 	data := bytes.Repeat([]byte("a"), objectSize)
 	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
 
 	objectName = "a/object"
 	metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -895,7 +897,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
 	}
 
 	var byteBuffer bytes.Buffer
-	err = obj.GetObject(context.Background(), bucketName, objectName, 0, int64(len(content)), &byteBuffer, "")
+	err = obj.GetObject(context.Background(), bucketName, objectName, 0, int64(len(content)), &byteBuffer, "", ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed, %v", err)
 	}
@@ -985,7 +987,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
 
 	content := []byte("temporary file's content")
 	metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -1069,6 +1071,7 @@ func TestWebHandlerDownloadZip(t *testing.T) {
 func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHandler) {
 	apiRouter := initTestWebRPCEndPoint(obj)
 	credentials := globalServerConfig.GetCredential()
+	var opts ObjectOptions
 
 	authorization, err := authenticateURL(credentials.AccessKey, credentials.SecretKey)
 	if err != nil {
@@ -1087,9 +1090,9 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa
 		t.Fatalf("%s : %s", instanceType, err)
 	}
 
-	obj.PutObject(context.Background(), bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
-	obj.PutObject(context.Background(), bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
-	obj.PutObject(context.Background(), bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)
+	obj.PutObject(context.Background(), bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil, opts)
+	obj.PutObject(context.Background(), bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil, opts)
+	obj.PutObject(context.Background(), bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil, opts)
 
 	test := func(token string) (int, []byte) {
 		rec := httptest.NewRecorder()
@@ -1174,7 +1177,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
 
 	data := bytes.Repeat([]byte("a"), objectSize)
 	metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
-	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
+	_, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Was not able to upload an object, %v", err)
 	}
@@ -579,18 +579,18 @@ func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err err
 // --- Object Operations ---
 
 // GetObject - reads an object from the hashedSet based on the object name.
-func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
-	return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag)
+func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
+	return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
 }
 
 // PutObject - writes an object to hashedSet based on the object name.
-func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
-	return s.getHashedSet(object).PutObject(ctx, bucket, object, data, metadata)
+func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+	return s.getHashedSet(object).PutObject(ctx, bucket, object, data, metadata, opts)
 }
 
 // GetObjectInfo - reads object metadata from the hashedSet based on the object name.
-func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error) {
-	return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object)
+func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+	return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object, opts)
 }
 
 // DeleteObject - deletes an object from the hashedSet based on the object name.
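The definition of `ObjectOptions` itself is not part of this excerpt. Based on how the calls are threaded here, it is a small per-request carrier, roughly of this shape (the field is an assumption, not shown in this diff):

    // ObjectOptions - assumed shape of the new options type. It carries
    // per-request state, such as the server-side encryption method, so the
    // ObjectLayer interface does not have to grow one parameter per feature.
    type ObjectOptions struct {
    	ServerSideEncryption encrypt.ServerSide
    }

Passing a struct rather than individual arguments means future per-request options can be added without touching every implementation signature again.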
@@ -599,14 +599,14 @@ func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string)
 }
 
 // CopyObject - copies objects from one hashedSet to another hashedSet, on server side.
-func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
+func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
 	srcSet := s.getHashedSet(srcObject)
 	destSet := s.getHashedSet(destObject)
 
 	// Check if this request is only metadata update.
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(destBucket, destObject))
 	if cpSrcDstSame && srcInfo.metadataOnly {
-		return srcSet.CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo)
+		return srcSet.CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
 	}
 
 	// Hold write lock on destination since in both cases
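CopyObject takes two option values because the source and destination halves of a copy are independent requests: the read may need one encryption context and the write another. A hedged illustration (field name as assumed above; bucket and object names are placeholders):

    // copyWithOptions shows why CopyObject takes srcOpts and dstOpts: a copy
    // can read under one server-side encryption setting and write under another.
    func copyWithOptions(ctx context.Context, objAPI ObjectLayer, srcInfo ObjectInfo, srcSSE, dstSSE encrypt.ServerSide) error {
    	srcOpts := ObjectOptions{ServerSideEncryption: srcSSE}
    	dstOpts := ObjectOptions{ServerSideEncryption: dstSSE}
    	_, err := objAPI.CopyObject(ctx, "src-bucket", "a.txt", "dst-bucket", "b.txt", srcInfo, srcOpts, dstOpts)
    	return err
    }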
@@ -632,7 +632,7 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucke
 	}
 
 	go func() {
-		if gerr := srcSet.getObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag); gerr != nil {
+		if gerr := srcSet.getObject(ctx, srcBucket, srcObject, 0, srcInfo.Size, srcInfo.Writer, srcInfo.ETag, srcOpts); gerr != nil {
 			if gerr = srcInfo.Writer.Close(); gerr != nil {
 				logger.LogIf(ctx, gerr)
 			}
@@ -645,7 +645,7 @@ func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucke
 		}
 	}()
 
-	return destSet.putObject(ctx, destBucket, destObject, srcInfo.Reader, srcInfo.UserDefined)
+	return destSet.putObject(ctx, destBucket, destObject, srcInfo.Reader, srcInfo.UserDefined, dstOpts)
 }
 
 // Returns function "listDir" of the type listDirFunc.
@@ -833,19 +833,19 @@ func (s *xlSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMa
 }
 
 // Initiate a new multipart upload on a hashedSet based on object name.
-func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
-	return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, metadata)
+func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string, opts ObjectOptions) (uploadID string, err error) {
+	return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, metadata, opts)
 }
 
 // Copies a part of an object from source hashedSet to destination hashedSet.
 func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo ObjectInfo) (partInfo PartInfo, err error) {
+	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
 
 	srcSet := s.getHashedSet(srcObject)
 	destSet := s.getHashedSet(destObject)
 
 	go func() {
-		if gerr := srcSet.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
+		if gerr := srcSet.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag, srcOpts); gerr != nil {
 			if gerr = srcInfo.Writer.Close(); gerr != nil {
 				logger.LogIf(ctx, gerr)
 				return
@@ -857,12 +857,12 @@ func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destB
 		}
 	}()
 
-	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.Reader)
+	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.Reader, dstOpts)
 }
 
 // PutObjectPart - writes part of an object to hashedSet based on the object name.
-func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
-	return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data)
+func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (info PartInfo, err error) {
+	return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 }
 
 // ListObjectParts - lists all uploaded parts to an object in hashedSet.
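The multipart surface follows the same rule: the options chosen at NewMultipartUpload time must be re-supplied to each PutObjectPart so every part is handled consistently. A short sketch (illustrative helper; single part, completion step omitted):

    // multipartWithOptions passes the same opts to the upload initiation and
    // to each part, mirroring the delegation in the hunks above.
    func multipartWithOptions(ctx context.Context, objAPI ObjectLayer, bucket, object string, part *hash.Reader, opts ObjectOptions) error {
    	uploadID, err := objAPI.NewMultipartUpload(ctx, bucket, object, nil, opts)
    	if err != nil {
    		return err
    	}
    	_, err = objAPI.PutObjectPart(ctx, bucket, object, uploadID, 1, part, opts)
    	return err
    }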
@@ -43,7 +43,7 @@ func TestXLParentDirIsObject(t *testing.T) {
 	}
 	objectContent := "12345"
 	objInfo, err := obj.PutObject(context.Background(), bucketName, objectName,
-		mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
+		mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -200,7 +200,7 @@ func TestListOnlineDisks(t *testing.T) {
 		t.Fatalf("Failed to make a bucket %v", err)
 	}
 
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -292,7 +292,7 @@ func TestDisksWithAllParts(t *testing.T) {
 		t.Fatalf("Failed to make a bucket %v", err)
 	}
 
-	_, err = obj.PutObject(ctx, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
+	_, err = obj.PutObject(ctx, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatalf("Failed to putObject %v", err)
 	}
@@ -75,6 +75,7 @@ func TestHealObjectXL(t *testing.T) {
 	bucket := "bucket"
 	object := "object"
 	data := bytes.Repeat([]byte("a"), 5*1024*1024)
+	var opts ObjectOptions
 
 	err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
 	if err != nil {
@@ -83,14 +84,14 @@ func TestHealObjectXL(t *testing.T) {
 
 	// Create an object with multiple parts uploaded in decreasing
 	// part number.
-	uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil, opts)
 	if err != nil {
 		t.Fatalf("Failed to create a multipart upload - %v", err)
 	}
 
 	var uploadedParts []CompletePart
 	for _, partID := range []int{2, 1} {
-		pInfo, err1 := obj.PutObjectPart(context.Background(), bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""))
+		pInfo, err1 := obj.PutObjectPart(context.Background(), bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
 		if err1 != nil {
 			t.Fatalf("Failed to upload a part - %v", err1)
 		}
@@ -67,7 +67,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
 	// iterate through the above set of inputs and upkoad the object.
 	for i, input := range putObjectInputs {
 		// uploading the object.
-		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
+		_, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData, ObjectOptions{})
 		// if object upload fails stop the test.
 		if err != nil {
 			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@@ -108,6 +108,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
 	bucketNames := []string{"minio-bucket", "minio-2-bucket"}
 	objectNames := []string{"minio-object-1.txt"}
 	uploadIDs := []string{}
+	var opts ObjectOptions
 
 	// bucketnames[0].
 	// objectNames[0].
@@ -119,7 +120,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
 	// Initiate Multipart Upload on the above created bucket.
-	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil, opts)
 	if err != nil {
 		// Failed to create NewMultipartUpload, abort.
 		t.Fatalf("%s : %s", instanceType, err.Error())
@@ -150,7 +151,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
 	sha256sum := ""
 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, testCase := range createPartCases {
-		_, perr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
+		_, perr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum), opts)
 		if perr != nil {
 			t.Fatalf("%s : %s", instanceType, perr)
 		}
@@ -240,7 +240,7 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec
 // subsequent request each UUID is unique.
 //
 // Implements S3 compatible initiate multipart API.
-func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
+func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, opts ObjectOptions) (string, error) {
 	if err := checkNewMultipartArgs(ctx, bucket, object, xl); err != nil {
 		return "", err
 	}
@@ -256,7 +256,7 @@ func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object strin
 // data is read from an existing object.
 //
 // Implements S3 compatible Upload Part Copy API.
-func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {
+func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
 	// Hold read locks on source object only if we are
 	// going to read data from source object.
 	objectSRLock := xl.nsMutex.NewNSLock(srcBucket, srcObject)
@@ -270,7 +270,7 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
 	}
 
 	go func() {
-		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
+		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag, srcOpts); gerr != nil {
 			if gerr = srcInfo.Writer.Close(); gerr != nil {
 				logger.LogIf(ctx, gerr)
 			}
@@ -283,7 +283,7 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
 		}
 	}()
 
-	partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader)
+	partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader, dstOpts)
 	if err != nil {
 		return pi, toObjectErr(err, dstBucket, dstObject)
 	}
@@ -297,7 +297,7 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
 // of the multipart transaction.
 //
 // Implements S3 compatible Upload Part API.
-func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
+func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader, opts ObjectOptions) (pi PartInfo, e error) {
 	if err := checkPutObjectPartArgs(ctx, bucket, object, xl); err != nil {
 		return pi, err
 	}
@@ -40,9 +40,10 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
 
 	bucketName := "bucket"
 	objectName := "object"
+	var opts ObjectOptions
 
 	obj.MakeBucketWithLocation(context.Background(), bucketName, "")
-	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
+	uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil, opts)
 	if err != nil {
 		t.Fatal("Unexpected err: ", err)
 	}
@@ -80,7 +80,7 @@ func (xl xlObjects) prepareFile(ctx context.Context, bucket, object string, size
 // CopyObject - copy object source object to destination object.
 // if source object and destination object are same we only
 // update metadata.
-func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo) (oi ObjectInfo, e error) {
+func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 
 	// Read metadata associated with the object from all disks.
@@ -138,7 +138,7 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
 
 	go func() {
 		var startOffset int64 // Read the whole file.
-		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, pipeWriter, srcInfo.ETag); gerr != nil {
+		if gerr := xl.getObject(ctx, srcBucket, srcObject, startOffset, length, pipeWriter, srcInfo.ETag, srcOpts); gerr != nil {
 			pipeWriter.CloseWithError(toObjectErr(gerr, srcBucket, srcObject))
 			return
 		}
@@ -151,7 +151,7 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
 		return oi, toObjectErr(err, dstBucket, dstObject)
 	}
 
-	objInfo, err := xl.putObject(ctx, dstBucket, dstObject, hashReader, srcInfo.UserDefined)
+	objInfo, err := xl.putObject(ctx, dstBucket, dstObject, hashReader, srcInfo.UserDefined, dstOpts)
 	if err != nil {
 		return oi, toObjectErr(err, dstBucket, dstObject)
 	}
@@ -168,18 +168,18 @@ func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBuc
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
+func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
 	// Lock the object before reading.
 	objectLock := xl.nsMutex.NewNSLock(bucket, object)
 	if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
 		return err
 	}
 	defer objectLock.RUnlock()
-	return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag)
+	return xl.getObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
 }
 
 // getObject wrapper for xl GetObject
-func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
+func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
 
 	if err := checkGetObjArgs(ctx, bucket, object); err != nil {
 		return err
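Note the pattern the XL layer keeps throughout this change: the exported method (GetObject here, PutObject below) owns the namespace lock and forwards `opts` untouched to its unexported twin (getObject, putObject), which does the work assuming the lock is already held. Internal callers such as CopyObject call the unexported form directly to avoid self-deadlock while still supplying their own options.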
@ -350,7 +350,7 @@ func (xl xlObjects) getObjectInfoDir(ctx context.Context, bucket, object string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
|
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
|
||||||
func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
|
func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
|
||||||
// Lock the object before reading.
|
// Lock the object before reading.
|
||||||
objectLock := xl.nsMutex.NewNSLock(bucket, object)
|
objectLock := xl.nsMutex.NewNSLock(bucket, object)
|
||||||
if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
|
if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
|
||||||
@ -512,7 +512,7 @@ func renameObject(ctx context.Context, disks []StorageAPI, srcBucket, srcObject,
|
|||||||
// until EOF, erasure codes the data across all disk and additionally
|
// until EOF, erasure codes the data across all disk and additionally
|
||||||
// writes `xl.json` which carries the necessary metadata for future
|
// writes `xl.json` which carries the necessary metadata for future
|
||||||
// object operations.
|
// object operations.
|
||||||
func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
|
func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||||
// Validate put object input args.
|
// Validate put object input args.
|
||||||
if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
|
if err = checkPutObjectArgs(ctx, bucket, object, xl, data.Size()); err != nil {
|
||||||
return ObjectInfo{}, err
|
return ObjectInfo{}, err
|
||||||
@ -523,11 +523,11 @@ func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string,
|
|||||||
return objInfo, err
|
return objInfo, err
|
||||||
}
|
}
|
||||||
defer objectLock.Unlock()
|
defer objectLock.Unlock()
|
||||||
return xl.putObject(ctx, bucket, object, data, metadata)
|
return xl.putObject(ctx, bucket, object, data, metadata, opts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// putObject wrapper for xl PutObject
|
// putObject wrapper for xl PutObject
|
||||||
func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
|
func (xl xlObjects) putObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
|
||||||
uniqueID := mustGetUUID()
|
uniqueID := mustGetUUID()
|
||||||
tempObj := uniqueID
|
tempObj := uniqueID
|
||||||
|
|
||||||
|
@ -34,6 +34,7 @@ func TestRepeatPutObjectPart(t *testing.T) {
|
|||||||
var objLayer ObjectLayer
|
var objLayer ObjectLayer
|
||||||
var disks []string
|
var disks []string
|
||||||
var err error
|
var err error
|
||||||
|
var opts ObjectOptions
|
||||||
|
|
||||||
objLayer, disks, err = prepareXL16()
|
objLayer, disks, err = prepareXL16()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -48,18 +49,18 @@ func TestRepeatPutObjectPart(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadID, err := objLayer.NewMultipartUpload(context.Background(), "bucket1", "mpartObj1", nil)
|
uploadID, err := objLayer.NewMultipartUpload(context.Background(), "bucket1", "mpartObj1", nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
|
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
|
||||||
md5Hex := getMD5Hash(fiveMBBytes)
|
md5Hex := getMD5Hash(fiveMBBytes)
|
||||||
_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
|
_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
|
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
|
||||||
_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
|
_, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""), opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@ -93,7 +94,7 @@ func TestXLDeleteObjectBasic(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Create object "dir/obj" under bucket "bucket" for Test 7 to pass
|
// Create object "dir/obj" under bucket "bucket" for Test 7 to pass
|
||||||
_, err = xl.PutObject(context.Background(), "bucket", "dir/obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
_, err = xl.PutObject(context.Background(), "bucket", "dir/obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, ObjectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
|
t.Fatalf("XL Object upload failed: <ERROR> %s", err)
|
||||||
}
|
}
|
||||||
@ -109,7 +110,6 @@ func TestXLDeleteObjectBasic(t *testing.T) {
|
|||||||
// Cleanup backend directories
|
// Cleanup backend directories
|
||||||
removeRoots(fsDirs)
|
removeRoots(fsDirs)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
||||||
// Reset global storage class flags
|
// Reset global storage class flags
|
||||||
resetGlobalStorageEnvs()
|
resetGlobalStorageEnvs()
|
||||||
@ -129,8 +129,9 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
|
|||||||
|
|
||||||
bucket := "bucket"
|
bucket := "bucket"
|
||||||
object := "object"
|
object := "object"
|
||||||
|
opts := ObjectOptions{}
|
||||||
// Create object "obj" under bucket "bucket".
|
// Create object "obj" under bucket "bucket".
|
||||||
_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
|
_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
@@ -145,7 +146,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
 	}
 
 	// Create "obj" under "bucket".
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -179,8 +180,9 @@ func TestGetObjectNoQuorum(t *testing.T) {
 
 	bucket := "bucket"
 	object := "object"
+	opts := ObjectOptions{}
 	// Create "object" under "bucket".
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -203,7 +205,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
 		}
 	}
 	// Fetch object from store.
-	err = xl.GetObject(context.Background(), bucket, object, 0, int64(len("abcd")), ioutil.Discard, "")
+	err = xl.GetObject(context.Background(), bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
 	if err != toObjectErr(errXLReadQuorum, bucket, object) {
 		t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
 	}
@@ -229,8 +231,9 @@ func TestPutObjectNoQuorum(t *testing.T) {
 
 	bucket := "bucket"
 	object := "object"
+	opts := ObjectOptions{}
 	// Create "object" under "bucket".
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -253,7 +256,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
 		}
 	}
 	// Upload new content to same object "object"
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil, opts)
 	if err != toObjectErr(errXLWriteQuorum, bucket, object) {
 		t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
 	}
@@ -287,7 +290,7 @@ func TestHealing(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil)
+	_, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil, ObjectOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
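Every hunk above makes the same mechanical change: each ObjectLayer call gains a trailing options argument. A minimal, self-contained sketch of that shape (the trimmed `objectPutter` interface and `memStore` type below are illustrative stand-ins, not MinIO's real `ObjectLayer`):

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
)

// ObjectOptions loosely mirrors the options struct this commit threads
// through every ObjectLayer call; the real struct in MinIO's cmd package
// carries more state (for example, server-side-encryption options).
type ObjectOptions struct{}

// objectPutter is a hypothetical one-method slice of MinIO's ObjectLayer
// interface, shown only to illustrate the new trailing parameter.
type objectPutter interface {
	PutObject(ctx context.Context, bucket, object string, data io.Reader, metadata map[string]string, opts ObjectOptions) error
}

// memStore is a toy in-memory implementation of objectPutter.
type memStore struct {
	objects map[string][]byte
}

func (m *memStore) PutObject(ctx context.Context, bucket, object string, data io.Reader, metadata map[string]string, opts ObjectOptions) error {
	buf, err := ioutil.ReadAll(data)
	if err != nil {
		return err
	}
	m.objects[bucket+"/"+object] = buf
	return nil
}

func main() {
	var store objectPutter = &memStore{objects: make(map[string][]byte)}
	// Callers now pass an options value explicitly, even when it is empty,
	// exactly as the updated tests do with opts / ObjectOptions{}.
	err := store.PutObject(context.Background(), "bucket", "object",
		bytes.NewReader([]byte("abcd")), nil, ObjectOptions{})
	fmt.Println("put error:", err)
}
```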
vendor/github.com/minio/minio-go/README.md | 6 (generated, vendored)
@@ -139,7 +139,7 @@ The full API Reference is available here.
 
 ### API Reference : File Object Operations
 * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
-* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
 * [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
 * [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
 
@@ -183,6 +183,10 @@ The full API Reference is available here.
 * [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
 * [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
 
+### Full Examples : Bucket lifecycle Operations
+* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
+* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
+
 ### Full Examples : Bucket notification Operations
 * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
 * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
vendor/github.com/minio/minio-go/api-compose-object.go | 31 (generated, vendored)
@@ -101,8 +101,12 @@ func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) ma
 		r["x-amz-metadata-directive"] = "REPLACE"
 	}
 	for k, v := range d.userMetadata {
+		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+			r[k] = v
+		} else {
 			r["x-amz-meta-"+k] = v
 		}
+	}
 	return r
 }
 
@@ -373,15 +377,6 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
 				fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
 		}
-
-		// Since we did a HEAD to get size, we use the ETag
-		// value to make sure the object has not changed by
-		// the time we perform the copy. This is done, only if
-		// the user has not set their own ETag match
-		// condition.
-		if src.Headers.Get("x-amz-copy-source-if-match") == "" {
-			src.SetMatchETagCond(etag)
-		}
 
 		// Check if a segment is specified, and if so, is the
 		// segment within object bounds?
 		if src.start != -1 {
@@ -429,7 +424,15 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
 
 	// Now, handle multipart-copy cases.
 
-	// 1. Initiate a new multipart upload.
+	// 1. Ensure that the object has not been changed while
+	// we are copying data.
+	for _, src := range srcs {
+		if src.Headers.Get("x-amz-copy-source-if-match") == "" {
+			src.SetMatchETagCond(etag)
+		}
+	}
+
+	// 2. Initiate a new multipart upload.
 
 	// Set user-metadata on the destination object. If no
 	// user-metadata is specified, and there is only one source,
@@ -449,13 +452,13 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
 		return err
 	}
 
-	// 2. Perform copy part uploads
+	// 3. Perform copy part uploads
 	objParts := []CompletePart{}
 	partIndex := 1
 	for i, src := range srcs {
 		h := src.Headers
 		if src.encryption != nil {
-			src.encryption.Marshal(h)
+			encrypt.SSECopy(src.encryption).Marshal(h)
 		}
 		// Add destination encryption headers
 		if dst.encryption != nil {
@@ -480,14 +483,14 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo
 				return err
 			}
 			if progress != nil {
-				io.CopyN(ioutil.Discard, progress, start+end-1)
+				io.CopyN(ioutil.Discard, progress, end-start+1)
 			}
 			objParts = append(objParts, complPart)
 			partIndex++
 		}
 	}
 
-	// 3. Make final complete-multipart request.
+	// 4. Make final complete-multipart request.
 	_, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
 		completeMultipartUpload{Parts: objParts})
 	if err != nil {
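Usage note: with the reordering above, the ETag-match guard is now applied to every source just before the multipart copy begins, instead of only during the earlier per-source validation pass. A hedged sketch of a compose call through the public client API (endpoint, credentials, and bucket/object names below are placeholders, not part of this commit):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Two unencrypted source objects, concatenated into one destination.
	srcs := []minio.SourceInfo{
		minio.NewSourceInfo("source-bucket", "part-1", nil),
		minio.NewSourceInfo("source-bucket", "part-2", nil),
	}
	dst, err := minio.NewDestinationInfo("dest-bucket", "combined", nil, nil)
	if err != nil {
		log.Fatalln(err)
	}

	// Unless the caller already set an x-amz-copy-source-if-match
	// condition, each source is pinned to the ETag observed during its
	// size lookup, so a source changing mid-copy fails the request.
	if err := client.ComposeObject(dst, srcs); err != nil {
		log.Fatalln(err)
	}
}
```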
vendor/github.com/minio/minio-go/api-get-lifecycle.go | 77 (generated, vendored, new file)
@@ -0,0 +1,77 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketLifecycle - get bucket lifecycle.
+func (c Client) GetBucketLifecycle(bucketName string) (string, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	bucketLifecycle, err := c.getBucketLifecycle(bucketName)
+	if err != nil {
+		errResponse := ToErrorResponse(err)
+		if errResponse.Code == "NoSuchLifecycleConfiguration" {
+			return "", nil
+		}
+		return "", err
+	}
+	return bucketLifecycle, nil
+}
+
+// Request server for current bucket lifecycle.
+func (c Client) getBucketLifecycle(bucketName string) (string, error) {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("lifecycle", "")
+
+	// Execute GET on bucket to get lifecycle.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:  bucketName,
+		queryValues: urlValues,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return "", err
+	}
+
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return "", httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+
+	lifecycle := string(bucketLifecycleBuf)
+	return lifecycle, err
+}
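A short usage sketch for the new call (endpoint and credentials are placeholders). Note the behavior encoded above: a bucket with no lifecycle configuration returns an empty string and a nil error rather than failing.

```go
package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Returns the lifecycle configuration as an XML string; "" with a
	// nil error means no configuration is set on the bucket.
	lifecycle, err := client.GetBucketLifecycle("my-bucket")
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("lifecycle config:", lifecycle)
}
```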
vendor/github.com/minio/minio-go/api-get-object-acl.go | 136 (generated, vendored, new file)
@@ -0,0 +1,136 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"net/http"
+	"net/url"
+)
+
+type accessControlPolicy struct {
+	Owner struct {
+		ID          string `xml:"ID"`
+		DisplayName string `xml:"DisplayName"`
+	} `xml:"Owner"`
+	AccessControlList struct {
+		Grant []struct {
+			Grantee struct {
+				ID          string `xml:"ID"`
+				DisplayName string `xml:"DisplayName"`
+				URI         string `xml:"URI"`
+			} `xml:"Grantee"`
+			Permission string `xml:"Permission"`
+		} `xml:"Grant"`
+	} `xml:"AccessControlList"`
+}
+
+//GetObjectACL get object ACLs
+func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) {
+
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName: bucketName,
+		objectName: objectName,
+		queryValues: url.Values{
+			"acl": []string{""},
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	defer closeResponse(resp)
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, httpRespToErrorResponse(resp, bucketName, objectName)
+	}
+
+	res := &accessControlPolicy{}
+
+	if err := xmlDecoder(resp.Body, res); err != nil {
+		return nil, err
+	}
+
+	objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{})
+	if err != nil {
+		return nil, err
+	}
+
+	cannedACL := getCannedACL(res)
+	if cannedACL != "" {
+		objInfo.Metadata.Add("X-Amz-Acl", cannedACL)
+		return &objInfo, nil
+	}
+
+	grantACL := getAmzGrantACL(res)
+	for k, v := range grantACL {
+		objInfo.Metadata[k] = v
+	}
+
+	return &objInfo, nil
+}
+
+func getCannedACL(aCPolicy *accessControlPolicy) string {
+	grants := aCPolicy.AccessControlList.Grant
+
+	switch {
+	case len(grants) == 1:
+		if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" {
+			return "private"
+		}
+	case len(grants) == 2:
+		for _, g := range grants {
+			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" {
+				return "authenticated-read"
+			}
+			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" {
+				return "public-read"
+			}
+			if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID {
+				return "bucket-owner-read"
+			}
+		}
+	case len(grants) == 3:
+		for _, g := range grants {
+			if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" {
+				return "public-read-write"
+			}
+		}
+	}
+	return ""
+}
+
+func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string {
+	grants := aCPolicy.AccessControlList.Grant
+	res := map[string][]string{}
+
+	for _, g := range grants {
+		switch {
+		case g.Permission == "READ":
+			res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID)
+		case g.Permission == "WRITE":
+			res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID)
+		case g.Permission == "READ_ACP":
+			res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID)
+		case g.Permission == "WRITE_ACP":
+			res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID)
+		case g.Permission == "FULL_CONTROL":
+			res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID)
+		}
+	}
+	return res
+}
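As the code above shows, GetObjectACL stats the object and then reports the ACL through the returned ObjectInfo metadata: either a canned ACL in "X-Amz-Acl" or individual "X-Amz-Grant-*" entries. A hedged usage sketch (endpoint, credentials, and names are placeholders):

```go
package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	info, err := client.GetObjectACL("my-bucket", "my-object")
	if err != nil {
		log.Fatalln(err)
	}

	// Canned ACL, if the grants matched one of the known patterns.
	fmt.Println("canned ACL:", info.Metadata.Get("X-Amz-Acl"))

	// Otherwise, the individual grant headers carry grantee IDs.
	for k, v := range info.Metadata {
		fmt.Println(k, v)
	}
}
```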
vendor/github.com/minio/minio-go/api-get-options.go | 2 (generated, vendored)
@@ -44,7 +44,7 @@ func (o GetObjectOptions) Header() http.Header {
 	for k, v := range o.headers {
 		headers.Set(k, v)
 	}
-	if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() != encrypt.S3 {
+	if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC {
 		o.ServerSideEncryption.Marshal(headers)
 	}
 	return headers
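The tightened condition means a GET now carries SSE headers only for SSE-C, the one scheme where the client must present the key; SSE-S3 and SSE-KMS objects are decrypted server-side with no request headers. A hedged SSE-C download sketch (placeholder endpoint, credentials, and a demo 32-byte key):

```go
package main

import (
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// SSE-C: the caller supplies the 32-byte key; per the change above,
	// this is the only SSE type for which Header() still emits headers
	// on a GET request.
	sse, err := encrypt.NewSSEC([]byte("32byteslongsecretkeymustbegiven1"))
	if err != nil {
		log.Fatalln(err)
	}

	obj, err := client.GetObject("my-bucket", "my-object",
		minio.GetObjectOptions{ServerSideEncryption: sse})
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatalln(err)
	}
}
```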
vendor/github.com/minio/minio-go/api-list.go | 15 (generated, vendored)
@@ -633,30 +633,27 @@ func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsI
 	return partsInfo, nil
 }
 
-// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name.
-func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) {
+// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name.
+func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) {
+	var uploadIDs []string
 	// Make list incomplete uploads recursive.
 	isRecursive := true
 	// Turn off size aggregation of individual parts, in this request.
 	isAggregateSize := false
-	// latestUpload to track the latest multipart info for objectName.
-	var latestUpload ObjectMultipartInfo
 	// Create done channel to cleanup the routine.
 	doneCh := make(chan struct{})
 	defer close(doneCh)
 	// List all incomplete uploads.
 	for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
 		if mpUpload.Err != nil {
-			return "", mpUpload.Err
+			return nil, mpUpload.Err
 		}
 		if objectName == mpUpload.Key {
-			if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 {
-				latestUpload = mpUpload
-			}
+			uploadIDs = append(uploadIDs, mpUpload.UploadID)
 		}
 	}
-	// Return the latest upload id.
-	return latestUpload.UploadID, nil
+	return uploadIDs, nil
 }
 
 // getTotalMultipartSize - calculate total uploaded size for the a given multipart object.
vendor/github.com/minio/minio-go/api-put-bucket.go | 81 (generated, vendored)
@@ -178,6 +178,87 @@ func (c Client) removeBucketPolicy(bucketName string) error {
 	return nil
 }
 
+// SetBucketLifecycle set the lifecycle on an existing bucket.
+func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// If lifecycle is empty then delete it.
+	if lifecycle == "" {
+		return c.removeBucketLifecycle(bucketName)
+	}
+
+	// Save the updated lifecycle.
+	return c.putBucketLifecycle(bucketName, lifecycle)
+}
+
+// Saves a new bucket lifecycle.
+func (c Client) putBucketLifecycle(bucketName, lifecycle string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("lifecycle", "")
+
+	// Content-length is mandatory for put lifecycle request
+	lifecycleReader := strings.NewReader(lifecycle)
+	b, err := ioutil.ReadAll(lifecycleReader)
+	if err != nil {
+		return err
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentBody:      lifecycleReader,
+		contentLength:    int64(len(b)),
+		contentMD5Base64: sumMD5Base64(b),
+	}
+
+	// Execute PUT to upload a new bucket lifecycle.
+	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// Remove lifecycle from a bucket.
+func (c Client) removeBucketLifecycle(bucketName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("lifecycle", "")
+
+	// Execute DELETE on objectName.
+	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
 // SetBucketNotification saves a new bucket notification.
 func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
 	// Input validation.
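A sketch of setting and clearing a lifecycle through the new API. The XML rule shown is an illustrative example, not from this commit; endpoint and credentials are placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// The lifecycle document is raw XML; this one-rule example expires
	// objects under "uploads/" after 30 days.
	lifecycle := `<LifecycleConfiguration>
  <Rule>
    <ID>expire-uploads</ID>
    <Prefix>uploads/</Prefix>
    <Status>Enabled</Status>
    <Expiration><Days>30</Days></Expiration>
  </Rule>
</LifecycleConfiguration>`

	if err := client.SetBucketLifecycle("my-bucket", lifecycle); err != nil {
		log.Fatalln(err)
	}

	// Passing "" removes the configuration again (see the branch into
	// removeBucketLifecycle above).
	if err := client.SetBucketLifecycle("my-bucket", ""); err != nil {
		log.Fatalln(err)
	}
}
```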
vendor/github.com/minio/minio-go/api-put-object-multipart.go | 2 (generated, vendored)
@@ -259,7 +259,7 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID
 
 	// Set encryption headers, if any.
 	customHeader := make(http.Header)
-	if sse != nil && sse.Type() != encrypt.S3 && sse.Type() != encrypt.KMS {
+	if sse != nil {
 		sse.Marshal(customHeader)
 	}
 
vendor/github.com/minio/minio-go/api-remove.go | 10 (generated, vendored)
@@ -233,18 +233,20 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
 	if err := s3utils.CheckValidObjectName(objectName); err != nil {
 		return err
 	}
-	// Find multipart upload id of the object to be aborted.
-	uploadID, err := c.findUploadID(bucketName, objectName)
+	// Find multipart upload ids of the object to be aborted.
+	uploadIDs, err := c.findUploadIDs(bucketName, objectName)
 	if err != nil {
 		return err
 	}
-	if uploadID != "" {
-		// Upload id found, abort the incomplete multipart upload.
+	for _, uploadID := range uploadIDs {
+		// abort incomplete multipart upload, based on the upload id passed.
 		err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
 		if err != nil {
 			return err
 		}
 	}
 
 	return nil
 }
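With this change, RemoveIncompleteUpload aborts every incomplete multipart upload recorded for the object, not just the most recently initiated one. Usage is unchanged (endpoint and credentials are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Aborts all incomplete multipart uploads for this object, using the
	// uploadIDs collected by findUploadIDs above.
	if err := client.RemoveIncompleteUpload("my-bucket", "my-object"); err != nil {
		log.Fatalln(err)
	}
}
```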
vendor/github.com/minio/minio-go/api.go | 19 (generated, vendored)
@@ -99,7 +99,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v6.0.3"
+	libraryVersion = "v6.0.6"
 )
 
 // User Agent should always following the below style.
@@ -454,25 +454,12 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
 		if err != nil {
 			return err
 		}
 	} else {
-		// WORKAROUND for https://github.com/golang/go/issues/13942.
-		// httputil.DumpResponse does not print response headers for
-		// all successful calls which have response ContentLength set
-		// to zero. Keep this workaround until the above bug is fixed.
-		if resp.ContentLength == 0 {
-			var buffer bytes.Buffer
-			if err = resp.Header.Write(&buffer); err != nil {
-				return err
-			}
-			respTrace = buffer.Bytes()
-			respTrace = append(respTrace, []byte("\r\n")...)
-		} else {
 		respTrace, err = httputil.DumpResponse(resp, false)
 		if err != nil {
 			return err
 		}
 	}
-	}
 
 	// Write response to trace output.
 	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
@@ -599,8 +586,8 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque
 		// Initiate the request.
 		res, err = c.do(req)
 		if err != nil {
-			// For supported network errors verify.
-			if isNetErrorRetryable(err) {
+			// For supported http requests errors verify.
+			if isHTTPReqErrorRetryable(err) {
 				continue // Retry.
 			}
 			// For other errors, return here no need to retry.
vendor/github.com/minio/minio-go/core.go | 9 (generated, vendored)
@@ -21,6 +21,8 @@ import (
 	"context"
 	"io"
 	"strings"
+
+	"github.com/minio/minio-go/pkg/encrypt"
 )
 
 // Core - Inherits Client and adds new methods to expose the low level S3 APIs.
@@ -68,7 +70,7 @@ func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string
 }
 
 // PutObject - Upload object. Uploads using single PUT call.
-func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
+func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) {
 	opts := PutObjectOptions{}
 	m := make(map[string]string)
 	for k, v := range metadata {
@@ -89,6 +91,7 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba
 		}
 	}
 	opts.UserMetadata = m
+	opts.ServerSideEncryption = sse
 	return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
 }
 
@@ -104,8 +107,8 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
 }
 
 // PutObjectPart - Upload an object part.
-func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
-	return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, nil)
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
+	return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
 }
 
 // ListObjectParts - List uploaded parts of an incomplete upload.x
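Core.PutObject and Core.PutObjectPart now accept a server-side encryption argument that is threaded through to the underlying requests (and, via the uploadPart change above, marshaled for every SSE type on part uploads). A hedged SSE-S3 sketch (endpoint, credentials, and names are placeholders):

```go
package main

import (
	"log"
	"strings"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	body := strings.NewReader("hello")

	// SSE-S3: the server manages the key; callers that want no
	// encryption simply pass nil for the new trailing parameter.
	sse := encrypt.NewSSE()
	if _, err := core.PutObject("my-bucket", "my-object", body,
		int64(body.Len()), "", "", nil, sse); err != nil {
		log.Fatalln(err)
	}
}
```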
vendor/github.com/minio/minio-go/functional_tests.go | 7499 (generated, vendored; file diff suppressed because it is too large)
vendor/github.com/minio/minio-go/retry.go | 20 (generated, vendored)
@@ -85,24 +85,21 @@ func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duratio
 	return attemptCh
 }
 
-// isNetErrorRetryable - is network error retryable.
-func isNetErrorRetryable(err error) bool {
+// isHTTPReqErrorRetryable - is http requests error retryable, such
+// as i/o timeout, connection broken etc..
+func isHTTPReqErrorRetryable(err error) bool {
 	if err == nil {
 		return false
 	}
-	switch err.(type) {
-	case net.Error:
-		switch err.(type) {
+	switch e := err.(type) {
+	case *url.Error:
+		switch e.Err.(type) {
 		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
 			return true
-		case *url.Error:
-			// For a URL error, where it replies back "connection closed"
-			// retry again.
+		}
 		if strings.Contains(err.Error(), "Connection closed by foreign host") {
 			return true
-		}
-		default:
-			if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
+		} else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
 			// If error is - tlsHandshakeTimeoutError, retry.
 			return true
 		} else if strings.Contains(err.Error(), "i/o timeout") {
@@ -116,7 +113,6 @@ func isNetErrorRetryable(err error) bool {
 			return true
 		}
 	}
-	}
 	return false
 }
 
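The rename reflects a real behavioral fix: Go's http transport wraps request failures in *url.Error, so the old top-level net.Error match rarely fired. A simplified, standalone re-statement of the new classification (this is a sketch of the idea, not the vendored function itself):

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

// retryable re-states the core of the new check: unwrap *url.Error and
// inspect the wrapped error's concrete type, instead of matching
// net.Error at the top level.
func retryable(err error) bool {
	if err == nil {
		return false
	}
	if e, ok := err.(*url.Error); ok {
		switch e.Err.(type) {
		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
			return true
		}
	}
	return false
}

func main() {
	// http.Client.Do returns errors shaped like this wrapped one.
	wrapped := &url.Error{
		Op:  "Get",
		URL: "https://example.invalid",
		Err: &net.OpError{Op: "dial", Net: "tcp"},
	}
	fmt.Println(retryable(wrapped))                  // true: unwraps to *net.OpError
	fmt.Println(retryable(fmt.Errorf("some error"))) // false: not a *url.Error
}
```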
vendor/github.com/minio/minio-go/utils.go | 3 (generated, vendored)
@@ -223,6 +223,7 @@ var supportedHeaders = []string{
 	"content-disposition",
 	"content-language",
 	"x-amz-website-redirect-location",
+	"expires",
 	// Add more supported headers here.
 }
 
@@ -267,5 +268,5 @@ func isSSEHeader(headerKey string) bool {
 func isAmzHeader(headerKey string) bool {
 	key := strings.ToLower(headerKey)
 
-	return strings.HasPrefix(key, "x-amz-meta-") || key == "x-amz-acl"
+	return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey)
}
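The widened predicate is what lets getUserMetaHeadersMap (above) pass grant and SSE headers through unprefixed. A standalone re-statement so the effect is easy to see; the real isAmzHeader and isSSEHeader live inside minio-go, and this copy approximates the SSE check with a single prefix match:

```go
package main

import (
	"fmt"
	"strings"
)

// isAmzHeaderSketch mirrors the widened predicate: user metadata,
// grant headers, the canned-ACL header, and (approximately) the
// server-side-encryption headers all count as amz headers now.
func isAmzHeaderSketch(headerKey string) bool {
	key := strings.ToLower(headerKey)
	return strings.HasPrefix(key, "x-amz-meta-") ||
		strings.HasPrefix(key, "x-amz-grant-") ||
		key == "x-amz-acl" ||
		strings.HasPrefix(key, "x-amz-server-side-encryption")
}

func main() {
	for _, h := range []string{
		"X-Amz-Meta-Project",           // true before and after the change
		"X-Amz-Grant-Read",             // newly recognized
		"X-Amz-Server-Side-Encryption", // newly recognized via the SSE check
		"Content-Type",                 // still not an amz header
	} {
		fmt.Println(h, isAmzHeaderSketch(h))
	}
}
```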
vendor/vendor.json | 6 (vendored)
@@ -645,10 +645,10 @@
 			"revisionTime": "2016-02-29T08:42:30-08:00"
 		},
 		{
-			"checksumSHA1": "hdWmWbGpljSQMBOcpcwhAnP2aaQ=",
+			"checksumSHA1": "Wbe5TjRIOZiWVu4l4dwzCw/uP9w=",
 			"path": "github.com/minio/minio-go",
-			"revision": "10531abd0af1579a12dc1977d67c0fec2b348679",
-			"revisionTime": "2018-06-13T23:01:28Z"
+			"revision": "519049881e73150d1bbeac1d443e7c96b76e1b8d",
+			"revisionTime": "2018-09-05T00:47:51Z"
 		},
 		{
 			"checksumSHA1": "Qsj+6JPmJ8R5rFNQSHqRb8xAwOw=",