mirror of https://github.com/minio/minio.git
remove GetObject from ObjectLayer interface (#11635)
This commit removes the `GetObject` method from the `ObjectLayer` interface. The `GetObject` method is no longer used by the HTTP handlers implementing the high-level S3 semantics. Instead, they use the `GetObjectNInfo` method, which returns both an object handle and the object metadata. Therefore, it is no longer necessary for a concrete `ObjectLayer` to implement `GetObject`.
This commit is contained in:
parent f9f6fd0421
commit 1f659204a2
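As a rough illustration only (not code from this commit), a caller that previously used ObjectLayer.GetObject can be expressed in terms of GetObjectNInfo along the following lines. readRange is a hypothetical helper name; it assumes HTTPRangeSpec offsets are inclusive, that passing a nil http.Header is acceptable, and that the returned *GetObjectReader carries the metadata and must be closed by the caller.

// Sketch: replacing a GetObject call with GetObjectNInfo.
// readRange is hypothetical and not part of this commit.
func readRange(ctx context.Context, api ObjectLayer, bucket, object string, offset, length int64, w io.Writer) error {
    rs := &HTTPRangeSpec{Start: offset, End: offset + length - 1} // assumes inclusive offsets
    gr, err := api.GetObjectNInfo(ctx, bucket, object, rs, nil, readLock, ObjectOptions{})
    if err != nil {
        return err // per the interface contract, gr is nil whenever err != nil
    }
    defer gr.Close()
    // gr streams the object body; the object metadata is available on the reader as well.
    _, err = io.Copy(w, gr)
    return err
}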
@@ -19,7 +19,6 @@ package cmd
 import (
     "bytes"
     "context"
-    "io/ioutil"
     "math"
     "math/rand"
     "strconv"
@@ -175,56 +174,6 @@ func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int)
     runPutObjectBenchmarkParallel(b, objLayer, objSize)
 }
 
-// Benchmark utility functions for ObjectLayer.GetObject().
-// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
-func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
-    // obtains random bucket name.
-    bucket := getRandomBucketName()
-    // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
-    if err != nil {
-        b.Fatal(err)
-    }
-
-    textData := generateBytesData(objSize)
-
-    // generate etag for the generated data.
-    // etag of the data to written is required as input for PutObject.
-    // PutObject is the functions which writes the data onto the FS/Erasure backend.
-
-    // get text data generated for number of bytes equal to object size.
-    md5hex := getMD5Hash(textData)
-    sha256hex := ""
-
-    for i := 0; i < 10; i++ {
-        // insert the object.
-        var objInfo ObjectInfo
-        objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-            mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
-        if err != nil {
-            b.Fatal(err)
-        }
-        if objInfo.ETag != md5hex {
-            b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
-        }
-    }
-
-    // benchmark utility which helps obtain number of allocations and bytes allocated per ops.
-    b.ReportAllocs()
-    // the actual benchmark for GetObject starts here. Reset the benchmark timer.
-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
-        var buffer = new(bytes.Buffer)
-        err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
-        if err != nil {
-            b.Error(err)
-        }
-    }
-    // Benchmark ends here. Stop timer.
-    b.StopTimer()
-
-}
-
 // randomly picks a character and returns its equivalent byte array.
 func getRandomByte() []byte {
     const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
@@ -240,38 +189,6 @@ func generateBytesData(size int) []byte {
     return bytes.Repeat(getRandomByte(), size)
 }
 
-// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
-func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
-    // create a temp Erasure/FS backend.
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    objLayer, disks, err := prepareTestBackend(ctx, instanceType)
-    if err != nil {
-        b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
-    }
-    // cleaning up the backend by removing all the directories and files created.
-    defer removeRoots(disks)
-
-    // uses *testing.B and the object Layer to run the benchmark.
-    runGetObjectBenchmark(b, objLayer, objSize)
-}
-
-// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
-func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
-    // create a temp Erasure/FS backend.
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    objLayer, disks, err := prepareTestBackend(ctx, instanceType)
-    if err != nil {
-        b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
-    }
-    // cleaning up the backend by removing all the directories and files created.
-    defer removeRoots(disks)
-
-    // uses *testing.B and the object Layer to run the benchmark.
-    runGetObjectBenchmarkParallel(b, objLayer, objSize)
-}
-
 // Parallel benchmark utility functions for ObjectLayer.PutObject().
 // Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
 func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
@@ -315,58 +232,3 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     // Benchmark ends here. Stop timer.
     b.StopTimer()
 }
-
-// Parallel benchmark utility functions for ObjectLayer.GetObject().
-// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
-func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
-    // obtains random bucket name.
-    bucket := getRandomBucketName()
-    // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
-    if err != nil {
-        b.Fatal(err)
-    }
-
-    // get text data generated for number of bytes equal to object size.
-    textData := generateBytesData(objSize)
-    // generate md5sum for the generated data.
-    // md5sum of the data to written is required as input for PutObject.
-    // PutObject is the functions which writes the data onto the FS/Erasure backend.
-
-    md5hex := getMD5Hash([]byte(textData))
-    sha256hex := ""
-
-    for i := 0; i < 10; i++ {
-        // insert the object.
-        var objInfo ObjectInfo
-        objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
-            mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
-        if err != nil {
-            b.Fatal(err)
-        }
-        if objInfo.ETag != md5hex {
-            b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
-        }
-    }
-
-    // benchmark utility which helps obtain number of allocations and bytes allocated per ops.
-    b.ReportAllocs()
-    // the actual benchmark for GetObject starts here. Reset the benchmark timer.
-    b.ResetTimer()
-    b.RunParallel(func(pb *testing.PB) {
-        i := 0
-        for pb.Next() {
-            err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
-            if err != nil {
-                b.Error(err)
-            }
-            i++
-            if i == 10 {
-                i = 0
-            }
-        }
-    })
-    // Benchmark ends here. Stop timer.
-    b.StopTimer()
-
-}
-
@@ -595,27 +595,6 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
     return gr, ObjectNotFound{Bucket: bucket, Object: object}
 }
 
-func (z *erasureServerPools) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
-    if err := checkGetObjArgs(ctx, bucket, object); err != nil {
-        return err
-    }
-
-    object = encodeDirObject(object)
-    for _, pool := range z.serverPools {
-        if err := pool.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts); err != nil {
-            if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
-                continue
-            }
-            return err
-        }
-        return nil
-    }
-    if opts.VersionID != "" {
-        return VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
-    }
-    return ObjectNotFound{Bucket: bucket, Object: object}
-}
-
 func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
     if err = checkGetObjArgs(ctx, bucket, object); err != nil {
         return objInfo, err
@@ -21,7 +21,6 @@ import (
     "errors"
     "fmt"
     "hash/crc32"
-    "io"
     "math/rand"
     "net/http"
     "sort"
@@ -784,13 +783,6 @@ func (s *erasureSets) GetObjectNInfo(ctx context.Context, bucket, object string,
     return set.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
 }
 
-// GetObject - reads an object from the hashedSet based on the object name.
-func (s *erasureSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
-    set := s.getHashedSet(object)
-    auditObjectErasureSet(ctx, object, set, s.poolNumber)
-    return set.GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
-}
-
 func (s *erasureSets) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
     if parent == "." {
         return false
cmd/fs-v1.go (35 changed lines)
@@ -768,41 +768,6 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
     return objReaderFn(reader, h, opts.CheckPrecondFn, closeFn)
 }
 
-// GetObject - reads an object from the disk.
-// Supports additional parameters like offset and length
-// which are synonymous with HTTP Range requests.
-//
-// startOffset indicates the starting read location of the object.
-// length indicates the total length of the object.
-func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
-    if opts.VersionID != "" && opts.VersionID != nullVersionID {
-        return VersionNotFound{
-            Bucket:    bucket,
-            Object:    object,
-            VersionID: opts.VersionID,
-        }
-    }
-
-    if err = checkGetObjArgs(ctx, bucket, object); err != nil {
-        return err
-    }
-
-    // Lock the object before reading.
-    lk := fs.NewNSLock(bucket, object)
-    if err := lk.GetRLock(ctx, globalOperationTimeout); err != nil {
-        logger.LogIf(ctx, err)
-        return err
-    }
-    defer lk.RUnlock()
-
-    atomic.AddInt64(&fs.activeIOCount, 1)
-    defer func() {
-        atomic.AddInt64(&fs.activeIOCount, -1)
-    }()
-
-    return fs.getObject(ctx, bucket, object, offset, length, writer, etag, true)
-}
-
 // getObject - wrapper for GetObject
 func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, lock bool) (err error) {
     if _, err = fs.statBucketDir(ctx, bucket); err != nil {
@@ -799,7 +799,7 @@ func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string
 
     pr, pw := io.Pipe()
     go func() {
-        err := a.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.InnerETag, opts)
+        err := a.getObject(ctx, bucket, object, startOffset, length, pw, objInfo.InnerETag, opts)
         pw.CloseWithError(err)
     }()
     // Setup cleanup function to cause the above go-routine to
@@ -814,7 +814,7 @@ func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
+func (a *azureObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
     // startOffset cannot be negative.
     if startOffset < 0 {
         return azureToObjectError(minio.InvalidRange{}, bucket, object)
@@ -751,7 +751,7 @@ func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string,
 
     pr, pw := io.Pipe()
     go func() {
-        err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
+        err := l.getObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
         pw.CloseWithError(err)
     }()
     // Setup cleanup function to cause the above go-routine to
@@ -766,7 +766,7 @@ func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string,
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
+func (l *gcsGateway) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
     // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first,
     // otherwise gcs will just return object not exist in case of non-existing bucket
     if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil {
@@ -591,7 +591,7 @@ func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
 
     pr, pw := io.Pipe()
     go func() {
-        nerr := n.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
+        nerr := n.getObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts)
         pw.CloseWithError(nerr)
     }()
 
@@ -614,7 +614,7 @@ func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstB
     })
 }
 
-func (n *hdfsObjects) GetObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
+func (n *hdfsObjects) getObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
     if _, err := n.clnt.Stat(n.hdfsPathJoin(bucket)); err != nil {
         return hdfsToObjectErr(ctx, err, bucket)
     }
@@ -226,7 +226,7 @@ func (l *s3EncObjects) getGWMetadata(ctx context.Context, bucket, metaFileName s
         return m, err1
     }
     var buffer bytes.Buffer
-    err = l.s3Objects.GetObject(ctx, bucket, metaFileName, 0, oi.Size, &buffer, oi.ETag, minio.ObjectOptions{})
+    err = l.s3Objects.getObject(ctx, bucket, metaFileName, 0, oi.Size, &buffer, oi.ETag, minio.ObjectOptions{})
     if err != nil {
         return m, err
     }
@@ -272,7 +272,7 @@ func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string,
     dmeta, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(key))
     if err != nil {
         // unencrypted content
-        return l.s3Objects.GetObject(ctx, bucket, key, startOffset, length, writer, etag, o)
+        return l.s3Objects.getObject(ctx, bucket, key, startOffset, length, writer, etag, o)
     }
     if startOffset < 0 {
         logger.LogIf(ctx, minio.InvalidRange{})
@@ -303,7 +303,7 @@ func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string,
     if _, _, err := dmeta.ObjectToPartOffset(ctx, endOffset); err != nil {
         return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
     }
-    return l.s3Objects.GetObject(ctx, bucket, key, partOffset, endOffset, writer, dmeta.ETag, o)
+    return l.s3Objects.getObject(ctx, bucket, key, partOffset, endOffset, writer, dmeta.ETag, o)
 }
 
 // GetObjectNInfo - returns object info and locked object ReadCloser
@@ -403,7 +403,7 @@ func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
 
     pr, pw := io.Pipe()
     go func() {
-        err := l.GetObject(ctx, bucket, object, off, length, pw, objInfo.ETag, opts)
+        err := l.getObject(ctx, bucket, object, off, length, pw, objInfo.ETag, opts)
         pw.CloseWithError(err)
     }()
 
@@ -419,7 +419,7 @@ func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, o minio.ObjectOptions) error {
+func (l *s3Objects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, o minio.ObjectOptions) error {
     if length < 0 && length != -1 {
         return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key)
     }
@@ -1,613 +0,0 @@
-/*
- * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cmd
-
-import (
-    "bytes"
-    "context"
-    "fmt"
-    "io"
-    "os"
-    "runtime"
-    "strings"
-    "testing"
-
-    humanize "github.com/dustin/go-humanize"
-)
-
-// Wrapper for calling GetObject tests for both Erasure multiple disks and single node setup.
-func TestGetObject(t *testing.T) {
-    ExecExtendedObjectLayerTest(t, testGetObject)
-}
-
-// ObjectLayer.GetObject is called with series of cases for valid and erroneous inputs and the result is validated.
-func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
-    // Setup for the tests.
-    bucketName := getRandomBucketName()
-    objectName := "test-object"
-    emptyDirName := "test-empty-dir/"
-
-    // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
-    // Stop the test if creation of the bucket fails.
-    if err != nil {
-        t.Fatalf("%s : %s", instanceType, err.Error())
-    }
-
-    // set of byte data for PutObject.
-    // object has to be created before running tests for GetObject.
-    // this is required even to assert the GetObject data,
-    // since dataInserted === dataFetched back is a primary criteria for any object storage this assertion is critical.
-    bytesData := []struct {
-        byteData []byte
-    }{
-        // Regular data
-        {generateBytesData(6 * humanize.MiByte)},
-        // Empty data for empty directory
-        {},
-    }
-    // set of inputs for uploading the objects before tests for downloading is done.
-    putObjectInputs := []struct {
-        bucketName    string
-        objectName    string
-        contentLength int64
-        textData      []byte
-        metaData      map[string]string
-    }{
-        // case - 1.
-        {bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
-        {bucketName, emptyDirName, int64(len(bytesData[1].byteData)), bytesData[1].byteData, make(map[string]string)},
-    }
-    // iterate through the above set of inputs and upkoad the object.
-    for i, input := range putObjectInputs {
-        // uploading the object.
-        _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), ObjectOptions{UserDefined: input.metaData})
-        // if object upload fails stop the test.
-        if err != nil {
-            t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
-        }
-    }
-    // set of empty buffers used to fill GetObject data.
-    buffers := []*bytes.Buffer{
-        new(bytes.Buffer),
-        new(bytes.Buffer),
-        new(bytes.Buffer),
-    }
-
-    // test cases with set of inputs
-    testCases := []struct {
-        bucketName  string
-        objectName  string
-        startOffset int64
-        length      int64
-        // data obtained/fetched from GetObject.
-        getObjectData *bytes.Buffer
-        // writer which governs the write into the `getObjectData`.
-        writer io.Writer
-        // flag indicating whether the test for given ase should pass.
-        shouldPass bool
-        // expected Result.
-        expectedData []byte
-        err          error
-    }{
-        // Test case 1-4.
-        // Cases with invalid bucket names.
-        {".test", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: .test")},
-        {"------", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: ------")},
-        {"$this-is-not-valid-too", "obj", 0, 0, nil, nil, false,
-            []byte(""), fmt.Errorf("%s", "Bucket name invalid: $this-is-not-valid-too")},
-        {"a", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: a")},
-        // Test case - 5.
-        // Case with invalid object names.
-        {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"/")},
-        // Test case - 6.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF},
-        // Test case with start offset set to 0 and length set to size of the object.
-        // Fetching the entire object.
-        // Test case - 7.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], true, bytesData[0].byteData, nil},
-        // Test case with `length` parameter set to a negative value.
-        // Test case - 8.
-        {bucketName, objectName, 0, int64(-1), buffers[1], buffers[1], true, bytesData[0].byteData, nil},
-        // Test case with content-range 1 to objectSize .
-        // Test case - 9.
-        {bucketName, objectName, 1, int64(len(bytesData[0].byteData) - 1), buffers[1], buffers[1], true, bytesData[0].byteData[1:], nil},
-        // Test case with content-range 100 to objectSize - 100.
-        // Test case - 10.
-        {bucketName, objectName, 100, int64(len(bytesData[0].byteData) - 200), buffers[1], buffers[1], true,
-            bytesData[0].byteData[100 : len(bytesData[0].byteData)-100], nil},
-        // Test case with offset greater than the size of the object
-        // Test case - 11.
-        {bucketName, objectName, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), buffers[0],
-            NewEOFWriter(buffers[0], 100), false, []byte{},
-            InvalidRange{int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), int64(len(bytesData[0].byteData))}},
-        // Test case with offset greater than the size of the object.
-        // Test case - 12.
-        {bucketName, objectName, -1, int64(len(bytesData[0].byteData)), buffers[0], new(bytes.Buffer), false, []byte{}, errUnexpected},
-        // Test case length parameter is more than the object size.
-        // Test case - 13.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData) + 1), buffers[1], buffers[1], false, bytesData[0].byteData,
-            InvalidRange{0, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData))}},
-        // Test case with offset + length > objectSize parameter set to a negative value.
-        // Test case - 14.
-        {bucketName, objectName, 2, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], false, bytesData[0].byteData,
-            InvalidRange{2, int64(len(bytesData[0].byteData)), int64(len(bytesData[0].byteData))}},
-        // Test case with the writer set to nil.
-        // Test case - 15.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[1], nil, false, bytesData[0].byteData, errUnexpected},
-        // Test case - 16.
-        // Test case when it is an empty directory
-        {bucketName, emptyDirName, 0, int64(len(bytesData[1].byteData)), buffers[2], buffers[2], true, bytesData[1].byteData, nil},
-    }
-
-    for i, testCase := range testCases {
-        err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "", ObjectOptions{})
-        if err != nil && testCase.shouldPass {
-            t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
-        }
-        if err == nil && !testCase.shouldPass {
-            t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, instanceType, testCase.err.Error())
-        }
-        // Failed as expected, but does it fail for the expected reason.
-        if err != nil && !testCase.shouldPass {
-            if !strings.Contains(err.Error(), testCase.err.Error()) {
-                t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, instanceType, testCase.err.Error(), err.Error())
-            }
-        }
-        // Since there are cases for which GetObject fails, this is
-        // necessary. Test passes as expected, but the output values
-        // are verified for correctness here.
-        if err == nil && testCase.shouldPass {
-            if !bytes.Equal(testCase.expectedData, testCase.getObjectData.Bytes()) {
-                t.Errorf("Test %d: %s: Data Mismatch: Expected data and the fetched data from GetObject doesn't match.", i+1, instanceType)
-            }
-            // empty the buffer so that it can be used to further cases.
-            testCase.getObjectData.Reset()
-        }
-    }
-}
-
-// Wrapper for calling GetObject with permission denied expected
-func TestGetObjectPermissionDenied(t *testing.T) {
-    // Windows doesn't support Chmod under golang
-    if runtime.GOOS != globalWindowsOSName {
-        ExecObjectLayerDiskAlteredTest(t, testGetObjectPermissionDenied)
-    }
-}
-
-// Test GetObject when we are allowed to access some dirs and objects
-func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
-    // Setup for the tests.
-    bucketName := getRandomBucketName()
-    // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
-    // Stop the test if creation of the bucket fails.
-    if err != nil {
-        t.Fatalf("%s : %s", instanceType, err.Error())
-    }
-
-    bytesData := []struct {
-        byteData []byte
-    }{
-        {generateBytesData(6 * humanize.MiByte)},
-    }
-    // set of inputs for uploading the objects before tests for downloading is done.
-    putObjectInputs := []struct {
-        bucketName    string
-        objectName    string
-        contentLength int64
-        textData      []byte
-        metaData      map[string]string
-    }{
-        {bucketName, "test-object1", int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
-        {bucketName, "test-object2", int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
-        {bucketName, "dir/test-object3", int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
-    }
-    // iterate through the above set of inputs and upkoad the object.
-    for i, input := range putObjectInputs {
-        // uploading the object.
-        _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), ObjectOptions{UserDefined: input.metaData})
-        // if object upload fails stop the test.
-        if err != nil {
-            t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
-        }
-    }
-
-    // set of empty buffers used to fill GetObject data.
-    buffers := []*bytes.Buffer{
-        new(bytes.Buffer),
-    }
-
-    // test cases with set of inputs
-    testCases := []struct {
-        bucketName  string
-        objectName  string
-        chmodPath   string
-        startOffset int64
-        length      int64
-        // data obtained/fetched from GetObject.
-        getObjectData *bytes.Buffer
-        // writer which governs the write into the `getObjectData`.
-        writer io.Writer
-        // flag indicating whether the test for given ase should pass.
-        shouldPass bool
-        // expected Result.
-        expectedData []byte
-        err          error
-    }{
-        // Test 1 - chmod 000 bucket/test-object1
-        {bucketName, "test-object1", "test-object1", 0, int64(len(bytesData[0].byteData)), buffers[0], buffers[0], false, bytesData[0].byteData, PrefixAccessDenied{Bucket: bucketName, Object: "test-object1"}},
-        // Test 2 - chmod 000 bucket/dir/
-        {bucketName, "dir/test-object2", "dir", 0, int64(len(bytesData[0].byteData)), buffers[0], buffers[0], false, bytesData[0].byteData, PrefixAccessDenied{Bucket: bucketName, Object: "dir/test-object2"}},
-        // Test 3 - chmod 000 bucket/
-        {bucketName, "test-object3", "", 0, int64(len(bytesData[0].byteData)), buffers[0], buffers[0], false, bytesData[0].byteData, PrefixAccessDenied{Bucket: bucketName, Object: "test-object3"}},
-    }
-
-    for i, testCase := range testCases {
-        for _, d := range disks {
-            err = os.Chmod(d+SlashSeparator+testCase.bucketName+SlashSeparator+testCase.chmodPath, 0)
-            if err != nil {
-                t.Fatalf("Test %d, Unable to chmod: %v", i+1, err)
-            }
-        }
-
-        err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "", ObjectOptions{})
-        if err != nil && testCase.shouldPass {
-            t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
-        }
-        if err == nil && !testCase.shouldPass {
-            t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, instanceType, testCase.err.Error())
-        }
-        // Failed as expected, but does it fail for the expected reason.
-        if err != nil && !testCase.shouldPass {
-            if !strings.Contains(err.Error(), testCase.err.Error()) {
-                t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, instanceType, testCase.err.Error(), err.Error())
-            }
-        }
-        // Since there are cases for which GetObject fails, this is
-        // necessary. Test passes as expected, but the output values
-        // are verified for correctness here.
-        if err == nil && testCase.shouldPass {
-            if !bytes.Equal(testCase.expectedData, testCase.getObjectData.Bytes()) {
-                t.Errorf("Test %d: %s: Data Mismatch: Expected data and the fetched data from GetObject doesn't match.", i+1, instanceType)
-            }
-            // empty the buffer so that it can be used to further cases.
-            testCase.getObjectData.Reset()
-        }
-    }
-
-}
-
-// Wrapper for calling GetObject tests for both Erasure multiple disks and single node setup.
-func TestGetObjectDiskNotFound(t *testing.T) {
-    ExecObjectLayerDiskAlteredTest(t, testGetObjectDiskNotFound)
-}
-
-// ObjectLayer.GetObject is called with series of cases for valid and erroneous inputs and the result is validated.
-// Before the Get Object call Erasure disks are moved so that the quorum just holds.
-func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []string, t *testing.T) {
-    // Setup for the tests.
-    bucketName := getRandomBucketName()
-    objectName := "test-object"
-    // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
-    // Stop the test if creation of the bucket fails.
-    if err != nil {
-        t.Fatalf("%s : %s", instanceType, err.Error())
-    }
-
-    // set of byte data for PutObject.
-    // object has to be created before running tests for GetObject.
-    // this is required even to assert the GetObject data,
-    // since dataInserted === dataFetched back is a primary criteria for any object storage this assertion is critical.
-    bytesData := []struct {
-        byteData []byte
-    }{
-        {generateBytesData(6 * humanize.MiByte)},
-    }
-    // set of inputs for uploading the objects before tests for downloading is done.
-    putObjectInputs := []struct {
-        bucketName    string
-        objectName    string
-        contentLength int64
-        textData      []byte
-        metaData      map[string]string
-    }{
-        // case - 1.
-        {bucketName, objectName, int64(len(bytesData[0].byteData)), bytesData[0].byteData, make(map[string]string)},
-    }
-    // iterate through the above set of inputs and upkoad the object.
-    for i, input := range putObjectInputs {
-        // uploading the object.
-        _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), ObjectOptions{UserDefined: input.metaData})
-        // if object upload fails stop the test.
-        if err != nil {
-            t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
-        }
-    }
-
-    // Take 4 disks down before GetObject is called, one more we loose quorum on 16 disk node.
-    for _, disk := range disks[:4] {
-        os.RemoveAll(disk)
-    }
-
-    // set of empty buffers used to fill GetObject data.
-    buffers := []*bytes.Buffer{
-        new(bytes.Buffer),
-        new(bytes.Buffer),
-    }
-
-    // test cases with set of inputs
-    testCases := []struct {
-        bucketName  string
-        objectName  string
-        startOffset int64
-        length      int64
-        // data obtained/fetched from GetObject.
-        getObjectData *bytes.Buffer
-        // writer which governs the write into the `getObjectData`.
-        writer io.Writer
-        // flag indicating whether the test for given ase should pass.
-        shouldPass bool
-        // expected Result.
-        expectedData []byte
-        err          error
-    }{
-        // Test case 1-4.
-        // Cases with invalid bucket names.
-        {".test", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: .test")},
-        {"------", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: ------")},
-        {"$this-is-not-valid-too", "obj", 0, 0, nil, nil, false,
-            []byte(""), fmt.Errorf("%s", "Bucket name invalid: $this-is-not-valid-too")},
-        {"a", "obj", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket name invalid: a")},
-        // Test case - 5.
-        // Case with invalid object names.
-        {bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"/")},
-        // Test case - 7.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF},
-        // Test case with start offset set to 0 and length set to size of the object.
-        // Fetching the entire object.
-        // Test case - 8.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], true, bytesData[0].byteData, nil},
-        // Test case with `length` parameter set to a negative value.
-        // Test case - 9.
-        {bucketName, objectName, 0, int64(-1), buffers[1], buffers[1], true, bytesData[0].byteData, nil},
-        // Test case with `length` parameter set to a negative value and offset is positive.
-        // Test case - 10.
-        {bucketName, objectName, 1, int64(-1), buffers[1], buffers[1], true, bytesData[0].byteData[1:], nil},
-        // Test case with content-range 1 to objectSize .
-        // Test case - 11.
-        {bucketName, objectName, 1, int64(len(bytesData[0].byteData) - 1), buffers[1], buffers[1], true, bytesData[0].byteData[1:], nil},
-        // Test case with content-range 100 to objectSize - 100.
-        // Test case - 12.
-        {bucketName, objectName, 100, int64(len(bytesData[0].byteData) - 200), buffers[1], buffers[1], true,
-            bytesData[0].byteData[100 : len(bytesData[0].byteData)-100], nil},
-        // Test case with offset greater than the size of the object
-        // Test case - 13.
-        {bucketName, objectName, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), buffers[0],
-            NewEOFWriter(buffers[0], 100), false, []byte{},
-            InvalidRange{int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData)), int64(len(bytesData[0].byteData))}},
-        // Test case with offset greater than the size of the object.
-        // Test case - 14.
-        {bucketName, objectName, -1, int64(len(bytesData[0].byteData)), buffers[0], new(bytes.Buffer), false, []byte{}, errUnexpected},
-        // Test case length parameter is more than the object size.
-        // Test case - 15.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData) + 1), buffers[1], buffers[1], false, bytesData[0].byteData,
-            InvalidRange{0, int64(len(bytesData[0].byteData) + 1), int64(len(bytesData[0].byteData))}},
-        // Test case with offset + length > objectSize parameter set to a negative value.
-        // Test case - 16.
-        {bucketName, objectName, 2, int64(len(bytesData[0].byteData)), buffers[1], buffers[1], false, bytesData[0].byteData,
-            InvalidRange{2, int64(len(bytesData[0].byteData)), int64(len(bytesData[0].byteData))}},
-        // Test case with the writer set to nil.
-        // Test case - 17.
-        {bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[1], nil, false, bytesData[0].byteData, errUnexpected},
-    }
-
-    for i, testCase := range testCases {
-        err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "", ObjectOptions{})
-        if err != nil && testCase.shouldPass {
-            t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
-        }
-        if err == nil && !testCase.shouldPass {
-            t.Errorf("Test %d: %s: Expected to fail with <ERROR> \"%s\", but passed instead.", i+1, instanceType, testCase.err.Error())
-        }
-        // Failed as expected, but does it fail for the expected reason.
-        if err != nil && !testCase.shouldPass {
-            if !strings.Contains(err.Error(), testCase.err.Error()) {
-                t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead.", i+1, instanceType, testCase.err.Error(), err.Error())
-            }
-        }
-        // Since there are cases for which GetObject fails, this is
-        // necessary. Test passes as expected, but the output values
-        // are verified for correctness here.
-        if err == nil && testCase.shouldPass {
-            if !bytes.Equal(testCase.expectedData, testCase.getObjectData.Bytes()) {
-                t.Errorf("Test %d: %s: Data Mismatch: Expected data and the fetched data from GetObject doesn't match.", i+1, instanceType)
-            }
-            // empty the buffer so that it can be used to further cases.
-            testCase.getObjectData.Reset()
-        }
-    }
-}
-
-// Benchmarks for ObjectLayer.GetObject().
-// The intent is to benchmark GetObject for various sizes ranging from few bytes to 100MB.
-// Also each of these Benchmarks are run both Erasure and FS backends.
-
-// BenchmarkGetObjectVerySmallFS - Benchmark FS.GetObject() for object size of 10 bytes.
-func BenchmarkGetObjectVerySmallFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 10)
-}
-
-// BenchmarkGetObjectVerySmallErasure - Benchmark Erasure.GetObject() for object size of 10 bytes.
-func BenchmarkGetObjectVerySmallErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 10)
-}
-
-// BenchmarkGetObject10KbFS - Benchmark FS.GetObject() for object size of 10KB.
-func BenchmarkGetObject10KbFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 10*humanize.KiByte)
-}
-
-// BenchmarkGetObject10KbErasure - Benchmark Erasure.GetObject() for object size of 10KB.
-func BenchmarkGetObject10KbErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 10*humanize.KiByte)
-}
-
-// BenchmarkGetObject100KbFS - Benchmark FS.GetObject() for object size of 100KB.
-func BenchmarkGetObject100KbFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 100*humanize.KiByte)
-}
-
-// BenchmarkGetObject100KbErasure - Benchmark Erasure.GetObject() for object size of 100KB.
-func BenchmarkGetObject100KbErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 100*humanize.KiByte)
-}
-
-// BenchmarkGetObject1MbFS - Benchmark FS.GetObject() for object size of 1MB.
-func BenchmarkGetObject1MbFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 1*humanize.MiByte)
-}
-
-// BenchmarkGetObject1MbErasure - Benchmark Erasure.GetObject() for object size of 1MB.
-func BenchmarkGetObject1MbErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 1*humanize.MiByte)
-}
-
-// BenchmarkGetObject5MbFS - Benchmark FS.GetObject() for object size of 5MB.
-func BenchmarkGetObject5MbFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 5*humanize.MiByte)
-}
-
-// BenchmarkGetObject5MbErasure - Benchmark Erasure.GetObject() for object size of 5MB.
-func BenchmarkGetObject5MbErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 5*humanize.MiByte)
-}
-
-// BenchmarkGetObject10MbFS - Benchmark FS.GetObject() for object size of 10MB.
-func BenchmarkGetObject10MbFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 10*humanize.MiByte)
-}
-
-// BenchmarkGetObject10MbErasure - Benchmark Erasure.GetObject() for object size of 10MB.
-func BenchmarkGetObject10MbErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 10*humanize.MiByte)
-}
-
-// BenchmarkGetObject25MbFS - Benchmark FS.GetObject() for object size of 25MB.
-func BenchmarkGetObject25MbFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 25*humanize.MiByte)
-
-}
-
-// BenchmarkGetObject25MbErasure - Benchmark Erasure.GetObject() for object size of 25MB.
-func BenchmarkGetObject25MbErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 25*humanize.MiByte)
-}
-
-// BenchmarkGetObject50MbFS - Benchmark FS.GetObject() for object size of 50MB.
-func BenchmarkGetObject50MbFS(b *testing.B) {
-    benchmarkGetObject(b, "FS", 50*humanize.MiByte)
-}
-
-// BenchmarkGetObject50MbErasure - Benchmark Erasure.GetObject() for object size of 50MB.
-func BenchmarkGetObject50MbErasure(b *testing.B) {
-    benchmarkGetObject(b, "Erasure", 50*humanize.MiByte)
-}
-
-// parallel benchmarks for ObjectLayer.GetObject() .
-
-// BenchmarkGetObjectParallelVerySmallFS - Benchmark FS.GetObject() for object size of 10 bytes.
-func BenchmarkGetObjectParallelVerySmallFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 10)
-}
-
-// BenchmarkGetObjectParallelVerySmallErasure - Benchmark Erasure.GetObject() for object size of 10 bytes.
-func BenchmarkGetObjectParallelVerySmallErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 10)
-}
-
-// BenchmarkGetObjectParallel10KbFS - Benchmark FS.GetObject() for object size of 10KB.
-func BenchmarkGetObjectParallel10KbFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 10*humanize.KiByte)
-}
-
-// BenchmarkGetObjectParallel10KbErasure - Benchmark Erasure.GetObject() for object size of 10KB.
-func BenchmarkGetObjectParallel10KbErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 10*humanize.KiByte)
-}
-
-// BenchmarkGetObjectParallel100KbFS - Benchmark FS.GetObject() for object size of 100KB.
-func BenchmarkGetObjectParallel100KbFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 100*humanize.KiByte)
-}
-
-// BenchmarkGetObjectParallel100KbErasure - Benchmark Erasure.GetObject() for object size of 100KB.
-func BenchmarkGetObjectParallel100KbErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 100*humanize.KiByte)
-}
-
-// BenchmarkGetObjectParallel1MbFS - Benchmark FS.GetObject() for object size of 1MB.
-func BenchmarkGetObjectParallel1MbFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 1*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel1MbErasure - Benchmark Erasure.GetObject() for object size of 1MB.
-func BenchmarkGetObjectParallel1MbErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 1*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel5MbFS - Benchmark FS.GetObject() for object size of 5MB.
-func BenchmarkGetObjectParallel5MbFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 5*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel5MbErasure - Benchmark Erasure.GetObject() for object size of 5MB.
-func BenchmarkGetObjectParallel5MbErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 5*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel10MbFS - Benchmark FS.GetObject() for object size of 10MB.
-func BenchmarkGetObjectParallel10MbFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 10*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel10MbErasure - Benchmark Erasure.GetObject() for object size of 10MB.
-func BenchmarkGetObjectParallel10MbErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 10*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel25MbFS - Benchmark FS.GetObject() for object size of 25MB.
-func BenchmarkGetObjectParallel25MbFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 25*humanize.MiByte)
-
-}
-
-// BenchmarkGetObjectParallel25MbErasure - Benchmark Erasure.GetObject() for object size of 25MB.
-func BenchmarkGetObjectParallel25MbErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 25*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel50MbFS - Benchmark FS.GetObject() for object size of 50MB.
-func BenchmarkGetObjectParallel50MbFS(b *testing.B) {
-    benchmarkGetObjectParallel(b, "FS", 50*humanize.MiByte)
-}
-
-// BenchmarkGetObjectParallel50MbErasure - Benchmark Erasure.GetObject() for object size of 50MB.
-func BenchmarkGetObjectParallel50MbErasure(b *testing.B) {
-    benchmarkGetObjectParallel(b, "Erasure", 50*humanize.MiByte)
-}
@@ -112,7 +112,6 @@ type ObjectLayer interface {
     // IMPORTANTLY, when implementations return err != nil, this
    // function MUST NOT return a non-nil ReadCloser.
     GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (reader *GetObjectReader, err error)
-    GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error)
     GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
     PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
     CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
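The doc comment retained above states the GetObjectNInfo contract: when an implementation returns a non-nil error, it must not also return a non-nil reader. A minimal, hypothetical sketch of an implementation respecting that contract follows; exampleObjects and the NotImplemented placeholder return are illustrative only and not part of this commit.

// Sketch of the error contract for GetObjectNInfo implementations.
func (e *exampleObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (reader *GetObjectReader, err error) {
    if err := checkGetObjArgs(ctx, bucket, object); err != nil {
        return nil, err // error path: the reader stays nil, as the contract requires
    }
    // ... open the object, honor rs and lockType, wrap the stream in a *GetObjectReader ...
    return nil, NotImplemented{} // placeholder body for this sketch (assumed error type)
}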
@@ -162,3 +161,23 @@ type ObjectLayer interface {
     GetObjectTags(context.Context, string, string, ObjectOptions) (*tags.Tags, error)
     DeleteObjectTags(context.Context, string, string, ObjectOptions) (ObjectInfo, error)
 }
+
+// GetObject - TODO(aead): This function just acts as an adapter for GetObject tests and benchmarks
+// since the GetObject method of the ObjectLayer interface has been removed. Once, the
+// tests are adjusted to use GetObjectNInfo this function can be removed.
+func GetObject(ctx context.Context, api ObjectLayer, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
+    var header http.Header
+    if etag != "" {
+        header.Set("ETag", etag)
+    }
+    Range := &HTTPRangeSpec{Start: startOffset, End: startOffset + length}
+
+    reader, err := api.GetObjectNInfo(ctx, bucket, object, Range, header, readLock, opts)
+    if err != nil {
+        return err
+    }
+    defer reader.Close()
+
+    _, err = io.Copy(writer, reader)
+    return err
+}
@@ -210,7 +210,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
 
     for key, value := range objects {
         var byteBuffer bytes.Buffer
-        err = obj.GetObject(context.Background(), "bucket", key, 0, int64(len(value)), &byteBuffer, "", opts)
+        err = GetObject(context.Background(), obj, "bucket", key, 0, int64(len(value)), &byteBuffer, "", opts)
         if err != nil {
             t.Fatalf("%s: <ERROR> %s", instanceType, err)
         }
@@ -461,7 +461,7 @@ func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHan
     }
 
     var bytesBuffer bytes.Buffer
-    err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer, "", opts)
+    err = GetObject(context.Background(), obj, "bucket", "object", 0, length, &bytesBuffer, "", opts)
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -614,7 +614,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
-    err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer1, "", opts)
+    err = GetObject(context.Background(), obj, "bucket", "object", 0, length, &bytesBuffer1, "", opts)
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -627,7 +627,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
-    err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer2, "", opts)
+    err = GetObject(context.Background(), obj, "bucket", "object", 0, length, &bytesBuffer2, "", opts)
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -658,7 +658,7 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle
     }
 
     var bytesBuffer bytes.Buffer
-    err = obj.GetObject(context.Background(), "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "", opts)
+    err = GetObject(context.Background(), obj, "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "", opts)
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -789,7 +789,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
     }
 
     var byteBuffer bytes.Buffer
-    err = obj.GetObject(context.Background(), bucketName, objectName, 0, int64(len(content)), &byteBuffer, "", ObjectOptions{})
+    err = GetObject(context.Background(), obj, bucketName, objectName, 0, int64(len(content)), &byteBuffer, "", ObjectOptions{})
     if err != nil {
         t.Fatalf("Failed, %v", err)
     }