Use context.Background() instead of nil

Rename Context[Get|Set] -> [Get|Set]Context
Krishna Srinivas 2018-03-15 13:27:16 -07:00 committed by kannappanr
parent 33fe42df8a
commit 9ede179a21
45 changed files with 431 additions and 468 deletions
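The change is mechanical but worth spelling out: every ObjectLayer call that used to take nil as its context argument now receives context.Background(), because calling methods such as Done() or Value() on a nil context panics, whereas the root context is always safe to pass and to derive from. A minimal, self-contained sketch of the pattern (doWork is a hypothetical stand-in for the object-layer methods touched below, not MinIO code):

package main

import (
	"context"
	"fmt"
)

// doWork stands in for any API that, like the object layer in this commit,
// takes a context as its first argument.
func doWork(ctx context.Context, name string) error {
	select {
	case <-ctx.Done(): // calling Done() on a nil context would panic; context.Background() never does
		return ctx.Err()
	default:
		fmt.Println("working on", name)
		return nil
	}
}

func main() {
	// Callers that have no request-scoped context pass the non-nil root context.
	if err := doWork(context.Background(), "mybucket"); err != nil {
		fmt.Println("error:", err)
	}
}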

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"io"
@@ -204,7 +205,7 @@ func (atb *adminXLTestBed) TearDown() {
func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
// Create an object myobject under bucket mybucket.
bucketName := "mybucket"
- err := atb.objLayer.MakeBucketWithLocation(nil, bucketName, "")
+ err := atb.objLayer.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil {
t.Fatalf("Failed to make bucket %s - %v", bucketName,
err)
@@ -215,7 +216,7 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
objName := "myobject"
for i := 0; i < 10; i++ {
objectName := fmt.Sprintf("%s-%d", objName, i)
- _, err = atb.objLayer.PutObject(nil, bucketName, objectName,
+ _, err = atb.objLayer.PutObject(context.Background(), bucketName, objectName,
mustGetHashReader(t, bytes.NewReader([]byte("hello")),
int64(len("hello")), "", ""), nil)
if err != nil {
@@ -228,13 +229,13 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
// create a multipart upload (incomplete)
{
objName := "mpObject"
- uploadID, err := atb.objLayer.NewMultipartUpload(nil, bucketName,
+ uploadID, err := atb.objLayer.NewMultipartUpload(context.Background(), bucketName,
objName, nil)
if err != nil {
t.Fatalf("mp new error: %v", err)
}
- _, err = atb.objLayer.PutObjectPart(nil, bucketName, objName,
+ _, err = atb.objLayer.PutObjectPart(context.Background(), bucketName, objName,
uploadID, 3, mustGetHashReader(t, bytes.NewReader(
[]byte("hello")), int64(len("hello")), "", ""))
if err != nil {
@@ -248,11 +249,11 @@ func (atb *adminXLTestBed) CleanupHealTestData(t *testing.T) {
bucketName := "mybucket"
objName := "myobject"
for i := 0; i < 10; i++ {
- atb.objLayer.DeleteObject(nil, bucketName,
+ atb.objLayer.DeleteObject(context.Background(), bucketName,
fmt.Sprintf("%s-%d", objName, i))
}
- atb.objLayer.DeleteBucket(nil, bucketName)
+ atb.objLayer.DeleteBucket(context.Background(), bucketName)
}
// initTestObjLayer - Helper function to initialize an XL-based object

View File

@@ -321,7 +321,7 @@ type healSequence struct {
func newHealSequence(bucket, objPrefix, clientAddr string,
numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {
- ctx := logger.ContextSet(context.Background(), &logger.ReqInfo{clientAddr, "", "", "Heal", bucket, objPrefix, nil})
+ ctx := logger.SetContext(context.Background(), &logger.ReqInfo{clientAddr, "", "", "Heal", bucket, objPrefix, nil})
return &healSequence{
bucket: bucket,

View File

@@ -17,6 +17,7 @@
package cmd
import (
+ "context"
"encoding/json"
"fmt"
"net"
@@ -83,7 +84,7 @@ func (lc localAdminClient) ReInitFormat(dryRun bool) error {
if objectAPI == nil {
return errServerNotInitialized
}
- _, err := objectAPI.HealFormat(nil, dryRun)
+ _, err := objectAPI.HealFormat(context.Background(), dryRun)
return err
}
@@ -137,7 +138,7 @@ func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
if objLayer == nil {
return sid, errServerNotInitialized
}
- storage := objLayer.StorageInfo(nil)
+ storage := objLayer.StorageInfo(context.Background())
return ServerInfoData{
StorageInfo: storage,

View File

@@ -17,6 +17,7 @@
package cmd
import (
+ "context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -89,7 +90,7 @@ func (s *adminCmd) ReInitFormat(args *ReInitFormatArgs, reply *AuthRPCReply) err
if err := args.IsAuthenticated(); err != nil {
return err
}
- _, err := newObjectLayerFn().HealFormat(nil, args.DryRun)
+ _, err := newObjectLayerFn().HealFormat(context.Background(), args.DryRun)
return err
}
@@ -118,7 +119,7 @@ func (s *adminCmd) ServerInfoData(args *AuthRPCArgs, reply *ServerInfoDataReply)
if objLayer == nil {
return errServerNotInitialized
}
- storageInfo := objLayer.StorageInfo(nil)
+ storageInfo := objLayer.StorageInfo(context.Background())
reply.ServerInfoData = ServerInfoData{
Properties: ServerProperties{

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"io/ioutil"
"math"
"math/rand"
@@ -40,7 +41,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
- err = obj.MakeBucketWithLocation(nil, bucket, "")
+ err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -60,7 +61,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
- objInfo, err := obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
+ objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
@@ -82,7 +83,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
object := getRandomObjectName()
// create bucket.
- err = obj.MakeBucketWithLocation(nil, bucket, "")
+ err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -97,7 +98,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
// generate md5sum for the generated data.
// md5sum of the data to written is required as input for NewMultipartUpload.
metadata := make(map[string]string)
- uploadID, err = obj.NewMultipartUpload(nil, bucket, object, metadata)
+ uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, metadata)
if err != nil {
b.Fatal(err)
}
@@ -121,7 +122,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
}
md5hex = getMD5Hash([]byte(textPartData))
var partInfo PartInfo
- partInfo, err = obj.PutObjectPart(nil, bucket, object, uploadID, j,
+ partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex))
if err != nil {
b.Fatal(err)
@@ -204,7 +205,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
- err = obj.MakeBucketWithLocation(nil, bucket, "")
+ err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -223,7 +224,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
- objInfo, err = obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
+ objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
@@ -239,7 +240,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
- err = obj.GetObject(nil, bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
+ err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
if err != nil {
b.Error(err)
}
@@ -316,7 +317,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
- err = obj.MakeBucketWithLocation(nil, bucket, "")
+ err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -339,7 +340,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
i := 0
for pb.Next() {
// insert the object.
- objInfo, err := obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
+ objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
@@ -367,7 +368,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
- err = obj.MakeBucketWithLocation(nil, bucket, "")
+ err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil {
b.Fatal(err)
}
@@ -385,7 +386,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
- objInfo, err = obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
+ objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
if err != nil {
b.Fatal(err)
@@ -402,7 +403,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
- err = obj.GetObject(nil, bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
+ err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
if err != nil {
b.Error(err)
}

View File

@@ -17,6 +17,7 @@
package cmd
import (
+ "context"
"encoding/base64"
"encoding/xml"
"io"
@@ -58,7 +59,7 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
}
// Fetch bucket policy, if policy is not set return access denied.
- p, err := objAPI.GetBucketPolicy(nil, bucket)
+ p, err := objAPI.GetBucketPolicy(context.Background(), bucket)
if err != nil {
return ErrAccessDenied
}
@@ -91,7 +92,7 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
// Check if the action is allowed on the bucket/prefix.
func isBucketActionAllowed(action, bucket, prefix string, objectAPI ObjectLayer) bool {
- bp, err := objectAPI.GetBucketPolicy(nil, bucket)
+ bp, err := objectAPI.GetBucketPolicy(context.Background(), bucket)
if err != nil {
return false
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"encoding/xml"
"io/ioutil"
"net/http"
@@ -624,7 +625,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
- _, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
+ _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"fmt"
"io"
"io/ioutil"
@@ -252,7 +253,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
credentials auth.Credentials, t *testing.T) {
bucketName1 := fmt.Sprintf("%s-1", bucketName)
- if err := obj.MakeBucketWithLocation(nil, bucketName1, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName1, ""); err != nil {
t.Fatal(err)
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"encoding/json"
"io"
"reflect"
@@ -82,7 +83,7 @@ func initBucketPolicies(objAPI ObjectLayer) (*bucketPolicies, error) {
}
// List buckets to proceed loading all notification configuration.
- buckets, err := objAPI.ListBuckets(nil)
+ buckets, err := objAPI.ListBuckets(context.Background())
if err != nil {
return nil, errors.Cause(err)
}
@@ -118,7 +119,7 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
var buffer bytes.Buffer
- err = objAPI.GetObject(nil, minioMetaBucket, policyPath, 0, -1, &buffer, "")
+ err = objAPI.GetObject(context.Background(), minioMetaBucket, policyPath, 0, -1, &buffer, "")
if err != nil {
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
return nil, PolicyNotFound{Bucket: bucket}
@@ -152,7 +153,7 @@ func ReadBucketPolicy(bucket string, objAPI ObjectLayer) (policy.BucketAccessPol
// if no policies are found.
func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
- err := objAPI.DeleteObject(nil, minioMetaBucket, policyPath)
+ err := objAPI.DeleteObject(context.Background(), minioMetaBucket, policyPath)
if err != nil {
err = errors.Cause(err)
if _, ok := err.(ObjectNotFound); ok {
@@ -177,7 +178,7 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAcces
return errors.Cause(err)
}
- if _, err = objAPI.PutObject(nil, minioMetaBucket, policyPath, hashReader, nil); err != nil {
+ if _, err = objAPI.PutObject(context.Background(), minioMetaBucket, policyPath, hashReader, nil); err != nil {
errorIf(err, "Unable to set policy for the bucket %s", bucket)
return errors.Cause(err)
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"os"
"path/filepath"
"testing"
@@ -49,10 +50,10 @@ func TestReadFSMetadata(t *testing.T) {
bucketName := "bucket"
objectName := "object"
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
- if _, err := obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
+ if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
t.Fatal("Unexpected err: ", err)
}
@@ -84,10 +85,10 @@ func TestWriteFSMetadata(t *testing.T) {
bucketName := "bucket"
objectName := "object"
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected err: ", err)
}
- if _, err := obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
+ if _, err := obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
t.Fatal("Unexpected err: ", err)
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"os"
"path/filepath"
"testing"
@@ -42,8 +43,8 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
bucketName := "bucket"
objectName := "object"
- obj.MakeBucketWithLocation(nil, bucketName, "")
- uploadID, err := obj.NewMultipartUpload(nil, bucketName, objectName, nil)
+ obj.MakeBucketWithLocation(context.Background(), bucketName, "")
+ uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
@@ -58,7 +59,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
globalServiceDoneCh <- struct{}{}
// Check if upload id was already purged.
- if err = obj.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil {
+ if err = obj.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil {
err = errors.Cause(err)
if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err)
@@ -77,13 +78,13 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
bucketName := "bucket"
objectName := "object"
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
// Test with disk removed.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- if _, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil {
+ if _, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
@@ -108,11 +109,11 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
data := []byte("12345")
dataLen := int64(len(data))
- if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
- uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+ uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
@@ -121,7 +122,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
sha256sum := ""
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- _, err = fs.PutObjectPart(nil, bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
+ _, err = fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
@@ -139,11 +140,11 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
objectName := "object"
data := []byte("12345")
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
- uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+ uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
@@ -152,7 +153,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- if _, err := fs.CompleteMultipartUpload(nil, bucketName, objectName, uploadID, parts); err != nil {
+ if _, err := fs.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, parts); err != nil {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}
@@ -171,24 +172,24 @@ func TestCompleteMultipartUpload(t *testing.T) {
objectName := "object"
data := []byte("12345")
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
- uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+ uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
md5Hex := getMD5Hash(data)
- if _, err := fs.PutObjectPart(nil, bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
+ if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
t.Fatal("Unexpected error ", err)
}
parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
- if _, err := fs.CompleteMultipartUpload(nil, bucketName, objectName, uploadID, parts); err != nil {
+ if _, err := fs.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, parts); err != nil {
t.Fatal("Unexpected error ", err)
}
}
@@ -205,22 +206,22 @@ func TestAbortMultipartUpload(t *testing.T) {
objectName := "object"
data := []byte("12345")
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
- uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+ uploadID, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
md5Hex := getMD5Hash(data)
- if _, err := fs.PutObjectPart(nil, bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
+ if _, err := fs.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
t.Fatal("Unexpected error ", err)
}
time.Sleep(time.Second) // Without Sleep on windows, the fs.AbortMultipartUpload() fails with "The process cannot access the file because it is being used by another process."
- if err := fs.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil {
+ if err := fs.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil {
t.Fatal("Unexpected error ", err)
}
}
@@ -237,17 +238,17 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
bucketName := "bucket"
objectName := "object"
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Cannot create bucket, err: ", err)
}
- _, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+ _, err := fs.NewMultipartUpload(context.Background(), bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
if err != nil {
t.Fatal("Unexpected error ", err)
}
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- if _, err := fs.ListMultipartUploads(nil, bucketName, objectName, "", "", "", 1000); err != nil {
+ if _, err := fs.ListMultipartUploads(context.Background(), bucketName, objectName, "", "", "", 1000); err != nil {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error ", err)
}

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"fmt"
"os"
"path/filepath"
@@ -43,11 +44,11 @@ func TestFSParentDirIsObject(t *testing.T) {
bucketName := "testbucket"
objectName := "object"
- if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal(err)
}
objectContent := "12345"
- objInfo, err := obj.PutObject(nil, bucketName, objectName,
+ objInfo, err := obj.PutObject(context.Background(), bucketName, objectName,
mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
if err != nil {
t.Fatal(err)
@@ -136,23 +137,23 @@ func TestFSShutdown(t *testing.T) {
obj := initFSObjects(disk, t)
fs := obj.(*FSObjects)
objectContent := "12345"
- obj.MakeBucketWithLocation(nil, bucketName, "")
- obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
+ obj.MakeBucketWithLocation(context.Background(), bucketName, "")
+ obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
return fs, disk
}
// Test Shutdown with regular conditions
fs, disk := prepareTest()
- if err := fs.Shutdown(nil); err != nil {
+ if err := fs.Shutdown(context.Background()); err != nil {
t.Fatal("Cannot shutdown the FS object: ", err)
}
os.RemoveAll(disk)
// Test Shutdown with faulty disk
fs, disk = prepareTest()
- fs.DeleteObject(nil, bucketName, objectName)
+ fs.DeleteObject(context.Background(), bucketName, objectName)
os.RemoveAll(disk)
- if err := fs.Shutdown(nil); err != nil {
+ if err := fs.Shutdown(context.Background()); err != nil {
t.Fatal("Got unexpected fs shutdown error: ", err)
}
}
@@ -167,10 +168,10 @@ func TestFSGetBucketInfo(t *testing.T) {
fs := obj.(*FSObjects)
bucketName := "bucket"
- obj.MakeBucketWithLocation(nil, bucketName, "")
+ obj.MakeBucketWithLocation(context.Background(), bucketName, "")
// Test with valid parameters
- info, err := fs.GetBucketInfo(nil, bucketName)
+ info, err := fs.GetBucketInfo(context.Background(), bucketName)
if err != nil {
t.Fatal(err)
}
@@ -179,7 +180,7 @@ func TestFSGetBucketInfo(t *testing.T) {
}
// Test with inexistant bucket
- _, err = fs.GetBucketInfo(nil, "a")
+ _, err = fs.GetBucketInfo(context.Background(), "a")
if !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("BucketNameInvalid error not returned")
}
@@ -187,7 +188,7 @@ func TestFSGetBucketInfo(t *testing.T) {
// Check for buckets and should get disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- _, err = fs.GetBucketInfo(nil, bucketName)
+ _, err = fs.GetBucketInfo(context.Background(), bucketName)
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("BucketNotFound error not returned")
}
@@ -202,12 +203,12 @@ func TestFSPutObject(t *testing.T) {
bucketName := "bucket"
objectName := "1/2/3/4/object"
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal(err)
}
// With a regular object.
- _, err := obj.PutObject(nil, bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+ _, err := obj.PutObject(context.Background(), bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@@ -216,7 +217,7 @@ func TestFSPutObject(t *testing.T) {
}
// With a directory object.
- _, err = obj.PutObject(nil, bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
+ _, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@@ -224,11 +225,11 @@ func TestFSPutObject(t *testing.T) {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}
- _, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+ _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
- _, err = obj.PutObject(nil, bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+ _, err = obj.PutObject(context.Background(), bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
@@ -243,7 +244,7 @@ func TestFSPutObject(t *testing.T) {
}
}
- _, err = obj.PutObject(nil, bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
+ _, err = obj.PutObject(context.Background(), bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backned corruption occurred")
}
@@ -270,33 +271,33 @@ func TestFSDeleteObject(t *testing.T) {
bucketName := "bucket"
objectName := "object"
- obj.MakeBucketWithLocation(nil, bucketName, "")
- obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
+ obj.MakeBucketWithLocation(context.Background(), bucketName, "")
+ obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
// Test with invalid bucket name
- if err := fs.DeleteObject(nil, "fo", objectName); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
+ if err := fs.DeleteObject(context.Background(), "fo", objectName); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with bucket does not exist
- if err := fs.DeleteObject(nil, "foobucket", "fooobject"); !isSameType(errors.Cause(err), BucketNotFound{}) {
+ if err := fs.DeleteObject(context.Background(), "foobucket", "fooobject"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with invalid object name
- if err := fs.DeleteObject(nil, bucketName, "\\"); !isSameType(errors.Cause(err), ObjectNameInvalid{}) {
+ if err := fs.DeleteObject(context.Background(), bucketName, "\\"); !isSameType(errors.Cause(err), ObjectNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with object does not exist.
- if err := fs.DeleteObject(nil, bucketName, "foooobject"); !isSameType(errors.Cause(err), ObjectNotFound{}) {
+ if err := fs.DeleteObject(context.Background(), bucketName, "foooobject"); !isSameType(errors.Cause(err), ObjectNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with valid condition
- if err := fs.DeleteObject(nil, bucketName, objectName); err != nil {
+ if err := fs.DeleteObject(context.Background(), bucketName, objectName); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Delete object should err disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- if err := fs.DeleteObject(nil, bucketName, objectName); err != nil {
+ if err := fs.DeleteObject(context.Background(), bucketName, objectName); err != nil {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
@@ -314,29 +315,29 @@ func TestFSDeleteBucket(t *testing.T) {
fs := obj.(*FSObjects)
bucketName := "bucket"
- err := obj.MakeBucketWithLocation(nil, bucketName, "")
+ err := obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil {
t.Fatal("Unexpected error: ", err)
}
// Test with an invalid bucket name
- if err = fs.DeleteBucket(nil, "fo"); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
+ if err = fs.DeleteBucket(context.Background(), "fo"); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with an inexistant bucket
- if err = fs.DeleteBucket(nil, "foobucket"); !isSameType(errors.Cause(err), BucketNotFound{}) {
+ if err = fs.DeleteBucket(context.Background(), "foobucket"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with a valid case
- if err = fs.DeleteBucket(nil, bucketName); err != nil {
+ if err = fs.DeleteBucket(context.Background(), bucketName); err != nil {
t.Fatal("Unexpected error: ", err)
}
- obj.MakeBucketWithLocation(nil, bucketName, "")
+ obj.MakeBucketWithLocation(context.Background(), bucketName, "")
// Delete bucket should get error disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- if err = fs.DeleteBucket(nil, bucketName); err != nil {
+ if err = fs.DeleteBucket(context.Background(), bucketName); err != nil {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
@@ -353,7 +354,7 @@ func TestFSListBuckets(t *testing.T) {
fs := obj.(*FSObjects)
bucketName := "bucket"
- if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
+ if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err)
}
@@ -368,7 +369,7 @@ func TestFSListBuckets(t *testing.T) {
f.Close()
// Test list buckets to have only one entry.
- buckets, err := fs.ListBuckets(nil)
+ buckets, err := fs.ListBuckets(context.Background())
if err != nil {
t.Fatal("Unexpected error: ", err)
}
@@ -379,7 +380,7 @@ func TestFSListBuckets(t *testing.T) {
// Test ListBuckets with disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- if _, err := fs.ListBuckets(nil); err != nil {
+ if _, err := fs.ListBuckets(context.Background()); err != nil {
if errors.Cause(err) != errDiskNotFound {
t.Fatal("Unexpected error: ", err)
}
@@ -387,7 +388,7 @@ func TestFSListBuckets(t *testing.T) {
longPath := fmt.Sprintf("%0256d", 1)
fs.fsPath = longPath
- if _, err := fs.ListBuckets(nil); err != nil {
+ if _, err := fs.ListBuckets(context.Background()); err != nil {
if errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error: ", err)
}
@@ -400,7 +401,7 @@ func TestFSHealObject(t *testing.T) {
defer os.RemoveAll(disk)
obj := initFSObjects(disk, t)
- _, err := obj.HealObject(nil, "bucket", "object", false)
+ _, err := obj.HealObject(context.Background(), "bucket", "object", false)
if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}
@@ -412,7 +413,7 @@ func TestFSListObjectsHeal(t *testing.T) {
defer os.RemoveAll(disk)
obj := initFSObjects(disk, t)
- _, err := obj.ListObjectsHeal(nil, "bucket", "prefix", "marker", "delimiter", 1000)
+ _, err := obj.ListObjectsHeal(context.Background(), "bucket", "prefix", "marker", "delimiter", 1000)
if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ")
}

View File

@@ -17,6 +17,7 @@
package cmd
import (
+ "context"
"fmt"
"net/http"
"runtime"
@@ -51,7 +52,7 @@ func LivenessCheckHandler(w http.ResponseWriter, r *http.Request) {
return
}
// List buckets is unsuccessful, means server is having issues, send 503 service unavailable
- if _, err := objLayer.ListBuckets(nil); err != nil {
+ if _, err := objLayer.ListBuckets(context.Background()); err != nil {
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
return
}

View File

@@ -16,7 +16,10 @@
package cmd
- import "time"
+ import (
+ "context"
+ "time"
+ )
// SystemLockState - Structure to fill the lock state of entire object storage.
// That is the total locks held, total calls blocked on locks and state of all the locks for the entire system.
@@ -66,6 +69,6 @@ type OpsLockState struct {
// listLocksInfo - Fetches locks held on bucket, matching prefix held for longer than duration.
func listLocksInfo(bucket, prefix string, duration time.Duration) []VolumeLockInfo {
- locksInfo, _ := newObjectLayerFn().ListLocks(nil, bucket, prefix, duration)
+ locksInfo, _ := newObjectLayerFn().ListLocks(context.Background(), bucket, prefix, duration)
return locksInfo
}

View File

@@ -18,7 +18,7 @@ package logger
import "context"
- // Key used for ContextSet/Get
+ // Key used for Get/SetContext
type contextKeyType string
const contextLogKey = contextKeyType("miniolog")
@@ -37,7 +37,7 @@ type ReqInfo struct {
API string // API name - GetObject PutObject NewMultipartUpload etc.
BucketName string // Bucket name
ObjectName string // Object name
- Tags []KeyVal // Any additional info not accomodated by above fields
+ Tags []KeyVal // Any additional info not accommodated by above fields
}
// AppendTags - appends key/val to ReqInfo.Tags
@@ -48,13 +48,13 @@ func (r *ReqInfo) AppendTags(key string, val string) {
r.Tags = append(r.Tags, KeyVal{key, val})
}
- // ContextSet sets ReqInfo in the context.
- func ContextSet(ctx context.Context, req *ReqInfo) context.Context {
+ // SetContext sets ReqInfo in the context.
+ func SetContext(ctx context.Context, req *ReqInfo) context.Context {
return context.WithValue(ctx, contextLogKey, req)
}
- // ContextGet returns ReqInfo if set.
- func ContextGet(ctx context.Context) *ReqInfo {
+ // GetContext returns ReqInfo if set.
+ func GetContext(ctx context.Context) *ReqInfo {
r, ok := ctx.Value(contextLogKey).(*ReqInfo)
if ok {
return r
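Callers such as newHealSequence above use the renamed pair to stash request metadata in a context and read it back when logging. A self-contained sketch of that round trip follows; the ReqInfo here is trimmed to the fields visible in this hunk, so it is illustrative rather than the full MinIO type:

package main

import (
	"context"
	"fmt"
)

// ReqInfo trimmed to the fields shown in the hunk above.
type ReqInfo struct {
	API        string
	BucketName string
	ObjectName string
}

type contextKeyType string

const contextLogKey = contextKeyType("miniolog")

// SetContext stores ReqInfo in the context, mirroring the renamed helper.
func SetContext(ctx context.Context, req *ReqInfo) context.Context {
	return context.WithValue(ctx, contextLogKey, req)
}

// GetContext returns the stored ReqInfo, or nil if none was set.
func GetContext(ctx context.Context) *ReqInfo {
	r, ok := ctx.Value(contextLogKey).(*ReqInfo)
	if ok {
		return r
	}
	return nil
}

func main() {
	ctx := SetContext(context.Background(), &ReqInfo{API: "Heal", BucketName: "mybucket"})
	if r := GetContext(ctx); r != nil {
		fmt.Println(r.API, r.BucketName) // prints: Heal mybucket
	}
}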

View File

@@ -18,6 +18,7 @@ package cmd
import (
"bytes"
+ "context"
"encoding/json"
"encoding/xml"
"fmt"
@@ -246,7 +247,7 @@ func (sys *NotificationSys) Init(objAPI ObjectLayer) error {
return errInvalidArgument
}
- buckets, err := objAPI.ListBuckets()
+ buckets, err := objAPI.ListBuckets(context.Background())
if err != nil {
return err
}
@@ -467,14 +468,14 @@ func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error {
return err
}
- _, err = objAPI.PutObject(minioMetaBucket, configFile, hashReader, nil)
+ _, err = objAPI.PutObject(context.Background(), minioMetaBucket, configFile, hashReader, nil)
return err
}
func readConfig(objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) {
var buffer bytes.Buffer
// Read entire content by setting size to -1
- err := objAPI.GetObject(minioMetaBucket, configFile, 0, -1, &buffer, "")
+ err := objAPI.GetObject(context.Background(), minioMetaBucket, configFile, 0, -1, &buffer, "")
if err != nil {
// Ignore if err is ObjectNotFound or IncompleteBody when bucket is not configured with notification
if isErrObjectNotFound(err) || isErrIncompleteBody(err) {

View File

@@ -17,6 +17,7 @@
package cmd
import (
+ "context"
"path"
"sync"
@@ -144,7 +145,7 @@ func removeNotificationConfig(objAPI ObjectLayer, bucket string) error {
ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
- return objAPI.DeleteObject(minioMetaBucket, ncPath)
+ return objAPI.DeleteObject(context.Background(), minioMetaBucket, ncPath)
}
// Remove listener configuration from storage layer. Used when a bucket is deleted.
@@ -152,5 +153,5 @@ func removeListenerConfig(objAPI ObjectLayer, bucket string) error {
// make the path
lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
- return objAPI.DeleteObject(minioMetaBucket, lcPath)
+ return objAPI.DeleteObject(context.Background(), minioMetaBucket, lcPath)
}

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -39,7 +40,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
objectName := "test-object" objectName := "test-object"
// create bucket. // create bucket.
err := obj.MakeBucketWithLocation(nil, bucketName, "") err := obj.MakeBucketWithLocation(context.Background(), bucketName, "")
// Stop the test if creation of the bucket fails. // Stop the test if creation of the bucket fails.
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -68,7 +69,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
// iterate through the above set of inputs and upkoad the object. // iterate through the above set of inputs and upkoad the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData) _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -144,7 +145,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "") err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
} }
@ -183,7 +184,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
// Setup for the tests. // Setup for the tests.
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
// create bucket. // create bucket.
err := obj.MakeBucketWithLocation(nil, bucketName, "") err := obj.MakeBucketWithLocation(context.Background(), bucketName, "")
// Stop the test if creation of the bucket fails. // Stop the test if creation of the bucket fails.
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -209,7 +210,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
// iterate through the above set of inputs and upkoad the object. // iterate through the above set of inputs and upkoad the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData) _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -254,7 +255,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
} }
} }
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "") err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
} }
@ -293,7 +294,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
objectName := "test-object" objectName := "test-object"
// create bucket. // create bucket.
err := obj.MakeBucketWithLocation(nil, bucketName, "") err := obj.MakeBucketWithLocation(context.Background(), bucketName, "")
// Stop the test if creation of the bucket fails. // Stop the test if creation of the bucket fails.
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -322,7 +323,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
// iterate through the above set of inputs and upkoad the object. // iterate through the above set of inputs and upkoad the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData) _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -407,7 +408,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
} }
for i, testCase := range testCases { for i, testCase := range testCases {
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "") err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
} }
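A minimal, self-contained sketch of why tests pass context.Background() rather than a nil context; the store type and MakeBucket method below are hypothetical stand-ins, not MinIO code. A nil context panics as soon as the callee inspects it, while context.Background() is a valid, never-cancelled root context.

package main

import (
	"context"
	"fmt"
)

// store is a hypothetical stand-in for an object-layer style API whose
// methods take a context as their first argument.
type store struct{}

func (s store) MakeBucket(ctx context.Context, name string) error {
	// A callee may legitimately consult the context; with a nil context
	// this ctx.Done() call would panic.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	fmt.Println("created bucket", name)
	return nil
}

func main() {
	s := store{}
	// context.Background() is the canonical root context for tests and
	// entry points that have no request-scoped context to pass along.
	if err := s.MakeBucket(context.Background(), "mybucket"); err != nil {
		fmt.Println("error:", err)
	}
}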


@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"testing" "testing"
) )
@ -29,11 +30,11 @@ func TestGetObjectInfo(t *testing.T) {
// Testing GetObjectInfo(). // Testing GetObjectInfo().
func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) { func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
// This bucket is used for testing getObjectInfo operations. // This bucket is used for testing getObjectInfo operations.
err := obj.MakeBucketWithLocation(nil, "test-getobjectinfo", "") err := obj.MakeBucketWithLocation(context.Background(), "test-getobjectinfo", "")
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
_, err = obj.PutObject(nil, "test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil) _, err = obj.PutObject(context.Background(), "test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -71,7 +72,7 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true}, {"test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true},
} }
for i, testCase := range testCases { for i, testCase := range testCases {
result, err := obj.GetObjectInfo(nil, testCase.bucketName, testCase.objectName) result, err := obj.GetObjectInfo(context.Background(), testCase.bucketName, testCase.objectName)
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
} }
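The tests above follow a table-driven pattern: each case carries a shouldPass flag and the loop hands context.Background() to the call under test. A small sketch of that shape, assuming a hypothetical lookup helper in place of obj.GetObjectInfo:

package demo

import (
	"context"
	"errors"
	"testing"
)

// errNotFound and lookup are hypothetical stand-ins for the
// object-layer call exercised by the real test.
var errNotFound = errors.New("object not found")

func lookup(ctx context.Context, bucket, object string) (string, error) {
	if bucket == "test-getobjectinfo" && object == "Asia/asiapics.jpg" {
		return "asiapics", nil
	}
	return "", errNotFound
}

// TestLookup mirrors the table-driven shape used above: every case
// carries a shouldPass flag and the loop passes context.Background().
func TestLookup(t *testing.T) {
	testCases := []struct {
		bucket, object string
		shouldPass     bool
	}{
		{"test-getobjectinfo", "Asia/asiapics.jpg", true},
		{"missing-bucket", "whatever.jpg", false},
	}
	for i, tc := range testCases {
		_, err := lookup(context.Background(), tc.bucket, tc.object)
		if err != nil && tc.shouldPass {
			t.Errorf("Test %d: expected to pass, got %v", i+1, err)
		}
		if err == nil && !tc.shouldPass {
			t.Errorf("Test %d: expected to fail, but passed", i+1)
		}
	}
}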


@ -17,6 +17,8 @@
package cmd package cmd
import ( import (
"context"
"github.com/minio/minio/pkg/errors" "github.com/minio/minio/pkg/errors"
"github.com/skyrings/skyring-common/tools/uuid" "github.com/skyrings/skyring-common/tools/uuid"
) )
@ -175,7 +177,7 @@ func checkPutObjectArgs(bucket, object string, obj ObjectLayer, size int64) erro
// Checks whether bucket exists and returns appropriate error if not. // Checks whether bucket exists and returns appropriate error if not.
func checkBucketExist(bucket string, obj ObjectLayer) error { func checkBucketExist(bucket string, obj ObjectLayer) error {
_, err := obj.GetBucketInfo(nil, bucket) _, err := obj.GetBucketInfo(context.Background(), bucket)
if err != nil { if err != nil {
return errors.Cause(err) return errors.Cause(err)
} }
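checkBucketExist returns errors.Cause(err) so callers see the root error rather than the tracing wrapper. A rough, self-contained sketch of that unwrapping idea, using a hand-rolled wrapped type and cause helper instead of the real minio/pkg/errors package:

package main

import (
	"context"
	"errors"
	"fmt"
)

// wrapped is a minimal error wrapper, standing in for the tracing
// wrapper used by the real code; cause plays the role of errors.Cause.
type wrapped struct {
	msg   string
	inner error
}

func (w wrapped) Error() string { return w.msg + ": " + w.inner.Error() }

// cause walks down the chain of wrapped errors and returns the root.
func cause(err error) error {
	for {
		w, ok := err.(wrapped)
		if !ok {
			return err
		}
		err = w.inner
	}
}

// bucketExists is hypothetical; the real helper calls
// obj.GetBucketInfo(context.Background(), bucket) and returns the cause.
func bucketExists(ctx context.Context, bucket string) error {
	if bucket == "" {
		return wrapped{msg: "checking bucket", inner: errors.New("bucket name is empty")}
	}
	return nil
}

func main() {
	if err := bucketExists(context.Background(), ""); err != nil {
		fmt.Println(cause(err)) // prints the root cause, not the wrapper
	}
}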


@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"crypto/md5" "crypto/md5"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
@ -44,7 +45,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
"empty-bucket", "empty-bucket",
} }
for _, bucket := range testBuckets { for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -68,7 +69,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
for _, object := range testObjects { for _, object := range testObjects {
md5Bytes := md5.Sum([]byte(object.content)) md5Bytes := md5.Sum([]byte(object.content))
_, err = obj.PutObject(nil, testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content), _, err = obj.PutObject(context.Background(), testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), object.meta) int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), object.meta)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -524,7 +525,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
result, err := obj.ListObjects(nil, testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, int(testCase.maxKeys)) result, err := obj.ListObjects(context.Background(), testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, int(testCase.maxKeys))
if err != nil && testCase.shouldPass { if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
} }
@ -565,7 +566,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
// Take ListObject treeWalk go-routine to completion, if available in the treewalk pool. // Take ListObject treeWalk go-routine to completion, if available in the treewalk pool.
if result.IsTruncated { if result.IsTruncated {
_, err = obj.ListObjects(nil, testCase.bucketName, testCase.prefix, result.NextMarker, testCase.delimeter, 1000) _, err = obj.ListObjects(context.Background(), testCase.bucketName, testCase.prefix, result.NextMarker, testCase.delimeter, 1000)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -603,7 +604,7 @@ func BenchmarkListObjects(b *testing.B) {
bucket := "ls-benchmark-bucket" bucket := "ls-benchmark-bucket"
// Create a bucket. // Create a bucket.
err = obj.MakeBucketWithLocation(nil, bucket, "") err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -611,7 +612,7 @@ func BenchmarkListObjects(b *testing.B) {
// Insert objects to be listed and benchmarked later. // Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ { for i := 0; i < 20000; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(nil, bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -621,7 +622,7 @@ func BenchmarkListObjects(b *testing.B) {
// List the buckets over and over and over. // List the buckets over and over and over.
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
_, err = obj.ListObjects(nil, bucket, "", "obj9000", "", -1) _, err = obj.ListObjects(context.Background(), bucket, "", "obj9000", "", -1)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
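BenchmarkListObjects above populates the bucket once and then lists it repeatedly inside the timed loop, passing context.Background() each time. A self-contained sketch of the same benchmark shape, with a hypothetical in-memory list function standing in for ListObjects:

package demo

import (
	"context"
	"strconv"
	"testing"
)

// list is a hypothetical in-memory stand-in for ListObjects: it returns
// keys greater than marker, up to maxKeys (no limit when maxKeys <= 0).
func list(ctx context.Context, keys []string, marker string, maxKeys int) []string {
	var out []string
	for _, k := range keys {
		if k > marker {
			out = append(out, k)
			if maxKeys > 0 && len(out) == maxKeys {
				break
			}
		}
	}
	return out
}

// BenchmarkList mirrors the shape of BenchmarkListObjects above:
// populate once, reset the timer, then list inside the timed loop.
func BenchmarkList(b *testing.B) {
	keys := make([]string, 0, 20000)
	for i := 0; i < 20000; i++ {
		keys = append(keys, "obj"+strconv.Itoa(i))
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = list(context.Background(), keys, "obj9000", -1)
	}
}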


@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"os" "os"
"strings" "strings"
@ -39,14 +40,14 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
bucket := "minio-bucket" bucket := "minio-bucket"
object := "minio-object" object := "minio-object"
_, err := obj.NewMultipartUpload(nil, "--", object, nil) _, err := obj.NewMultipartUpload(context.Background(), "--", object, nil)
if err == nil { if err == nil {
t.Fatalf("%s: Expected to fail since bucket name is invalid.", instanceType) t.Fatalf("%s: Expected to fail since bucket name is invalid.", instanceType)
} }
errMsg := "Bucket not found: minio-bucket" errMsg := "Bucket not found: minio-bucket"
// operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist. // operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
_, err = obj.NewMultipartUpload(nil, bucket, object, nil) _, err = obj.NewMultipartUpload(context.Background(), bucket, object, nil)
if err == nil { if err == nil {
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType) t.Fatalf("%s: Expected to fail since the NewMultipartUpload is intialized on a non-existent bucket.", instanceType)
} }
@ -55,23 +56,23 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
} }
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucketWithLocation(nil, bucket, "") err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
_, err = obj.NewMultipartUpload(nil, bucket, "\\", nil) _, err = obj.NewMultipartUpload(context.Background(), bucket, "\\", nil)
if err == nil { if err == nil {
t.Fatalf("%s: Expected to fail since object name is invalid.", instanceType) t.Fatalf("%s: Expected to fail since object name is invalid.", instanceType)
} }
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
err = obj.AbortMultipartUpload(nil, bucket, object, uploadID) err = obj.AbortMultipartUpload(context.Background(), bucket, object, uploadID)
if err != nil { if err != nil {
switch err.(type) { switch err.(type) {
case InvalidUploadID: case InvalidUploadID:
@ -94,13 +95,13 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
object := "minio-object" object := "minio-object"
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -119,7 +120,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
} }
// Iterating over abortTestCases to abort multipart uploads. // Iterating over abortTestCases to abort multipart uploads.
for i, testCase := range abortTestCases { for i, testCase := range abortTestCases {
err = obj.AbortMultipartUpload(nil, testCase.bucketName, testCase.objName, testCase.uploadID) err = obj.AbortMultipartUpload(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID)
if testCase.expectedErrType == nil && err != nil { if testCase.expectedErrType == nil && err != nil {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType) t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
} }
@ -140,18 +141,18 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
object := "minio-object" object := "minio-object"
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
_, err = obj.NewMultipartUpload(nil, bucket, object, nil) _, err = obj.NewMultipartUpload(context.Background(), bucket, object, nil)
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
err = obj.AbortMultipartUpload(nil, bucket, object, "abc") err = obj.AbortMultipartUpload(context.Background(), bucket, object, "abc")
err = errors.Cause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
case InvalidUploadID: case InvalidUploadID:
@ -176,14 +177,14 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "") err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -220,7 +221,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
sha256sum := "" sha256sum := ""
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases { for _, testCase := range createPartCases {
_, err = obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum)) _, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -234,7 +235,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
// Object part upload should fail with quorum not available. // Object part upload should fail with quorum not available.
testCase := createPartCases[len(createPartCases)-1] testCase := createPartCases[len(createPartCases)-1]
_, err = obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum)) _, err = obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err == nil { if err == nil {
t.Fatalf("Test %s: expected to fail but passed instead", instanceType) t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
} }
@ -257,19 +258,19 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
object := "minio-object" object := "minio-object"
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(nil, "unused-bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -353,7 +354,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
// Validate all the test cases. // Validate all the test cases.
for i, testCase := range testCases { for i, testCase := range testCases {
actualInfo, actualErr := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256)) actualInfo, actualErr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
// All the test cases above are expected to fail. // All the test cases above are expected to fail.
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -393,13 +394,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "") err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -411,7 +412,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0]. // objectNames[0].
// uploadIds [1-3]. // uploadIds [1-3].
// Bucket to test for multiple upload IDs for a given object. // Bucket to test for multiple upload IDs for a given object.
err = obj.MakeBucketWithLocation(nil, bucketNames[1], "") err = obj.MakeBucketWithLocation(context.Background(), bucketNames[1], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -419,7 +420,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times. // Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
// Used to test the listing for the case of multiple uploadID's for a given object. // Used to test the listing for the case of multiple uploadID's for a given object.
uploadID, err = obj.NewMultipartUpload(nil, bucketNames[1], objectNames[0], nil) uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -432,7 +433,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// bucketnames[2]. // bucketnames[2].
// objectNames[0-2]. // objectNames[0-2].
// uploadIds [4-9]. // uploadIds [4-9].
err = obj.MakeBucketWithLocation(nil, bucketNames[2], "") err = obj.MakeBucketWithLocation(context.Background(), bucketNames[2], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -441,7 +442,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// Used to test the listing for the case of multiple objects for a given bucket. // Used to test the listing for the case of multiple objects for a given bucket.
for i := 0; i < 6; i++ { for i := 0; i < 6; i++ {
var uploadID string var uploadID string
uploadID, err = obj.NewMultipartUpload(nil, bucketNames[2], objectNames[i], nil) uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -487,7 +488,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
sha256sum := "" sha256sum := ""
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases { for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum)) _, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -1215,7 +1216,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
for i, testCase := range testCases { for i, testCase := range testCases {
// fmt.Println(i+1, testCase) // uncomment to peek into the test cases. // fmt.Println(i+1, testCase) // uncomment to peek into the test cases.
actualResult, actualErr := obj.ListMultipartUploads(nil, testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads) actualResult, actualErr := obj.ListMultipartUploads(context.Background(), testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
} }
@ -1267,13 +1268,13 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "") err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -1307,7 +1308,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
sha256sum := "" sha256sum := ""
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases { for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum)) _, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -1427,7 +1428,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualResult, actualErr := obj.ListObjectParts(nil, testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts) actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
} }
@ -1511,13 +1512,13 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "") err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -1548,7 +1549,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
sha256sum := "" sha256sum := ""
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases { for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum)) _, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -1665,7 +1666,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualResult, actualErr := obj.ListObjectParts(nil, testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts) actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
} }
@ -1757,13 +1758,13 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucketWithLocation(nil, bucketNames[0], "") err = obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err = obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], map[string]string{"X-Amz-Meta-Id": "id"}) uploadID, err = obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], map[string]string{"X-Amz-Meta-Id": "id"})
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -1798,7 +1799,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
sha256sum := "" sha256sum := ""
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, part := range parts { for _, part := range parts {
_, err = obj.PutObjectPart(nil, part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum)) _, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
} }
@ -1901,7 +1902,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
} }
for i, testCase := range testCases { for i, testCase := range testCases {
actualResult, actualErr := obj.CompleteMultipartUpload(nil, testCase.bucket, testCase.object, testCase.uploadID, testCase.parts) actualResult, actualErr := obj.CompleteMultipartUpload(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr)
} }
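The multipart tests above always follow the same lifecycle: NewMultipartUpload, one or more PutObjectPart calls, then CompleteMultipartUpload, each taking the context first. A compact, self-contained sketch of that lifecycle with a hypothetical in-memory uploader (not the MinIO API):

package main

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// uploader is a hypothetical in-memory multipart store; its methods
// mirror the call order used by the tests above (new upload, put parts,
// complete), each taking a context as the first argument.
type uploader struct {
	parts map[int][]byte
}

func (u *uploader) newUpload(ctx context.Context) string {
	u.parts = make(map[int][]byte)
	return "upload-id-1" // a fixed, illustrative upload ID
}

// putPart stores one part and returns its ETag (hex of the MD5 sum).
func (u *uploader) putPart(ctx context.Context, uploadID string, n int, data []byte) string {
	sum := md5.Sum(data)
	u.parts[n] = append([]byte(nil), data...)
	return hex.EncodeToString(sum[:])
}

// complete concatenates the stored parts in part-number order.
func (u *uploader) complete(ctx context.Context, uploadID string) []byte {
	var buf bytes.Buffer
	for n := 1; ; n++ {
		data, ok := u.parts[n]
		if !ok {
			break
		}
		buf.Write(data)
	}
	return buf.Bytes()
}

func main() {
	ctx := context.Background() // root context, as in the tests above
	u := &uploader{}
	id := u.newUpload(ctx)
	etag1 := u.putPart(ctx, id, 1, []byte("hello, "))
	etag2 := u.putPart(ctx, id, 2, []byte("world"))
	fmt.Println(etag1, etag2)
	fmt.Printf("%s\n", u.complete(ctx, id)) // hello, world
}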


@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"crypto/md5" "crypto/md5"
"encoding/hex" "encoding/hex"
"io/ioutil" "io/ioutil"
@ -46,14 +47,14 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(nil, "unused-bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -162,7 +163,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
} }
for i, testCase := range testCases { for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(nil, testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta) objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
actualErr = errors.Cause(actualErr) actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.expectedError == nil { if actualErr != nil && testCase.expectedError == nil {
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
@ -197,14 +198,14 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(nil, "unused-bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -236,7 +237,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
sha256sum := "" sha256sum := ""
for i, testCase := range testCases { for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(nil, testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta) objInfo, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errors.Cause(actualErr) actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -286,7 +287,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
InsufficientWriteQuorum{}, InsufficientWriteQuorum{},
} }
_, actualErr := obj.PutObject(nil, testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta) _, actualErr := obj.PutObject(context.Background(), testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errors.Cause(actualErr) actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass { if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error()) t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
@ -311,7 +312,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -319,7 +320,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
data := []byte("hello, world") data := []byte("hello, world")
// Create object. // Create object.
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil { if err != nil {
// Failed to create object, abort. // Failed to create object, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -345,14 +346,14 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(nil, bucket, "") err := obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -364,7 +365,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer.Write(fiveMBBytes) md5Writer.Write(fiveMBBytes)
etag1 := hex.EncodeToString(md5Writer.Sum(nil)) etag1 := hex.EncodeToString(md5Writer.Sum(nil))
sha256sum := "" sha256sum := ""
_, err = obj.PutObjectPart(nil, bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum)) _, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
if err != nil { if err != nil {
// Failed to upload object part, abort. // Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -375,7 +376,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer = md5.New() md5Writer = md5.New()
md5Writer.Write(data) md5Writer.Write(data)
etag2 := hex.EncodeToString(md5Writer.Sum(nil)) etag2 := hex.EncodeToString(md5Writer.Sum(nil))
_, err = obj.PutObjectPart(nil, bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum)) _, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
if err != nil { if err != nil {
// Failed to upload object part, abort. // Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -386,7 +387,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
{ETag: etag1, PartNumber: 1}, {ETag: etag1, PartNumber: 1},
{ETag: etag2, PartNumber: 2}, {ETag: etag2, PartNumber: 2},
} }
_, err = obj.CompleteMultipartUpload(nil, bucket, object, uploadID, parts) _, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, parts)
if err != nil { if err != nil {
// Failed to complete multipart upload, abort. // Failed to complete multipart upload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())


@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "io"
@ -76,7 +77,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err := obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData) _, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -223,7 +224,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err := obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData) _, err := obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -751,7 +752,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent)) t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent))
continue continue
} }
objInfo, err := obj.GetObjectInfo(nil, testCase.bucketName, testCase.objectName) objInfo, err := obj.GetObjectInfo(context.Background(), testCase.bucketName, testCase.objectName)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
} }
@ -763,7 +764,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
t.Fatalf("Test %d: %s: ContentEncoding is set to \"%s\" which is unexpected, expected \"%s\"", i+1, instanceType, objInfo.ContentEncoding, expectedContentEncoding) t.Fatalf("Test %d: %s: ContentEncoding is set to \"%s\" which is unexpected, expected \"%s\"", i+1, instanceType, objInfo.ContentEncoding, expectedContentEncoding)
} }
buffer := new(bytes.Buffer) buffer := new(bytes.Buffer)
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, 0, int64(testCase.dataLen), buffer, objInfo.ETag) err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(testCase.dataLen), buffer, objInfo.ETag)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
} }
@ -935,7 +936,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
buffer := new(bytes.Buffer) buffer := new(bytes.Buffer)
// Fetch the object to check whether the content is the same as the one uploaded via PutObject. // Fetch the object to check whether the content is the same as the one uploaded via PutObject.
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "") err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
} }
@ -978,7 +979,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
if testCase.expectedRespStatus == http.StatusOK { if testCase.expectedRespStatus == http.StatusOK {
buffer := new(bytes.Buffer) buffer := new(bytes.Buffer)
// Fetch the object to check whether the content is the same as the one uploaded via PutObject. // Fetch the object to check whether the content is the same as the one uploaded via PutObject.
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "") err = obj.GetObject(context.Background(), testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
} }
@ -1058,7 +1059,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName,
mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData) mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
@ -1072,7 +1073,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err) t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -1117,7 +1118,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
}) })
} }
result, err := obj.CompleteMultipartUpload(nil, bucketName, testObject, uploadID, parts) result, err := obj.CompleteMultipartUpload(context.Background(), bucketName, testObject, uploadID, parts)
if err != nil { if err != nil {
t.Fatalf("Test: %s complete multipart upload failed: <ERROR> %v", instanceType, err) t.Fatalf("Test: %s complete multipart upload failed: <ERROR> %v", instanceType, err)
} }
@ -1126,7 +1127,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
} }
var buf bytes.Buffer var buf bytes.Buffer
if err = obj.GetObject(nil, bucketName, testObject, 0, int64(len(bytesData[0].byteData)), &buf, ""); err != nil { if err = obj.GetObject(context.Background(), bucketName, testObject, 0, int64(len(bytesData[0].byteData)), &buf, ""); err != nil {
t.Fatalf("Test: %s reading completed file failed: <ERROR> %v", instanceType, err) t.Fatalf("Test: %s reading completed file failed: <ERROR> %v", instanceType, err)
} }
if !bytes.Equal(buf.Bytes(), bytesData[0].byteData) { if !bytes.Equal(buf.Bytes(), bytesData[0].byteData) {
@ -1169,7 +1170,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData) _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1182,7 +1183,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err) t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -1398,7 +1399,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// See if the new part has been uploaded. // See if the new part has been uploaded.
// testing whether the copy was successful. // testing whether the copy was successful.
var results ListPartsInfo var results ListPartsInfo
results, err = obj.ListObjectParts(nil, testCase.bucketName, testObject, testCase.uploadID, 0, 1) results, err = obj.ListObjectParts(context.Background(), testCase.bucketName, testObject, testCase.uploadID, 0, 1)
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to look for copied object part: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to look for copied object part: <ERROR> %s", i+1, instanceType, err)
} }
@ -1506,7 +1507,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData) _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1775,7 +1776,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
if rec.Code == http.StatusOK { if rec.Code == http.StatusOK {
// See if the new object is formed. // See if the new object is formed.
// testing whether the copy was successful. // testing whether the copy was successful.
err = obj.GetObject(nil, testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "") err = obj.GetObject(context.Background(), testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "")
if err != nil { if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err) t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
} }
@ -1909,7 +1910,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Error decoding the recorded response Body") t.Fatalf("Error decoding the recorded response Body")
} }
// verify the uploadID by making an attempt to list parts. // verify the uploadID by making an attempt to list parts.
_, err = obj.ListObjectParts(nil, bucketName, objectName, multipartResponse.UploadID, 0, 1) _, err = obj.ListObjectParts(context.Background(), bucketName, objectName, multipartResponse.UploadID, 0, 1)
if err != nil { if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err) t.Fatalf("Invalid UploadID: <ERROR> %s", err)
} }
@ -1961,7 +1962,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Error decoding the recorded response Body") t.Fatalf("Error decoding the recorded response Body")
} }
// verify the uploadID by making an attempt to list parts. // verify the uploadID by making an attempt to list parts.
_, err = obj.ListObjectParts(nil, bucketName, objectName, multipartResponse.UploadID, 0, 1) _, err = obj.ListObjectParts(context.Background(), bucketName, objectName, multipartResponse.UploadID, 0, 1)
if err != nil { if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err) t.Fatalf("Invalid UploadID: <ERROR> %s", err)
} }
@ -2073,7 +2074,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam
wg.Wait() wg.Wait()
// Validate the upload ID by an attempt to list parts using it. // Validate the upload ID by an attempt to list parts using it.
for _, uploadID := range testUploads.uploads { for _, uploadID := range testUploads.uploads {
_, err := obj.ListObjectParts(nil, bucketName, objectName, uploadID, 0, 1) _, err := obj.ListObjectParts(context.Background(), bucketName, objectName, uploadID, 0, 1)
if err != nil { if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err) t.Fatalf("Invalid UploadID: <ERROR> %s", err)
} }
@ -2101,7 +2102,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
// initiate new multipart uploadID. // initiate new multipart uploadID.
uploadID, err = obj.NewMultipartUpload(nil, bucketName, objectName, nil) uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err) t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -2142,7 +2143,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
} }
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, part := range parts { for _, part := range parts {
_, err = obj.PutObjectPart(nil, part.bucketName, part.objName, part.uploadID, part.PartID, _, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, "")) mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -2450,7 +2451,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
for i := 0; i < 2; i++ { for i := 0; i < 2; i++ {
// initiate new multipart uploadID. // initiate new multipart uploadID.
uploadID, err = obj.NewMultipartUpload(nil, bucketName, objectName, nil) uploadID, err = obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err) t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -2491,7 +2492,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
} }
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, part := range parts { for _, part := range parts {
_, err = obj.PutObjectPart(nil, part.bucketName, part.objName, part.uploadID, part.PartID, _, err = obj.PutObjectPart(context.Background(), part.bucketName, part.objName, part.uploadID, part.PartID,
mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, "")) mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -2630,7 +2631,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData) _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -2924,7 +2925,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err) t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -3327,7 +3328,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
// PutObjectPart API HTTP Handler has to be tested in isolation, // PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered, // that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer. // That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err) t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -3336,7 +3337,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
uploadIDCopy := uploadID uploadIDCopy := uploadID
// create an object Part, will be used to test list object parts. // create an object Part, will be used to test list object parts.
_, err = obj.PutObjectPart(nil, bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", "")) _, err = obj.PutObjectPart(context.Background(), bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
if err != nil { if err != nil {
t.Fatalf("Minio %s : %s.", instanceType, err) t.Fatalf("Minio %s : %s.", instanceType, err)
} }
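Taken together, the hunks above apply one mechanical change: every ObjectLayer call in these tests now receives context.Background() where it previously received nil. Below is a minimal sketch of the resulting call shape, assuming an ObjectLayer value obj and the package's mustGetHashReader test helper (neither is defined here); it is an illustration of the pattern, not a verbatim excerpt of any test above.

func putAndGetRoundTrip(t *testing.T, obj ObjectLayer) {
	ctx := context.Background() // explicit background context instead of nil
	if err := obj.MakeBucketWithLocation(ctx, "bucket", ""); err != nil {
		t.Fatalf("MakeBucketWithLocation: %v", err)
	}
	content := "hello"
	// Upload a small object; the hash reader wraps the payload with its length.
	_, err := obj.PutObject(ctx, "bucket", "object",
		mustGetHashReader(t, bytes.NewBufferString(content), int64(len(content)), "", ""), nil)
	if err != nil {
		t.Fatalf("PutObject: %v", err)
	}
	// Read it back through the same context-aware API.
	var buf bytes.Buffer
	if err := obj.GetObject(ctx, "bucket", "object", 0, int64(len(content)), &buf, ""); err != nil {
		t.Fatalf("GetObject: %v", err)
	}
}

Passing a real context here is what eventually lets request metadata and cancellation flow into the object layer instead of a nil placeholder.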
@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"io" "io"
"math/rand" "math/rand"
"strconv" "strconv"
@ -77,7 +78,7 @@ func (s *ObjectLayerAPISuite) TestMakeBucket(t *testing.T) {
// Tests validate bucket creation. // Tests validate bucket creation.
func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "bucket-unknown", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -90,11 +91,11 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) {
// Tests validate creation of part files during Multipart operation. // Tests validate creation of part files during Multipart operation.
func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadID, err := obj.NewMultipartUpload(nil, "bucket", "key", nil) uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -105,7 +106,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
expectedETaghex := getMD5Hash(data) expectedETaghex := getMD5Hash(data)
var calcPartInfo PartInfo var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart(nil, "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, "")) calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
if err != nil { if err != nil {
t.Errorf("%s: <ERROR> %s", instanceType, err) t.Errorf("%s: <ERROR> %s", instanceType, err)
} }
@ -117,7 +118,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
ETag: calcPartInfo.ETag, ETag: calcPartInfo.ETag,
}) })
} }
objInfo, err := obj.CompleteMultipartUpload(nil, "bucket", "key", uploadID, completedParts.Parts) objInfo, err := obj.CompleteMultipartUpload(context.Background(), "bucket", "key", uploadID, completedParts.Parts)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -133,11 +134,11 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) {
// Tests validate abortion of Multipart operation. // Tests validate abortion of Multipart operation.
func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadID, err := obj.NewMultipartUpload(nil, "bucket", "key", nil) uploadID, err := obj.NewMultipartUpload(context.Background(), "bucket", "key", nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -155,7 +156,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
metadata["md5"] = expectedETaghex metadata["md5"] = expectedETaghex
var calcPartInfo PartInfo var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart(nil, "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, "")) calcPartInfo, err = obj.PutObjectPart(context.Background(), "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -164,7 +165,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
} }
parts[i] = expectedETaghex parts[i] = expectedETaghex
} }
err = obj.AbortMultipartUpload(nil, "bucket", "key", uploadID) err = obj.AbortMultipartUpload(context.Background(), "bucket", "key", uploadID)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -178,7 +179,7 @@ func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) {
// Tests validate object creation. // Tests validate object creation.
func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
objects := make(map[string][]byte) objects := make(map[string][]byte)
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -196,7 +197,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
metadata := make(map[string]string) metadata := make(map[string]string)
metadata["etag"] = expectedETaghex metadata["etag"] = expectedETaghex
var objInfo ObjectInfo var objInfo ObjectInfo
objInfo, err = obj.PutObject(nil, "bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata) objInfo, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -207,7 +208,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
for key, value := range objects { for key, value := range objects {
var byteBuffer bytes.Buffer var byteBuffer bytes.Buffer
err = obj.GetObject(nil, "bucket", key, 0, int64(len(value)), &byteBuffer, "") err = obj.GetObject(context.Background(), "bucket", key, 0, int64(len(value)), &byteBuffer, "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -215,7 +216,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
t.Errorf("%s: Mismatch of GetObject data with the expected one.", instanceType) t.Errorf("%s: Mismatch of GetObject data with the expected one.", instanceType)
} }
objInfo, err := obj.GetObjectInfo(nil, "bucket", key) objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", key)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -233,8 +234,8 @@ func (s *ObjectLayerAPISuite) TestPaging(t *testing.T) {
// Tests validate creation of objects and the order of listing using various filters for ListObjects operation. // Tests validate creation of objects and the order of listing using various filters for ListObjects operation.
func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
obj.MakeBucketWithLocation(nil, "bucket", "") obj.MakeBucketWithLocation(context.Background(), "bucket", "")
result, err := obj.ListObjects(nil, "bucket", "", "", "", 0) result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 0)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -249,12 +250,12 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check before paging occurs. // check before paging occurs.
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(nil, "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
result, err = obj.ListObjects(nil, "bucket", "", "", "", 5) result, err = obj.ListObjects(context.Background(), "bucket", "", "", "", 5)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -269,11 +270,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check that pages work after paging occurs. // check that pages work after paging occurs.
for i := 6; i <= 10; i++ { for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(nil, "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
result, err = obj.ListObjects(nil, "bucket", "obj", "", "", 5) result, err = obj.ListObjects(context.Background(), "bucket", "obj", "", "", 5)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -286,15 +287,15 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
// check paging with prefix at end returns less objects. // check paging with prefix at end returns less objects.
{ {
_, err = obj.PutObject(nil, "bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
_, err = obj.PutObject(nil, "bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
result, err = obj.ListObjects(nil, "bucket", "new", "", "", 5) result, err = obj.ListObjects(context.Background(), "bucket", "new", "", "", 5)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -305,7 +306,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check ordering of pages. // check ordering of pages.
{ {
result, err = obj.ListObjects(nil, "bucket", "", "", "", 1000) result, err = obj.ListObjects(context.Background(), "bucket", "", "", "", 1000)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -328,15 +329,15 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check delimited results with delimiter and prefix. // check delimited results with delimiter and prefix.
{ {
_, err = obj.PutObject(nil, "bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
_, err = obj.PutObject(nil, "bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
result, err = obj.ListObjects(nil, "bucket", "this/is/", "", "/", 10) result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", "/", 10)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -350,7 +351,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check delimited results with delimiter without prefix. // check delimited results with delimiter without prefix.
{ {
result, err = obj.ListObjects(nil, "bucket", "", "", "/", 1000) result, err = obj.ListObjects(context.Background(), "bucket", "", "", "/", 1000)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -378,7 +379,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
// check results with Marker. // check results with Marker.
{ {
result, err = obj.ListObjects(nil, "bucket", "", "newPrefix", "", 3) result, err = obj.ListObjects(context.Background(), "bucket", "", "newPrefix", "", 3)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -394,7 +395,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
// check ordering of results with prefix. // check ordering of results with prefix.
{ {
result, err = obj.ListObjects(nil, "bucket", "obj", "", "", 1000) result, err = obj.ListObjects(context.Background(), "bucket", "obj", "", "", 1000)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -416,7 +417,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
// check ordering of results with prefix and no paging. // check ordering of results with prefix and no paging.
{ {
result, err = obj.ListObjects(nil, "bucket", "new", "", "", 5) result, err = obj.ListObjects(context.Background(), "bucket", "new", "", "", 5)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -436,27 +437,27 @@ func (s *ObjectLayerAPISuite) TestObjectOverwriteWorks(t *testing.T) {
// Tests validate overwriting of an existing object. // Tests validate overwriting of an existing object.
func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) { func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number." uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
length := int64(len(uploadContent)) length := int64(len(uploadContent))
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
length = int64(len(uploadContent)) length = int64(len(uploadContent))
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
var bytesBuffer bytes.Buffer var bytesBuffer bytes.Buffer
err = obj.GetObject(nil, "bucket", "object", 0, length, &bytesBuffer, "") err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer, "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -472,7 +473,7 @@ func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) {
// Tests validate that bucket operation on non-existent bucket fails. // Tests validate that bucket operation on non-existent bucket fails.
func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) { func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
_, err := obj.PutObject(nil, "bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil) _, err := obj.PutObject(context.Background(), "bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
if err == nil { if err == nil {
t.Fatal("Expected error but found nil") t.Fatal("Expected error but found nil")
} }
@ -488,11 +489,11 @@ func (s *ObjectLayerAPISuite) TestBucketRecreateFails(t *testing.T) {
// Tests validate that recreation of the bucket fails. // Tests validate that recreation of the bucket fails.
func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) { func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "string", "") err := obj.MakeBucketWithLocation(context.Background(), "string", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
err = obj.MakeBucketWithLocation(nil, "string", "") err = obj.MakeBucketWithLocation(context.Background(), "string", "")
if err == nil { if err == nil {
t.Fatalf("%s: Expected error but found nil.", instanceType) t.Fatalf("%s: Expected error but found nil.", instanceType)
} }
@ -513,17 +514,17 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
length := int64(len(content)) length := int64(len(content))
readerEOF := newTestReaderEOF(content) readerEOF := newTestReaderEOF(content)
readerNoEOF := newTestReaderNoEOF(content) readerNoEOF := newTestReaderNoEOF(content)
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
var bytesBuffer1 bytes.Buffer var bytesBuffer1 bytes.Buffer
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
err = obj.GetObject(nil, "bucket", "object", 0, length, &bytesBuffer1, "") err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer1, "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -532,11 +533,11 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
var bytesBuffer2 bytes.Buffer var bytesBuffer2 bytes.Buffer
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
err = obj.GetObject(nil, "bucket", "object", 0, length, &bytesBuffer2, "") err = obj.GetObject(context.Background(), "bucket", "object", 0, length, &bytesBuffer2, "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -552,7 +553,7 @@ func (s *ObjectLayerAPISuite) TestPutObjectInSubdir(t *testing.T) {
// Tests validate PutObject with subdirectory prefix. // Tests validate PutObject with subdirectory prefix.
func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) { func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -560,13 +561,13 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
upload might have been aborted or completed.` upload might have been aborted or completed.`
length := int64(len(uploadContent)) length := int64(len(uploadContent))
_, err = obj.PutObject(nil, "bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
var bytesBuffer bytes.Buffer var bytesBuffer bytes.Buffer
err = obj.GetObject(nil, "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "") err = obj.GetObject(context.Background(), "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -584,7 +585,7 @@ func (s *ObjectLayerAPISuite) TestListBuckets(t *testing.T) {
// Tests validate ListBuckets. // Tests validate ListBuckets.
func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) { func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
// test empty list. // test empty list.
buckets, err := obj.ListBuckets(nil) buckets, err := obj.ListBuckets(context.Background())
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -593,12 +594,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
// add one and test exists. // add one and test exists.
err = obj.MakeBucketWithLocation(nil, "bucket1", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket1", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
buckets, err = obj.ListBuckets(nil) buckets, err = obj.ListBuckets(context.Background())
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -607,12 +608,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
// add two and test exists. // add two and test exists.
err = obj.MakeBucketWithLocation(nil, "bucket2", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket2", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
buckets, err = obj.ListBuckets(nil) buckets, err = obj.ListBuckets(context.Background())
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -621,12 +622,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
// add three and test exists + prefix. // add three and test exists + prefix.
err = obj.MakeBucketWithLocation(nil, "bucket22", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket22", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
buckets, err = obj.ListBuckets(nil) buckets, err = obj.ListBuckets(context.Background())
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -645,15 +646,15 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler
// if implementation contains a map, order of map keys will vary. // if implementation contains a map, order of map keys will vary.
// this ensures they return in the same order each time. // this ensures they return in the same order each time.
// add one and test exists. // add one and test exists.
err := obj.MakeBucketWithLocation(nil, "bucket1", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket1", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
err = obj.MakeBucketWithLocation(nil, "bucket2", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket2", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
buckets, err := obj.ListBuckets(nil) buckets, err := obj.ListBuckets(context.Background())
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -676,7 +677,7 @@ func (s *ObjectLayerAPISuite) TestListObjectsTestsForNonExistantBucket(t *testin
// Tests validate that ListObjects operation on a non-existent bucket fails as expected. // Tests validate that ListObjects operation on a non-existent bucket fails as expected.
func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType string, t TestErrHandler) { func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
result, err := obj.ListObjects(nil, "bucket", "", "", "", 1000) result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 1000)
if err == nil { if err == nil {
t.Fatalf("%s: Expected error but found nil.", instanceType) t.Fatalf("%s: Expected error but found nil.", instanceType)
} }
@ -698,12 +699,12 @@ func (s *ObjectLayerAPISuite) TestNonExistantObjectInBucket(t *testing.T) {
// Tests validate that GetObject fails on a non-existent bucket as expected. // Tests validate that GetObject fails on a non-existent bucket as expected.
func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) { func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
_, err = obj.GetObjectInfo(nil, "bucket", "dir1") _, err = obj.GetObjectInfo(context.Background(), "bucket", "dir1")
if err == nil { if err == nil {
t.Fatalf("%s: Expected error but found nil", instanceType) t.Fatalf("%s: Expected error but found nil", instanceType)
} }
@ -726,13 +727,13 @@ func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(t *testing.T
// Tests validate that GetObject on an existing directory fails as expected. // Tests validate that GetObject on an existing directory fails as expected.
func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) { func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) {
bucketName := "bucket" bucketName := "bucket"
err := obj.MakeBucketWithLocation(nil, bucketName, "") err := obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag." content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
length := int64(len(content)) length := int64(len(content))
_, err = obj.PutObject(nil, bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil) _, err = obj.PutObject(context.Background(), bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
@ -753,7 +754,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
} }
for i, testCase := range testCases { for i, testCase := range testCases {
_, expectedErr := obj.GetObjectInfo(nil, bucketName, testCase.dir) _, expectedErr := obj.GetObjectInfo(context.Background(), bucketName, testCase.dir)
if expectedErr != nil { if expectedErr != nil {
expectedErr = errors.Cause(expectedErr) expectedErr = errors.Cause(expectedErr)
if expectedErr.Error() != testCase.err.Error() { if expectedErr.Error() != testCase.err.Error() {
@ -770,17 +771,17 @@ func (s *ObjectLayerAPISuite) TestContentType(t *testing.T) {
// Test content-type. // Test content-type.
func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) { func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(nil, "bucket", "") err := obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
// Test empty. // Test empty.
_, err = obj.PutObject(nil, "bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil) _, err = obj.PutObject(context.Background(), "bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
objInfo, err := obj.GetObjectInfo(nil, "bucket", "minio.png") objInfo, err := obj.GetObjectInfo(context.Background(), "bucket", "minio.png")
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"fmt" "fmt"
"path" "path"
@ -58,7 +59,7 @@ func (receiver *PeerRPCReceiver) UpdateBucketPolicy(args *UpdateBucketPolicyArgs
// If the object layer is just coming up then it will load the policy from the disk. // If the object layer is just coming up then it will load the policy from the disk.
return nil return nil
} }
return objectAPI.RefreshBucketPolicy(args.BucketName) return objectAPI.RefreshBucketPolicy(context.Background(), args.BucketName)
} }
// PutBucketNotificationArgs - put bucket notification RPC arguments. // PutBucketNotificationArgs - put bucket notification RPC arguments.
@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@ -138,7 +139,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -229,7 +230,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
} }
// When the operation is successful, check if sending metadata is successful too // When the operation is successful, check if sending metadata is successful too
if rec.Code == http.StatusNoContent { if rec.Code == http.StatusNoContent {
objInfo, err := obj.GetObjectInfo(nil, bucketName, testCase.objectName+"/upload.txt") objInfo, err := obj.GetObjectInfo(context.Background(), bucketName, testCase.objectName+"/upload.txt")
if err != nil { if err != nil {
t.Error("Unexpected error: ", err) t.Error("Unexpected error: ", err)
} }
@ -448,7 +449,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
curTime := UTCNow() curTime := UTCNow()
curTimePlus5Min := curTime.Add(time.Minute * 5) curTimePlus5Min := curTime.Add(time.Minute * 5)
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -482,7 +483,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
} }
// Get the uploaded object info // Get the uploaded object info
info, err := obj.GetObjectInfo(nil, bucketName, targetObj) info, err := obj.GetObjectInfo(context.Background(), bucketName, targetObj)
if err != nil { if err != nil {
t.Error("Unexpected error: ", err) t.Error("Unexpected error: ", err)
} }
@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"crypto/x509" "crypto/x509"
"fmt" "fmt"
"net/url" "net/url"
@ -50,7 +51,7 @@ func printStartupMessage(apiEndPoints []string) {
// Object layer is initialized then print StorageInfo. // Object layer is initialized then print StorageInfo.
objAPI := newObjectLayerFn() objAPI := newObjectLayerFn()
if objAPI != nil { if objAPI != nil {
printStorageInfo(objAPI.StorageInfo(nil)) printStorageInfo(objAPI.StorageInfo(context.Background()))
} }
// Prints credential, region and browser access. // Prints credential, region and browser access.
@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"os" "os"
) )
@ -46,7 +47,7 @@ func handleSignals() {
errorIf(err, "Unable to shutdown http server") errorIf(err, "Unable to shutdown http server")
if objAPI := newObjectLayerFn(); objAPI != nil { if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown(nil) oerr = objAPI.Shutdown(context.Background())
errorIf(oerr, "Unable to shutdown object layer") errorIf(oerr, "Unable to shutdown object layer")
} }
@ -59,7 +60,7 @@ func handleSignals() {
errorIf(err, "http server exited abnormally") errorIf(err, "http server exited abnormally")
var oerr error var oerr error
if objAPI := newObjectLayerFn(); objAPI != nil { if objAPI := newObjectLayerFn(); objAPI != nil {
oerr = objAPI.Shutdown(nil) oerr = objAPI.Shutdown(context.Background())
errorIf(oerr, "Unable to shutdown object layer") errorIf(oerr, "Unable to shutdown object layer")
} }
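Both shutdown paths above pass context.Background(), which never cancels. A deadline-bounded variant is sketched below purely for illustration; it is not what this commit does, shutdownObjectLayerWithTimeout is a hypothetical name, and whether Shutdown actually honors cancellation depends on the object-layer implementation.

func shutdownObjectLayerWithTimeout(objAPI ObjectLayer, timeout time.Duration) error {
	// Bound the shutdown instead of letting it run unbounded on a background context.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return objAPI.Shutdown(ctx)
}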
@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"reflect" "reflect"
"testing" "testing"
@ -180,14 +181,14 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
xl := obj.(*xlObjects) xl := obj.(*xlObjects)
xlDisks := xl.storageDisks xlDisks := xl.storageDisks
err := obj.MakeBucketWithLocation(nil, bucket, globalMinioDefaultRegion) err := obj.MakeBucketWithLocation(context.Background(), bucket, globalMinioDefaultRegion)
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket %v", err) t.Fatalf("Failed to make a bucket %v", err)
} }
// Object for test case 1 - No StorageClass defined, no MetaData in PutObject // Object for test case 1 - No StorageClass defined, no MetaData in PutObject
object1 := "object1" object1 := "object1"
_, err = obj.PutObject(nil, bucket, object1, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object1, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -198,7 +199,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
object2 := "object2" object2 := "object2"
metadata2 := make(map[string]string) metadata2 := make(map[string]string)
metadata2["x-amz-storage-class"] = reducedRedundancyStorageClass metadata2["x-amz-storage-class"] = reducedRedundancyStorageClass
_, err = obj.PutObject(nil, bucket, object2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2) _, err = obj.PutObject(context.Background(), bucket, object2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -209,7 +210,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
object3 := "object3" object3 := "object3"
metadata3 := make(map[string]string) metadata3 := make(map[string]string)
metadata3["x-amz-storage-class"] = standardStorageClass metadata3["x-amz-storage-class"] = standardStorageClass
_, err = obj.PutObject(nil, bucket, object3, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3) _, err = obj.PutObject(context.Background(), bucket, object3, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -225,7 +226,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
Scheme: "EC", Scheme: "EC",
} }
_, err = obj.PutObject(nil, bucket, object4, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4) _, err = obj.PutObject(context.Background(), bucket, object4, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -243,7 +244,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
Scheme: "EC", Scheme: "EC",
} }
_, err = obj.PutObject(nil, bucket, object5, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5) _, err = obj.PutObject(context.Background(), bucket, object5, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -261,7 +262,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
Scheme: "EC", Scheme: "EC",
} }
_, err = obj.PutObject(nil, bucket, object6, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6) _, err = obj.PutObject(context.Background(), bucket, object6, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -279,7 +280,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
Scheme: "EC", Scheme: "EC",
} }
_, err = obj.PutObject(nil, bucket, object7, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7) _, err = obj.PutObject(context.Background(), bucket, object7, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -19,6 +19,7 @@ package cmd
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/hmac" "crypto/hmac"
crand "crypto/rand" crand "crypto/rand"
@ -1766,7 +1767,7 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(nil, bucketName, "") err := obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create newbucket, return err. // failed to create newbucket, return err.
return "", nil, err return "", nil, err
@ -1872,7 +1873,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
Version: "1.0", Version: "1.0",
Statements: []policy.Statement{policyFunc(bucketName, "")}, Statements: []policy.Statement{policyFunc(bucketName, "")},
} }
obj.SetBucketPolicy(nil, bucketName, bp) obj.SetBucketPolicy(context.Background(), bucketName, bp)
// now call the handler again with the unsigned/anonymous request, it should be accepted. // now call the handler again with the unsigned/anonymous request, it should be accepted.
rec = httptest.NewRecorder() rec = httptest.NewRecorder()
@ -330,5 +330,5 @@ func newContext(r *http.Request, api string) context.Context {
object = prefix object = prefix
} }
return logger.ContextSet(context.Background(), &logger.ReqInfo{r.RemoteAddr, r.Header.Get("user-agent"), "", api, bucket, object, nil}) return logger.SetContext(context.Background(), &logger.ReqInfo{r.RemoteAddr, r.Header.Get("user-agent"), "", api, bucket, object, nil})
} }
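This hunk is the other half of the commit: newContext builds a request-scoped context by attaching a *logger.ReqInfo through the renamed logger.SetContext. A condensed sketch of that construction follows; buildRequestContext is an illustrative name, and the matching accessor is presumably the renamed logger.GetContext from the commit title, whose signature is not shown in this diff and is therefore omitted.

// buildRequestContext mirrors newContext above for a known API, bucket and object.
func buildRequestContext(r *http.Request, api, bucket, object string) context.Context {
	reqInfo := &logger.ReqInfo{r.RemoteAddr, r.Header.Get("user-agent"), "", api, bucket, object, nil}
	return logger.SetContext(context.Background(), reqInfo)
}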
@ -18,6 +18,7 @@ package cmd
import ( import (
"archive/zip" "archive/zip"
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
@ -29,7 +30,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/gorilla/rpc/v2/json2" "github.com/gorilla/rpc/v2/json2"
"github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/policy"
@ -106,7 +107,7 @@ func (web *webAPIHandlers) StorageInfo(r *http.Request, args *AuthRPCArgs, reply
if !isHTTPRequestValid(r) { if !isHTTPRequestValid(r) {
return toJSONError(errAuthentication) return toJSONError(errAuthentication)
} }
reply.StorageInfo = objectAPI.StorageInfo(nil) reply.StorageInfo = objectAPI.StorageInfo(context.Background())
reply.UIVersion = browser.UIVersion reply.UIVersion = browser.UIVersion
return nil return nil
} }
@ -131,7 +132,7 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
return toJSONError(errInvalidBucketName) return toJSONError(errInvalidBucketName)
} }
if err := objectAPI.MakeBucketWithLocation(nil, args.BucketName, globalServerConfig.GetRegion()); err != nil { if err := objectAPI.MakeBucketWithLocation(context.Background(), args.BucketName, globalServerConfig.GetRegion()); err != nil {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
@ -154,7 +155,7 @@ func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs,
return toJSONError(errAuthentication) return toJSONError(errAuthentication)
} }
err := objectAPI.DeleteBucket(nil, args.BucketName) err := objectAPI.DeleteBucket(context.Background(), args.BucketName)
if err != nil { if err != nil {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
@ -187,7 +188,7 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
if authErr != nil { if authErr != nil {
return toJSONError(authErr) return toJSONError(authErr)
} }
buckets, err := objectAPI.ListBuckets(nil) buckets, err := objectAPI.ListBuckets(context.Background())
if err != nil { if err != nil {
return toJSONError(err) return toJSONError(err)
} }
@ -256,7 +257,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
default: default:
return errAuthentication return errAuthentication
} }
lo, err := objectAPI.ListObjects(nil, args.BucketName, args.Prefix, args.Marker, slashSeparator, 1000) lo, err := objectAPI.ListObjects(context.Background(), args.BucketName, args.Prefix, args.Marker, slashSeparator, 1000)
if err != nil { if err != nil {
return &json2.Error{Message: err.Error()} return &json2.Error{Message: err.Error()}
} }
@ -323,7 +324,7 @@ next:
marker := "" marker := ""
for { for {
var lo ListObjectsInfo var lo ListObjectsInfo
lo, err = objectAPI.ListObjects(nil, args.BucketName, objectName, marker, "", 1000) lo, err = objectAPI.ListObjects(context.Background(), args.BucketName, objectName, marker, "", 1000)
if err != nil { if err != nil {
break next break next
} }
@ -562,7 +563,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
return return
} }
objInfo, err := objectAPI.PutObject(nil, bucket, object, hashReader, metadata) objInfo, err := objectAPI.PutObject(context.Background(), bucket, object, hashReader, metadata)
if err != nil { if err != nil {
writeWebErrorResponse(w, err) writeWebErrorResponse(w, err)
return return
@ -598,7 +599,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
// Add content disposition. // Add content disposition.
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object))) w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object)))
if err := objectAPI.GetObject(nil, bucket, object, 0, -1, w, ""); err != nil { if err := objectAPI.GetObject(context.Background(), bucket, object, 0, -1, w, ""); err != nil {
/// No need to print error, response writer already written to. /// No need to print error, response writer already written to.
return return
} }
@ -647,7 +648,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
for _, object := range args.Objects { for _, object := range args.Objects {
// Writes compressed object file to the response. // Writes compressed object file to the response.
zipit := func(objectName string) error { zipit := func(objectName string) error {
info, err := objectAPI.GetObjectInfo(nil, args.BucketName, objectName) info, err := objectAPI.GetObjectInfo(context.Background(), args.BucketName, objectName)
if err != nil { if err != nil {
return err return err
} }
@ -662,7 +663,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
writeWebErrorResponse(w, errUnexpected) writeWebErrorResponse(w, errUnexpected)
return err return err
} }
return objectAPI.GetObject(nil, args.BucketName, objectName, 0, info.Size, writer, "") return objectAPI.GetObject(context.Background(), args.BucketName, objectName, 0, info.Size, writer, "")
} }
if !hasSuffix(object, slashSeparator) { if !hasSuffix(object, slashSeparator) {
@ -678,7 +679,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
// date to the response writer. // date to the response writer.
marker := "" marker := ""
for { for {
lo, err := objectAPI.ListObjects(nil, args.BucketName, pathJoin(args.Prefix, object), marker, "", 1000) lo, err := objectAPI.ListObjects(context.Background(), args.BucketName, pathJoin(args.Prefix, object), marker, "", 1000)
if err != nil { if err != nil {
return return
} }
@ -719,7 +720,7 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
return toJSONError(errAuthentication) return toJSONError(errAuthentication)
} }
var policyInfo, err = objectAPI.GetBucketPolicy(nil, args.BucketName) var policyInfo, err = objectAPI.GetBucketPolicy(context.Background(), args.BucketName)
if err != nil { if err != nil {
_, ok := errors.Cause(err).(BucketPolicyNotFound) _, ok := errors.Cause(err).(BucketPolicyNotFound)
if !ok { if !ok {
@ -760,7 +761,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
if !isHTTPRequestValid(r) { if !isHTTPRequestValid(r) {
return toJSONError(errAuthentication) return toJSONError(errAuthentication)
} }
var policyInfo, err = objectAPI.GetBucketPolicy(nil, args.BucketName) var policyInfo, err = objectAPI.GetBucketPolicy(context.Background(), args.BucketName)
if err != nil { if err != nil {
_, ok := errors.Cause(err).(PolicyNotFound) _, ok := errors.Cause(err).(PolicyNotFound)
if !ok { if !ok {
@ -804,7 +805,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
} }
} }
var policyInfo, err = objectAPI.GetBucketPolicy(nil, args.BucketName) var policyInfo, err = objectAPI.GetBucketPolicy(context.Background(), args.BucketName)
if err != nil { if err != nil {
if _, ok := errors.Cause(err).(PolicyNotFound); !ok { if _, ok := errors.Cause(err).(PolicyNotFound); !ok {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
@ -815,14 +816,14 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketP, args.BucketName, args.Prefix) policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketP, args.BucketName, args.Prefix)
if len(policyInfo.Statements) == 0 { if len(policyInfo.Statements) == 0 {
if err = objectAPI.DeleteBucketPolicy(nil, args.BucketName); err != nil { if err = objectAPI.DeleteBucketPolicy(context.Background(), args.BucketName); err != nil {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
return nil return nil
} }
// Parse validate and save bucket policy. // Parse validate and save bucket policy.
if err := objectAPI.SetBucketPolicy(nil, args.BucketName, policyInfo); err != nil { if err := objectAPI.SetBucketPolicy(context.Background(), args.BucketName, policyInfo); err != nil {
return toJSONError(err, args.BucketName) return toJSONError(err, args.BucketName)
} }
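
Every hunk in this file makes the same substitution: ObjectLayer calls that used to receive a nil context now receive context.Background(). The distinction matters because a nil context.Context is not a safe placeholder; any callee that selects on ctx.Done() or reads ctx.Value panics on a nil interface. The standalone sketch below is not part of the commit (waitOrProceed is an illustrative name); it only shows why the empty background context is the right stand-in when no request-scoped context is available yet.

package main

import (
	"context"
	"fmt"
)

// waitOrProceed stands in for library code that consults the context it
// is handed, for example to honour cancellation.
func waitOrProceed(ctx context.Context) string {
	select {
	case <-ctx.Done(): // calling Done on a nil context would panic
		return "cancelled"
	default:
		return "proceed"
	}
}

func main() {
	// context.Background() is a non-nil, never-cancelled, empty context,
	// so the call below is safe and prints "proceed".
	fmt.Println(waitOrProceed(context.Background()))

	// By contrast, handing a nil context.Context to a callee that invokes
	// its methods panics at runtime:
	//   var ctx context.Context
	//   waitOrProceed(ctx) // nil pointer dereference
}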

View File

@ -19,6 +19,7 @@ package cmd
import ( import (
"archive/zip" "archive/zip"
"bytes" "bytes"
"context"
"crypto/md5" "crypto/md5"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
@ -340,7 +341,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
} }
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
t.Fatalf("failed to create bucket: %s (%s)", err.Error(), instanceType) t.Fatalf("failed to create bucket: %s (%s)", err.Error(), instanceType)
} }
@ -368,7 +369,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
for _, test := range testCases { for _, test := range testCases {
if test.initWithObject { if test.initWithObject {
data := bytes.NewBufferString("hello") data := bytes.NewBufferString("hello")
_, err = obj.PutObject(nil, test.bucketName, "object", mustGetHashReader(t, data, int64(data.Len()), "", ""), nil) _, err = obj.PutObject(context.Background(), test.bucketName, "object", mustGetHashReader(t, data, int64(data.Len()), "", ""), nil)
// _, err = obj.PutObject(test.bucketName, "object", int64(data.Len()), data, nil, "") // _, err = obj.PutObject(test.bucketName, "object", int64(data.Len()), data, nil, "")
if err != nil { if err != nil {
t.Fatalf("could not put object to %s, %s", test.bucketName, err.Error()) t.Fatalf("could not put object to %s, %s", test.bucketName, err.Error())
@ -405,7 +406,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
// If we created the bucket with an object, now delete the object to cleanup. // If we created the bucket with an object, now delete the object to cleanup.
if test.initWithObject { if test.initWithObject {
err = obj.DeleteObject(nil, test.bucketName, "object") err = obj.DeleteObject(context.Background(), test.bucketName, "object")
if err != nil { if err != nil {
t.Fatalf("could not delete object, %s", err.Error()) t.Fatalf("could not delete object, %s", err.Error())
} }
@ -417,7 +418,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
continue continue
} }
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create new bucket, abort. // failed to create new bucket, abort.
t.Fatalf("failed to create new bucket (%s): %s", instanceType, err.Error()) t.Fatalf("failed to create new bucket (%s): %s", instanceType, err.Error())
@ -445,7 +446,7 @@ func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
// Create bucket. // Create bucket.
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -496,7 +497,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
objectSize := 1 * humanize.KiByte objectSize := 1 * humanize.KiByte
// Create bucket. // Create bucket.
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -504,7 +505,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
data := bytes.Repeat([]byte("a"), objectSize) data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"} metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata) _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil { if err != nil {
t.Fatalf("Was not able to upload an object, %v", err) t.Fatalf("Was not able to upload an object, %v", err)
@ -558,7 +559,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")}, Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")},
} }
obj.SetBucketPolicy(nil, bucketName, policy) obj.SetBucketPolicy(context.Background(), bucketName, policy)
// Unauthenticated ListObjects with READ bucket policy should succeed. // Unauthenticated ListObjects with READ bucket policy should succeed.
err, reply = test("") err, reply = test("")
@ -590,7 +591,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
objectSize := 1 * humanize.KiByte objectSize := 1 * humanize.KiByte
// Create bucket. // Create bucket.
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -598,14 +599,14 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
data := bytes.Repeat([]byte("a"), objectSize) data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"} metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata) _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil { if err != nil {
t.Fatalf("Was not able to upload an object, %v", err) t.Fatalf("Was not able to upload an object, %v", err)
} }
objectName = "a/object" objectName = "a/object"
metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"} metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata) _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil { if err != nil {
t.Fatalf("Was not able to upload an object, %v", err) t.Fatalf("Was not able to upload an object, %v", err)
} }
@ -893,7 +894,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
return rec.Code return rec.Code
} }
// Create bucket. // Create bucket.
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -906,7 +907,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
} }
var byteBuffer bytes.Buffer var byteBuffer bytes.Buffer
err = obj.GetObject(nil, bucketName, objectName, 0, int64(len(content)), &byteBuffer, "") err = obj.GetObject(context.Background(), bucketName, objectName, 0, int64(len(content)), &byteBuffer, "")
if err != nil { if err != nil {
t.Fatalf("Failed, %v", err) t.Fatalf("Failed, %v", err)
} }
@ -932,7 +933,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
Statements: []policy.Statement{getWriteOnlyObjectStatement(bucketName, "")}, Statements: []policy.Statement{getWriteOnlyObjectStatement(bucketName, "")},
} }
obj.SetBucketPolicy(nil, bucketName, bp) obj.SetBucketPolicy(context.Background(), bucketName, bp)
// Unauthenticated upload with WRITE policy should succeed. // Unauthenticated upload with WRITE policy should succeed.
code = test("", true) code = test("", true)
@ -978,7 +979,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
} }
// Create bucket. // Create bucket.
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -986,7 +987,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
content := []byte("temporary file's content") content := []byte("temporary file's content")
metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"} metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata) _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
if err != nil { if err != nil {
t.Fatalf("Was not able to upload an object, %v", err) t.Fatalf("Was not able to upload an object, %v", err)
} }
@ -1039,7 +1040,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")}, Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")},
} }
obj.SetBucketPolicy(nil, bucketName, bp) obj.SetBucketPolicy(context.Background(), bucketName, bp)
// Unauthenticated download with READ policy should succeed. // Unauthenticated download with READ policy should succeed.
code, bodyContent = test("") code, bodyContent = test("")
@ -1072,15 +1073,15 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa
fileThree := "cccccccccccccc" fileThree := "cccccccccccccc"
// Create bucket. // Create bucket.
err = obj.MakeBucketWithLocation(nil, bucket, "") err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
} }
obj.PutObject(nil, bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil) obj.PutObject(context.Background(), bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
obj.PutObject(nil, bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil) obj.PutObject(context.Background(), bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
obj.PutObject(nil, bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil) obj.PutObject(context.Background(), bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)
test := func(token string) (int, []byte) { test := func(token string) (int, []byte) {
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
@ -1157,7 +1158,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
objectSize := 1 * humanize.KiByte objectSize := 1 * humanize.KiByte
// Create bucket. // Create bucket.
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)
@ -1165,7 +1166,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
data := bytes.Repeat([]byte("a"), objectSize) data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"} metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata) _, err = obj.PutObject(context.Background(), bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil { if err != nil {
t.Fatalf("Was not able to upload an object, %v", err) t.Fatalf("Was not able to upload an object, %v", err)
} }
@ -1258,7 +1259,7 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil { if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -1336,7 +1337,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t
rec := httptest.NewRecorder() rec := httptest.NewRecorder()
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil { if err := obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -1434,7 +1435,7 @@ func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE
// Create a bucket // Create a bucket
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil { if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -1674,7 +1675,7 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) {
defer removeRoots(fsDirs) defer removeRoots(fsDirs)
bucketName := "mybucket" bucketName := "mybucket"
err = obj.MakeBucketWithLocation(nil, bucketName, "") err = obj.MakeBucketWithLocation(context.Background(), bucketName, "")
if err != nil { if err != nil {
t.Fatal("Cannot make bucket:", err) t.Fatal("Cannot make bucket:", err)
} }

View File

@ -368,7 +368,7 @@ func undoMakeBucketSets(bucket string, sets []*xlObjects, errs []error) {
index := index index := index
if errs[index] == nil { if errs[index] == nil {
g.Go(func() error { g.Go(func() error {
return sets[index].DeleteBucket(nil, bucket) return sets[index].DeleteBucket(context.Background(), bucket)
}, index) }, index)
} }
} }
@ -509,7 +509,7 @@ func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) {
index := index index := index
if errs[index] == nil { if errs[index] == nil {
g.Go(func() error { g.Go(func() error {
return sets[index].MakeBucketWithLocation(nil, bucket, "") return sets[index].MakeBucketWithLocation(context.Background(), bucket, "")
}, index) }, index)
} }
} }
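
The two undo helpers above fan the same operation out over every erasure set, skipping the sets whose original operation failed. As a rough sketch of that shape, assuming golang.org/x/sync/errgroup in place of the indexed errgroup type used in the file (so per-index error reporting is omitted):

package example

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// bucketDeleter is an illustrative stand-in for a single erasure set.
type bucketDeleter interface {
	DeleteBucket(ctx context.Context, bucket string) error
}

// undoMakeBucketSets deletes the bucket from every set on which the
// earlier MakeBucket succeeded (errs[i] == nil).
func undoMakeBucketSets(bucket string, sets []bucketDeleter, errs []error) error {
	var g errgroup.Group
	for i := range sets {
		if errs[i] != nil {
			continue // creation failed on this set, nothing to undo
		}
		set := sets[i]
		g.Go(func() error {
			return set.DeleteBucket(context.Background(), bucket)
		})
	}
	return g.Wait()
}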

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -123,7 +124,7 @@ func TestStorageInfoSets(t *testing.T) {
} }
// Get storage info first attempt. // Get storage info first attempt.
disks16Info := objLayer.StorageInfo(nil) disks16Info := objLayer.StorageInfo(context.Background())
// This test assumes homogeneity between all disks, // This test assumes homogeneity between all disks,
// i.e. if we lose one disk the effective storage // i.e. if we lose one disk the effective storage

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"os" "os"
"testing" "testing"
) )
@ -43,11 +44,11 @@ func TestXLParentDirIsObject(t *testing.T) {
bucketName := "testbucket" bucketName := "testbucket"
objectName := "object" objectName := "object"
if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil { if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal(err) t.Fatal(err)
} }
objectContent := "12345" objectContent := "12345"
objInfo, err := obj.PutObject(nil, bucketName, objectName, objInfo, err := obj.PutObject(context.Background(), bucketName, objectName,
mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil) mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -198,15 +199,15 @@ func TestListOnlineDisks(t *testing.T) {
// Prepare bucket/object backend for the tests below. // Prepare bucket/object backend for the tests below.
// Cleanup from previous test. // Cleanup from previous test.
obj.DeleteObject(nil, bucket, object) obj.DeleteObject(context.Background(), bucket, object)
obj.DeleteBucket(nil, bucket) obj.DeleteBucket(context.Background(), bucket)
err = obj.MakeBucketWithLocation(nil, "bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket %v", err) t.Fatalf("Failed to make a bucket %v", err)
} }
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
@ -298,12 +299,12 @@ func TestDisksWithAllParts(t *testing.T) {
xl := obj.(*xlObjects) xl := obj.(*xlObjects)
xlDisks := xl.storageDisks xlDisks := xl.storageDisks
err = obj.MakeBucketWithLocation(nil, "bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket %v", err) t.Fatalf("Failed to make a bucket %v", err)
} }
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }

View File

@ -168,7 +168,7 @@ func healBucketMetadata(xl xlObjects, bucket string, dryRun bool) (
results []madmin.HealResultItem, err error) { results []madmin.HealResultItem, err error) {
healBucketMetaFn := func(metaPath string) error { healBucketMetaFn := func(metaPath string) error {
result, healErr := xl.HealObject(nil, minioMetaBucket, metaPath, dryRun) result, healErr := xl.HealObject(context.Background(), minioMetaBucket, metaPath, dryRun)
// If object is not found, no result to add. // If object is not found, no result to add.
if isErrObjectNotFound(healErr) { if isErrObjectNotFound(healErr) {
return nil return nil
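
healBucketMetadata treats a missing metadata object as a non-event: if the heal call reports not-found, the path is skipped rather than surfaced as an error. A minimal sketch of that skip-on-not-found loop, with placeholder names for the heal API and the sentinel error:

package example

import (
	"context"
	"errors"
)

// errObjectNotFound is a placeholder for the backend's not-found error.
var errObjectNotFound = errors.New("object not found")

type healer interface {
	HealObject(ctx context.Context, bucket, object string, dryRun bool) (string, error)
}

// healMetaPaths heals each metadata path, ignoring paths that do not
// exist and collecting results for the ones that do.
func healMetaPaths(h healer, bucket string, paths []string, dryRun bool) ([]string, error) {
	var results []string
	for _, p := range paths {
		res, err := h.HealObject(context.Background(), bucket, p, dryRun)
		if errors.Is(err, errObjectNotFound) {
			continue // nothing stored at this path, nothing to heal
		}
		if err != nil {
			return nil, err
		}
		results = append(results, res)
	}
	return results, nil
}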

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
@ -47,14 +48,14 @@ func TestUndoMakeBucket(t *testing.T) {
} }
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil { if err = obj.MakeBucketWithLocation(context.Background(), bucketName, ""); err != nil {
t.Fatal(err) t.Fatal(err)
} }
xl := obj.(*xlObjects) xl := obj.(*xlObjects)
undoMakeBucket(xl.storageDisks, bucketName) undoMakeBucket(xl.storageDisks, bucketName)
// Validate if bucket was deleted properly. // Validate if bucket was deleted properly.
_, err = obj.GetBucketInfo(nil, bucketName) _, err = obj.GetBucketInfo(context.Background(), bucketName)
if err != nil { if err != nil {
err = errors.Cause(err) err = errors.Cause(err)
switch err.(type) { switch err.(type) {
@ -91,21 +92,21 @@ func TestHealObjectXL(t *testing.T) {
object := "object" object := "object"
data := bytes.Repeat([]byte("a"), 5*1024*1024) data := bytes.Repeat([]byte("a"), 5*1024*1024)
err = obj.MakeBucketWithLocation(nil, bucket, "") err = obj.MakeBucketWithLocation(context.Background(), bucket, "")
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket - %v", err) t.Fatalf("Failed to make a bucket - %v", err)
} }
// Create an object with multiple parts uploaded in decreasing // Create an object with multiple parts uploaded in decreasing
// part number. // part number.
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucket, object, nil)
if err != nil { if err != nil {
t.Fatalf("Failed to create a multipart upload - %v", err) t.Fatalf("Failed to create a multipart upload - %v", err)
} }
var uploadedParts []CompletePart var uploadedParts []CompletePart
for _, partID := range []int{2, 1} { for _, partID := range []int{2, 1} {
pInfo, err1 := obj.PutObjectPart(nil, bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", "")) pInfo, err1 := obj.PutObjectPart(context.Background(), bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""))
if err1 != nil { if err1 != nil {
t.Fatalf("Failed to upload a part - %v", err1) t.Fatalf("Failed to upload a part - %v", err1)
} }
@ -115,7 +116,7 @@ func TestHealObjectXL(t *testing.T) {
}) })
} }
_, err = obj.CompleteMultipartUpload(nil, bucket, object, uploadID, uploadedParts) _, err = obj.CompleteMultipartUpload(context.Background(), bucket, object, uploadID, uploadedParts)
if err != nil { if err != nil {
t.Fatalf("Failed to complete multipart upload - %v", err) t.Fatalf("Failed to complete multipart upload - %v", err)
} }
@ -128,7 +129,7 @@ func TestHealObjectXL(t *testing.T) {
t.Fatalf("Failed to delete a file - %v", err) t.Fatalf("Failed to delete a file - %v", err)
} }
_, err = obj.HealObject(nil, bucket, object, false) _, err = obj.HealObject(context.Background(), bucket, object, false)
if err != nil { if err != nil {
t.Fatalf("Failed to heal object - %v", err) t.Fatalf("Failed to heal object - %v", err)
} }
@ -144,7 +145,7 @@ func TestHealObjectXL(t *testing.T) {
} }
// Try healing now, expect to receive errDiskNotFound. // Try healing now, expect to receive errDiskNotFound.
_, err = obj.HealObject(nil, bucket, object, false) _, err = obj.HealObject(context.Background(), bucket, object, false)
// since majority of xl.jsons are not available, object quorum can't be read properly and error will be errXLReadQuorum // since majority of xl.jsons are not available, object quorum can't be read properly and error will be errXLReadQuorum
if errors.Cause(err) != errXLReadQuorum { if errors.Cause(err) != errXLReadQuorum {
t.Errorf("Expected %v but received %v", errDiskNotFound, err) t.Errorf("Expected %v but received %v", errDiskNotFound, err)

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"errors" "errors"
"os" "os"
"path" "path"
@ -39,7 +40,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
bucketName := getRandomBucketName() bucketName := getRandomBucketName()
objectName := "test-object" objectName := "test-object"
// create bucket. // create bucket.
err := obj.MakeBucketWithLocation(nil, bucketName, "") err := obj.MakeBucketWithLocation(context.Background(), bucketName, "")
// Stop the test if creation of the bucket fails. // Stop the test if creation of the bucket fails.
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -68,7 +69,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
// iterate through the above set of inputs and upload the object. // iterate through the above set of inputs and upload the object.

for i, input := range putObjectInputs { for i, input := range putObjectInputs {
// uploading the object. // uploading the object.
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData) _, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test. // if object upload fails stop the test.
if err != nil { if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err) t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -114,13 +115,13 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "") err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], "")
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Initiate Multipart Upload on the above created bucket. // Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], nil)
if err != nil { if err != nil {
// Failed to create NewMultipartUpload, abort. // Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -151,7 +152,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
sha256sum := "" sha256sum := ""
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases { for _, testCase := range createPartCases {
_, perr := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum)) _, perr := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if perr != nil { if perr != nil {
t.Fatalf("%s : %s", instanceType, perr) t.Fatalf("%s : %s", instanceType, perr)
} }

View File

@ -469,7 +469,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// Implements S3 compatible ListObjectParts API. The resulting // Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshalled directly into XML and // ListPartsInfo structure is marshalled directly into XML and
// replied back to the client. // replied back to the client.
func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) { func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, xl); err != nil { if err := checkListPartsArgs(bucket, object, xl); err != nil {
return result, err return result, err
} }
@ -546,40 +546,6 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
return result, nil return result, nil
} }
// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, xl); err != nil {
return lpi, err
}
// Hold the lock so that two parallel complete-multipart-uploads
// do not leave a stale uploads.json behind.
objectMPartPathLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object))
if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil {
return lpi, errors.Trace(err)
}
defer objectMPartPathLock.RUnlock()
// Hold lock so that there is no competing
// abort-multipart-upload or complete-multipart-upload.
uploadIDLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket,
pathJoin(bucket, object, uploadID))
if err := uploadIDLock.GetLock(globalListingTimeout); err != nil {
return lpi, err
}
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return lpi, errors.Trace(InvalidUploadID{UploadID: uploadID})
}
result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
return result, err
}
// CompleteMultipartUpload - completes an ongoing multipart // CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client. // transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual // Returns an md5sum calculated by concatenating all the individual
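
Per the doc comment on ListObjectParts above, the call is a paginated, S3-compatible listing driven by a part-number marker. As a caller's-eye sketch of that pagination (the narrow interface and the nextMarker/truncated return values below are assumptions for illustration, not the actual ListPartsInfo shape):

package example

import "context"

// partLister is an illustrative subset of the multipart API.
type partLister interface {
	ListObjectParts(ctx context.Context, bucket, object, uploadID string,
		partNumberMarker, maxParts int) (parts []int, nextMarker int, truncated bool, err error)
}

// allParts pages through every uploaded part number for an upload,
// resuming from the marker returned by the previous call.
func allParts(api partLister, bucket, object, uploadID string) ([]int, error) {
	var out []int
	marker := 0
	for {
		parts, next, truncated, err := api.ListObjectParts(context.Background(),
			bucket, object, uploadID, marker, 1000)
		if err != nil {
			return nil, err
		}
		out = append(out, parts...)
		if !truncated {
			return out, nil
		}
		marker = next
	}
}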

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"os" "os"
"testing" "testing"
"time" "time"
@ -50,8 +51,8 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
obj.MakeBucketWithLocation(nil, bucketName, "") obj.MakeBucketWithLocation(context.Background(), bucketName, "")
uploadID, err := obj.NewMultipartUpload(nil, bucketName, objectName, nil) uploadID, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, nil)
if err != nil { if err != nil {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
@ -66,52 +67,7 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
globalServiceDoneCh <- struct{}{} globalServiceDoneCh <- struct{}{}
// Check if upload id was already purged. // Check if upload id was already purged.
if err = obj.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil { if err = obj.AbortMultipartUpload(context.Background(), bucketName, objectName, uploadID); err != nil {
err = errors.Cause(err)
if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err)
}
}
}
// Tests cleanup of stale upload ids.
func TestXLCleanupMultipartUpload(t *testing.T) {
// Initialize configuration
root, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("%s", err)
}
defer os.RemoveAll(root)
// Create an instance of xl backend
obj, fsDirs, err := prepareXL16()
if err != nil {
t.Fatal(err)
}
// Defer cleanup of backend directories
defer removeRoots(fsDirs)
xl := obj.(*xlObjects)
// Close the go-routine, we are going to
// manually start it and test in this test case.
globalServiceDoneCh <- struct{}{}
bucketName := "bucket"
objectName := "object"
obj.MakeBucketWithLocation(nil, bucketName, "")
uploadID, err := obj.NewMultipartUpload(nil, bucketName, objectName, nil)
if err != nil {
t.Fatal("Unexpected err: ", err)
}
if err = cleanupStaleMultipartUpload(bucketName, 0, obj, xl.listMultipartUploadsCleanup); err != nil {
t.Fatal("Unexpected err: ", err)
}
// Check if upload id was already purged.
if err = obj.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil {
err = errors.Cause(err) err = errors.Cause(err)
if _, ok := err.(InvalidUploadID); !ok { if _, ok := err.(InvalidUploadID); !ok {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)

View File

@ -18,6 +18,7 @@ package cmd
import ( import (
"bytes" "bytes"
"context"
"io/ioutil" "io/ioutil"
"math/rand" "math/rand"
"os" "os"
@ -43,23 +44,23 @@ func TestRepeatPutObjectPart(t *testing.T) {
// cleaning up of temporary test directories // cleaning up of temporary test directories
defer removeRoots(disks) defer removeRoots(disks)
err = objLayer.MakeBucketWithLocation(nil, "bucket1", "") err = objLayer.MakeBucketWithLocation(context.Background(), "bucket1", "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
uploadID, err := objLayer.NewMultipartUpload(nil, "bucket1", "mpartObj1", nil) uploadID, err := objLayer.NewMultipartUpload(context.Background(), "bucket1", "mpartObj1", nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte) fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
md5Hex := getMD5Hash(fiveMBBytes) md5Hex := getMD5Hash(fiveMBBytes)
_, err = objLayer.PutObjectPart(nil, "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, "")) _, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930 // PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
_, err = objLayer.PutObjectPart(nil, "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, "")) _, err = objLayer.PutObjectPart(context.Background(), "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -85,18 +86,18 @@ func TestXLDeleteObjectBasic(t *testing.T) {
} }
// Make bucket for Test 7 to pass // Make bucket for Test 7 to pass
err = xl.MakeBucketWithLocation(nil, "bucket", "") err = xl.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Create object "obj" under bucket "bucket" for Test 7 to pass // Create object "obj" under bucket "bucket" for Test 7 to pass
_, err = xl.PutObject(nil, "bucket", "obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) _, err = xl.PutObject(context.Background(), "bucket", "obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil { if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err) t.Fatalf("XL Object upload failed: <ERROR> %s", err)
} }
for i, test := range testCases { for i, test := range testCases {
actualErr := xl.DeleteObject(nil, test.bucket, test.object) actualErr := xl.DeleteObject(context.Background(), test.bucket, test.object)
actualErr = errors.Cause(actualErr) actualErr = errors.Cause(actualErr)
if test.expectedErr != nil && actualErr != test.expectedErr { if test.expectedErr != nil && actualErr != test.expectedErr {
t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr) t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr)
@ -121,7 +122,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
xl := obj.(*xlObjects) xl := obj.(*xlObjects)
// Create "bucket" // Create "bucket"
err = obj.MakeBucketWithLocation(nil, "bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -129,7 +130,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
bucket := "bucket" bucket := "bucket"
object := "object" object := "object"
// Create object "obj" under bucket "bucket". // Create object "obj" under bucket "bucket".
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -138,13 +139,13 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
for i := range xl.storageDisks[:7] { for i := range xl.storageDisks[:7] {
xl.storageDisks[i] = newNaughtyDisk(xl.storageDisks[i], nil, errFaultyDisk) xl.storageDisks[i] = newNaughtyDisk(xl.storageDisks[i], nil, errFaultyDisk)
} }
err = obj.DeleteObject(nil, bucket, object) err = obj.DeleteObject(context.Background(), bucket, object)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Create "obj" under "bucket". // Create "obj" under "bucket".
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -152,7 +153,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
// Remove one more disk to 'lose' quorum, by setting it to nil. // Remove one more disk to 'lose' quorum, by setting it to nil.
xl.storageDisks[7] = nil xl.storageDisks[7] = nil
xl.storageDisks[8] = nil xl.storageDisks[8] = nil
err = obj.DeleteObject(nil, bucket, object) err = obj.DeleteObject(context.Background(), bucket, object)
err = errors.Cause(err) err = errors.Cause(err)
// since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error // since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error
if err != toObjectErr(errXLReadQuorum, bucket, object) { if err != toObjectErr(errXLReadQuorum, bucket, object) {
@ -172,7 +173,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
xl := obj.(*xlObjects) xl := obj.(*xlObjects)
// Create "bucket" // Create "bucket"
err = obj.MakeBucketWithLocation(nil, "bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -180,7 +181,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
bucket := "bucket" bucket := "bucket"
object := "object" object := "object"
// Create "object" under "bucket". // Create "object" under "bucket".
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -203,7 +204,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
} }
} }
// Fetch object from store. // Fetch object from store.
err = xl.GetObject(nil, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "") err = xl.GetObject(context.Background(), bucket, object, 0, int64(len("abcd")), ioutil.Discard, "")
err = errors.Cause(err) err = errors.Cause(err)
if err != toObjectErr(errXLReadQuorum, bucket, object) { if err != toObjectErr(errXLReadQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
@ -223,7 +224,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
xl := obj.(*xlObjects) xl := obj.(*xlObjects)
// Create "bucket" // Create "bucket"
err = obj.MakeBucketWithLocation(nil, "bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -231,7 +232,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
bucket := "bucket" bucket := "bucket"
object := "object" object := "object"
// Create "object" under "bucket". // Create "object" under "bucket".
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -254,7 +255,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
} }
} }
// Upload new content to same object "object" // Upload new content to same object "object"
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
err = errors.Cause(err) err = errors.Cause(err)
if err != toObjectErr(errXLWriteQuorum, bucket, object) { if err != toObjectErr(errXLWriteQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
@ -280,7 +281,7 @@ func TestHealing(t *testing.T) {
xl := obj.(*xlObjects) xl := obj.(*xlObjects)
// Create "bucket" // Create "bucket"
err = obj.MakeBucketWithLocation(nil, "bucket", "") err = obj.MakeBucketWithLocation(context.Background(), "bucket", "")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -295,7 +296,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil) _, err = obj.PutObject(context.Background(), bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -313,7 +314,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
_, err = xl.HealObject(nil, bucket, object, false) _, err = xl.HealObject(context.Background(), bucket, object, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -337,7 +338,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
_, err = xl.HealObject(nil, bucket, object, false) _, err = xl.HealObject(context.Background(), bucket, object, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -359,7 +360,7 @@ func TestHealing(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// This would create the bucket. // This would create the bucket.
_, err = xl.HealBucket(nil, bucket, false) _, err = xl.HealBucket(context.Background(), bucket, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -17,6 +17,7 @@
package cmd package cmd
import ( import (
"context"
"os" "os"
"reflect" "reflect"
"testing" "testing"
@ -37,7 +38,7 @@ func TestStorageInfo(t *testing.T) {
} }
// Get storage info first attempt. // Get storage info first attempt.
disks16Info := objLayer.StorageInfo(nil) disks16Info := objLayer.StorageInfo(context.Background())
// This test assumes homogeneity between all disks, // This test assumes homogeneity between all disks,
// i.e. if we lose one disk the effective storage // i.e. if we lose one disk the effective storage