Add context to the object-interface methods.
Make necessary changes to xl, fs, azure, and sia.
parent 9083bc152e
commit e452377b24
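The pattern of the change, in one place: every `ObjectLayer` method gains a `context.Context` as its first parameter, so request-scoped information can follow each call into the backend. A minimal compilable sketch of that signature change, using trimmed stand-ins (`objectLayer`, `fakeFS`, `BucketInfo`) rather than the real interface in `cmd`:

```go
package main

import (
	"context"
	"fmt"
)

// BucketInfo is a stand-in for the real type in cmd/.
type BucketInfo struct{ Name string }

// objectLayer is a trimmed-down stand-in for the real ObjectLayer
// interface: after this commit every method takes a context.Context first.
type objectLayer interface {
	// Before this commit: GetBucketInfo(bucket string) (BucketInfo, error)
	GetBucketInfo(ctx context.Context, bucket string) (BucketInfo, error)
}

type fakeFS struct{}

func (fakeFS) GetBucketInfo(ctx context.Context, bucket string) (BucketInfo, error) {
	// A real implementation can read request-scoped values from ctx
	// (for logging, cancellation, deadlines) before doing storage work.
	return BucketInfo{Name: bucket}, nil
}

func main() {
	var ol objectLayer = fakeFS{}
	bi, _ := ol.GetBucketInfo(context.Background(), "mybucket")
	fmt.Println(bi.Name)
}
```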
@@ -348,6 +348,7 @@ func (a adminAPIHandlers) ListLocksHandler(w http.ResponseWriter, r *http.Reques
 // ---------
 // Clear locks held on a given bucket, prefix and duration it was held for.
 func (a adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "ClearLocks")

     adminAPIErr := checkAdminRequestAuthType(r, globalServerConfig.GetRegion())
     if adminAPIErr != ErrNone {
@@ -379,7 +380,8 @@ func (a adminAPIHandlers) ClearLocksHandler(w http.ResponseWriter, r *http.Reque
         errorIf(err, "Failed to marshal lock information into json.")
         return
     }
-    newObjectLayerFn().ClearLocks(volLocks)
+    newObjectLayerFn().ClearLocks(ctx, volLocks)

     // Reply with list of locks cleared, as json.
     writeSuccessResponseJSON(w, jsonBytes)
@@ -447,6 +449,8 @@ func extractHealInitParams(r *http.Request) (bucket, objPrefix string,
 // sequence. However, if the force-start flag is provided, the server
 // aborts the running heal sequence and starts a new one.
 func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "Heal")
+
     // Get object layer instance.
     objLayer := newObjectLayerFn()
     if objLayer == nil {
@@ -515,7 +519,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
     }

     // find number of disks in the setup
-    info := objLayer.StorageInfo()
+    info := objLayer.StorageInfo(ctx)
     numDisks := info.Backend.OfflineDisks + info.Backend.OnlineDisks

     if clientToken == "" {

@@ -204,7 +204,7 @@ func (atb *adminXLTestBed) TearDown() {
 func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
     // Create an object myobject under bucket mybucket.
     bucketName := "mybucket"
-    err := atb.objLayer.MakeBucketWithLocation(bucketName, "")
+    err := atb.objLayer.MakeBucketWithLocation(nil, bucketName, "")
     if err != nil {
         t.Fatalf("Failed to make bucket %s - %v", bucketName,
             err)
@@ -215,7 +215,7 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
     objName := "myobject"
     for i := 0; i < 10; i++ {
         objectName := fmt.Sprintf("%s-%d", objName, i)
-        _, err = atb.objLayer.PutObject(bucketName, objectName,
+        _, err = atb.objLayer.PutObject(nil, bucketName, objectName,
             mustGetHashReader(t, bytes.NewReader([]byte("hello")),
                 int64(len("hello")), "", ""), nil)
         if err != nil {
@@ -228,13 +228,13 @@ func (atb *adminXLTestBed) GenerateHealTestData(t *testing.T) {
     // create a multipart upload (incomplete)
     {
         objName := "mpObject"
-        uploadID, err := atb.objLayer.NewMultipartUpload(bucketName,
+        uploadID, err := atb.objLayer.NewMultipartUpload(nil, bucketName,
             objName, nil)
         if err != nil {
             t.Fatalf("mp new error: %v", err)
         }

-        _, err = atb.objLayer.PutObjectPart(bucketName, objName,
+        _, err = atb.objLayer.PutObjectPart(nil, bucketName, objName,
             uploadID, 3, mustGetHashReader(t, bytes.NewReader(
                 []byte("hello")), int64(len("hello")), "", ""))
         if err != nil {
@@ -248,11 +248,11 @@ func (atb *adminXLTestBed) CleanupHealTestData(t *testing.T) {
     bucketName := "mybucket"
     objName := "myobject"
     for i := 0; i < 10; i++ {
-        atb.objLayer.DeleteObject(bucketName,
+        atb.objLayer.DeleteObject(nil, bucketName,
             fmt.Sprintf("%s-%d", objName, i))
     }

-    atb.objLayer.DeleteBucket(bucketName)
+    atb.objLayer.DeleteBucket(nil, bucketName)
 }

 // initTestObjLayer - Helper function to initialize an XL-based object

@@ -17,12 +17,14 @@
 package cmd

 import (
+    "context"
     "encoding/json"
     "fmt"
     "strings"
     "sync"
     "time"

+    "github.com/minio/minio/cmd/logger"
     "github.com/minio/minio/pkg/madmin"
 )
@@ -309,6 +311,9 @@ type healSequence struct {

     // the last result index sent to client
     lastSentResultIndex int64
+
+    // Holds the request-info for logging
+    ctx context.Context
 }

 // NewHealSequence - creates healSettings, assumes bucket and
@@ -316,6 +321,8 @@ type healSequence struct {
 func newHealSequence(bucket, objPrefix, clientAddr string,
     numDisks int, hs madmin.HealOpts, forceStart bool) *healSequence {

+    ctx := logger.ContextSet(context.Background(), &logger.ReqInfo{clientAddr, "", "", "Heal", bucket, objPrefix, nil})
+
     return &healSequence{
         bucket:    bucket,
         objPrefix: objPrefix,
@@ -333,6 +340,7 @@ func newHealSequence(bucket, objPrefix, clientAddr string,
         },
         traverseAndHealDoneCh: make(chan error),
         stopSignalCh:          make(chan struct{}),
+        ctx:                   ctx,
     }
 }

@@ -528,7 +536,7 @@ func (h *healSequence) healDiskFormat() error {
         return errServerNotInitialized
     }

-    res, err := objectAPI.HealFormat(h.settings.DryRun)
+    res, err := objectAPI.HealFormat(h.ctx, h.settings.DryRun)
     if err != nil {
         return errFnHealFromAPIErr(err)
     }
@@ -552,7 +560,7 @@ func (h *healSequence) healBuckets() error {
         return errServerNotInitialized
     }

-    buckets, err := objectAPI.ListBucketsHeal()
+    buckets, err := objectAPI.ListBucketsHeal(h.ctx)
     if err != nil {
         return errFnHealFromAPIErr(err)
     }
@@ -583,7 +591,7 @@ func (h *healSequence) healBucket(bucket string) error {
         return err
     }

-    results, err := objectAPI.HealBucket(bucket, h.settings.DryRun)
+    results, err := objectAPI.HealBucket(h.ctx, bucket, h.settings.DryRun)
     // push any available results before checking for error
     for _, result := range results {
         if perr := h.pushHealResultItem(result); perr != nil {
@@ -601,7 +609,7 @@ func (h *healSequence) healBucket(bucket string) error {
     if h.objPrefix != "" {
         // Check if an object named as the objPrefix exists,
         // and if so heal it.
-        _, err = objectAPI.GetObjectInfo(bucket, h.objPrefix)
+        _, err = objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix)
         if err == nil {
             err = h.healObject(bucket, h.objPrefix)
             if err != nil {
@@ -616,7 +624,7 @@ func (h *healSequence) healBucket(bucket string) error {
     marker := ""
     isTruncated := true
     for isTruncated {
-        objectInfos, err := objectAPI.ListObjectsHeal(bucket,
+        objectInfos, err := objectAPI.ListObjectsHeal(h.ctx, bucket,
             h.objPrefix, marker, "", 1000)
         if err != nil {
             return errFnHealFromAPIErr(err)
@@ -646,7 +654,7 @@ func (h *healSequence) healObject(bucket, object string) error {
         return errServerNotInitialized
     }

-    hri, err := objectAPI.HealObject(bucket, object, h.settings.DryRun)
+    hri, err := objectAPI.HealObject(h.ctx, bucket, object, h.settings.DryRun)
     if err != nil {
         hri.Detail = err.Error()
     }

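The heal sequence above builds its context once in `newHealSequence` and reuses it as `h.ctx` for the whole traversal. The same idea using only the standard library, with stand-ins (`reqInfo`, `contextSet`, `contextGet`) for MinIO's `logger.ReqInfo`, `logger.ContextSet`, and retrieval:

```go
package main

import (
	"context"
	"fmt"
)

// reqInfo mirrors the idea of logger.ReqInfo: request-scoped metadata
// captured once and carried through long-running work.
type reqInfo struct {
	RemoteHost string
	API        string
	Bucket     string
	ObjPrefix  string
}

// ctxKey is unexported so no other package can collide with this value.
type ctxKey struct{}

// contextSet is a stand-in for logger.ContextSet.
func contextSet(ctx context.Context, ri *reqInfo) context.Context {
	return context.WithValue(ctx, ctxKey{}, ri)
}

// contextGet retrieves the metadata, returning nil if absent.
func contextGet(ctx context.Context) *reqInfo {
	ri, _ := ctx.Value(ctxKey{}).(*reqInfo)
	return ri
}

func main() {
	ctx := contextSet(context.Background(),
		&reqInfo{RemoteHost: "10.0.0.1", API: "Heal", Bucket: "mybucket"})
	// Deep inside the heal traversal, a logger can recover the info.
	if ri := contextGet(ctx); ri != nil {
		fmt.Printf("%s %s/%s from %s\n", ri.API, ri.Bucket, ri.ObjPrefix, ri.RemoteHost)
	}
}
```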
@@ -83,7 +83,7 @@ func (lc localAdminClient) ReInitFormat(dryRun bool) error {
     if objectAPI == nil {
         return errServerNotInitialized
     }
-    _, err := objectAPI.HealFormat(dryRun)
+    _, err := objectAPI.HealFormat(nil, dryRun)
     return err
 }

@@ -137,7 +137,7 @@ func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
     if objLayer == nil {
         return sid, errServerNotInitialized
     }
-    storage := objLayer.StorageInfo()
+    storage := objLayer.StorageInfo(nil)

     return ServerInfoData{
         StorageInfo: storage,

@@ -89,7 +89,7 @@ func (s *adminCmd) ReInitFormat(args *ReInitFormatArgs, reply *AuthRPCReply) err
     if err := args.IsAuthenticated(); err != nil {
         return err
     }
-    _, err := newObjectLayerFn().HealFormat(args.DryRun)
+    _, err := newObjectLayerFn().HealFormat(nil, args.DryRun)
     return err
 }

@@ -118,7 +118,7 @@ func (s *adminCmd) ServerInfoData(args *AuthRPCArgs, reply *ServerInfoDataReply)
     if objLayer == nil {
         return errServerNotInitialized
     }
-    storageInfo := objLayer.StorageInfo()
+    storageInfo := objLayer.StorageInfo(nil)

     reply.ServerInfoData = ServerInfoData{
         Properties: ServerProperties{

@@ -40,7 +40,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(bucket, "")
+    err = obj.MakeBucketWithLocation(nil, bucket, "")
     if err != nil {
         b.Fatal(err)
     }
@@ -60,7 +60,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     b.ResetTimer()
     for i := 0; i < b.N; i++ {
         // insert the object.
-        objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i),
+        objInfo, err := obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
         if err != nil {
             b.Fatal(err)
@@ -82,7 +82,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
     object := getRandomObjectName()

     // create bucket.
-    err = obj.MakeBucketWithLocation(bucket, "")
+    err = obj.MakeBucketWithLocation(nil, bucket, "")
     if err != nil {
         b.Fatal(err)
     }
@@ -97,7 +97,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
     // generate md5sum for the generated data.
     // md5sum of the data to written is required as input for NewMultipartUpload.
     metadata := make(map[string]string)
-    uploadID, err = obj.NewMultipartUpload(bucket, object, metadata)
+    uploadID, err = obj.NewMultipartUpload(nil, bucket, object, metadata)
     if err != nil {
         b.Fatal(err)
     }
@@ -121,7 +121,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
         }
         md5hex = getMD5Hash([]byte(textPartData))
         var partInfo PartInfo
-        partInfo, err = obj.PutObjectPart(bucket, object, uploadID, j,
+        partInfo, err = obj.PutObjectPart(nil, bucket, object, uploadID, j,
            mustGetHashReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex))
         if err != nil {
             b.Fatal(err)
@@ -204,7 +204,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(bucket, "")
+    err = obj.MakeBucketWithLocation(nil, bucket, "")
     if err != nil {
         b.Fatal(err)
     }
@@ -223,7 +223,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     for i := 0; i < 10; i++ {
         // insert the object.
         var objInfo ObjectInfo
-        objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i),
+        objInfo, err = obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
         if err != nil {
             b.Fatal(err)
@@ -239,7 +239,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     b.ResetTimer()
     for i := 0; i < b.N; i++ {
         var buffer = new(bytes.Buffer)
-        err = obj.GetObject(bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
+        err = obj.GetObject(nil, bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "")
         if err != nil {
             b.Error(err)
         }
@@ -316,7 +316,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(bucket, "")
+    err = obj.MakeBucketWithLocation(nil, bucket, "")
     if err != nil {
         b.Fatal(err)
     }
@@ -339,7 +339,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
        i := 0
        for pb.Next() {
            // insert the object.
-           objInfo, err := obj.PutObject(bucket, "object"+strconv.Itoa(i),
+           objInfo, err := obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
                mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
            if err != nil {
                b.Fatal(err)
@@ -367,7 +367,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(bucket, "")
+    err = obj.MakeBucketWithLocation(nil, bucket, "")
     if err != nil {
         b.Fatal(err)
     }
@@ -385,7 +385,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     for i := 0; i < 10; i++ {
         // insert the object.
         var objInfo ObjectInfo
-        objInfo, err = obj.PutObject(bucket, "object"+strconv.Itoa(i),
+        objInfo, err = obj.PutObject(nil, bucket, "object"+strconv.Itoa(i),
            mustGetHashReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), metadata)
         if err != nil {
             b.Fatal(err)
@@ -402,7 +402,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     b.RunParallel(func(pb *testing.PB) {
        i := 0
        for pb.Next() {
-           err = obj.GetObject(bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
+           err = obj.GetObject(nil, bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "")
            if err != nil {
                b.Error(err)
            }

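The benchmarks and tests in this commit pass `nil` where a context is expected. That works only while no callee actually touches the context; if a method ever consults it, a nil context panics, which is why `context.Background()` is the conventional placeholder. A small sketch of the failure mode (the `get` helper is hypothetical):

```go
package main

import "context"

// get is a stand-in callee that inspects its context, as an
// object-layer method eventually might.
func get(ctx context.Context, bucket string) error {
	// With ctx == nil the next line panics; with context.Background()
	// it simply reports no cancellation.
	if err := ctx.Err(); err != nil {
		return err
	}
	return nil
}

func main() {
	_ = get(context.Background(), "bucket") // safe placeholder
	// _ = get(nil, "bucket")               // panics once the callee uses ctx
}
```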
@@ -53,6 +53,8 @@ func validateListObjectsArgs(prefix, marker, delimiter string, maxKeys int) APIE
 // NOTE: It is recommended that this API to be used for application development.
 // Minio continues to support ListObjectsV1 for supporting legacy tools.
 func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "ListObjectsV2")
+
     vars := mux.Vars(r)
     bucket := vars["bucket"]

@@ -88,7 +90,7 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
     // Inititate a list objects operation based on the input params.
     // On success would return back ListObjectsInfo object to be
     // marshalled into S3 compatible XML header.
-    listObjectsV2Info, err := objectAPI.ListObjectsV2(bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
+    listObjectsV2Info, err := objectAPI.ListObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -118,6 +120,8 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
 // criteria to return a subset of the objects in a bucket.
 //
 func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "ListObjectsV1")
+
     vars := mux.Vars(r)
     bucket := vars["bucket"]

@@ -149,7 +153,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
     // Inititate a list objects operation based on the input params.
     // On success would return back ListObjectsInfo object to be
     // marshalled into S3 compatible XML header.
-    listObjectsInfo, err := objectAPI.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
+    listObjectsInfo, err := objectAPI.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return

@@ -58,7 +58,7 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que
     }

     // Fetch bucket policy, if policy is not set return access denied.
-    p, err := objAPI.GetBucketPolicy(bucket)
+    p, err := objAPI.GetBucketPolicy(nil, bucket)
     if err != nil {
         return ErrAccessDenied
     }
@@ -91,8 +91,7 @@ func enforceBucketPolicy(bucket, action, resource, referer, sourceIP string, que

 // Check if the action is allowed on the bucket/prefix.
 func isBucketActionAllowed(action, bucket, prefix string, objectAPI ObjectLayer) bool {
-
-    bp, err := objectAPI.GetBucketPolicy(bucket)
+    bp, err := objectAPI.GetBucketPolicy(nil, bucket)
     if err != nil {
         return false
     }
@@ -109,6 +108,8 @@ func isBucketActionAllowed(action, bucket, prefix string, objectAPI ObjectLayer)
 // -------------------------
 // This operation returns bucket location.
 func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "GetBucketLocation")
+
     vars := mux.Vars(r)
     bucket := vars["bucket"]

@@ -135,7 +136,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
     }
     defer bucketLock.RUnlock()

-    if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -163,6 +164,8 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
 // uploads in the response.
 //
 func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "ListMultipartUploads")
+
     vars := mux.Vars(r)
     bucket := vars["bucket"]

@@ -190,7 +193,7 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
         }
     }

-    listMultipartsInfo, err := objectAPI.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
+    listMultipartsInfo, err := objectAPI.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -208,6 +211,8 @@ func (api objectAPIHandlers) ListMultipartUploadsHandler(w http.ResponseWriter,
 // This implementation of the GET operation returns a list of all buckets
 // owned by the authenticated sender of the request.
 func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "ListBuckets")
+
     objectAPI := api.ObjectAPI()
     if objectAPI == nil {
         writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@@ -225,7 +230,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
         return
     }
     // Invoke the list buckets.
-    bucketsInfo, err := objectAPI.ListBuckets()
+    bucketsInfo, err := objectAPI.ListBuckets(ctx)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -241,6 +246,8 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R

 // DeleteMultipleObjectsHandler - deletes multiple objects.
 func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "DeleteMultipleObjects")
+
     vars := mux.Vars(r)
     bucket := vars["bucket"]

@@ -309,7 +316,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
            }
            return
        }
-       dErr := objectAPI.DeleteObject(bucket, obj.ObjectName)
+       dErr := objectAPI.DeleteObject(ctx, bucket, obj.ObjectName)
        if dErr != nil {
            dErrs[i] = dErr
        }
@@ -375,6 +382,8 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 // ----------
 // This implementation of the PUT operation creates a new bucket for authenticated request
 func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "PutBucket")
+
     objectAPI := api.ObjectAPI()
     if objectAPI == nil {
         writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@@ -413,7 +422,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
     defer bucketLock.Unlock()

     // Proceed to creating a bucket.
-    err := objectAPI.MakeBucketWithLocation(bucket, location)
+    err := objectAPI.MakeBucketWithLocation(ctx, bucket, location)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -430,6 +439,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 // This implementation of the POST operation handles object creation with a specified
 // signature policy in multipart/form-data
 func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "PostPolicyBucket")
+
     objectAPI := api.ObjectAPI()
     if objectAPI == nil {
         writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@@ -590,7 +601,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
         }
     }

-    objInfo, err := objectAPI.PutObject(bucket, object, hashReader, metadata)
+    objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -648,6 +659,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 // have permission to access it. Otherwise, the operation might
 // return responses such as 404 Not Found and 403 Forbidden.
 func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "HeadBucket")
+
     vars := mux.Vars(r)
     bucket := vars["bucket"]

@@ -662,7 +675,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
         return
     }

-    if _, err := objectAPI.GetBucketInfo(bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
         writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
         return
     }
@@ -672,6 +685,8 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re

 // DeleteBucketHandler - Delete bucket
 func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "DeleteBucket")
+
     objectAPI := api.ObjectAPI()
     if objectAPI == nil {
         writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@@ -688,7 +703,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
     bucket := vars["bucket"]

     // Attempt to delete bucket.
-    if err := objectAPI.DeleteBucket(bucket); err != nil {
+    if err := objectAPI.DeleteBucket(ctx, bucket); err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }

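Every handler in the bucket API now follows the same two steps: derive a named context from the request, then hand it to the object layer as the first argument. A condensed, compilable sketch of that shape; `newContext` here is a stub standing in for MinIO's helper, whose real implementation is not shown in this diff:

```go
package main

import (
	"context"
	"net/http"
)

// newContext is a stub for MinIO's helper; the real one attaches
// request info (API name, bucket, client address) for logging.
func newContext(r *http.Request, api string) context.Context {
	return context.Background()
}

// objectLayer is a one-method stand-in for ObjectLayer.
type objectLayer interface {
	DeleteBucket(ctx context.Context, bucket string) error
}

func deleteBucketHandler(ol objectLayer) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx := newContext(r, "DeleteBucket") // step 1: request-scoped ctx
		// step 2: thread the ctx through the object-layer call.
		if err := ol.DeleteBucket(ctx, "mybucket"); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	}
}

func main() {
	// Wiring only; no server is started in this sketch.
	http.Handle("/", deleteBucketHandler(nil))
}
```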
@@ -624,7 +624,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
     for i := 0; i < 10; i++ {
         objectName := "test-object-" + strconv.Itoa(i)
         // uploading the object.
-        _, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
+        _, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), nil)
         // if object upload fails stop the test.
         if err != nil {
             t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)

@@ -41,6 +41,7 @@ var errNoSuchNotifications = errors.New("The specified bucket does not have buck
 // as per http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
 // It returns empty configuration if its not set.
 func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "GetBucketNotification")

     objAPI := api.ObjectAPI()
     if objAPI == nil {
@@ -60,7 +61,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
     vars := mux.Vars(r)
     bucketName := vars["bucket"]

-    _, err := objAPI.GetBucketInfo(bucketName)
+    _, err := objAPI.GetBucketInfo(ctx, bucketName)
     if err != nil {
         errorIf(err, "Unable to find bucket info.")
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@@ -93,6 +94,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
 // PutBucketNotificationHandler - This HTTP handler stores given notification configuration as per
 // http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
 func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "PutBucketNotification")

     objectAPI := api.ObjectAPI()
     if objectAPI == nil {
@@ -112,7 +114,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
     vars := mux.Vars(r)
     bucketName := vars["bucket"]

-    _, err := objectAPI.GetBucketInfo(bucketName)
+    _, err := objectAPI.GetBucketInfo(ctx, bucketName)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -161,6 +163,8 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
 // ListenBucketNotificationHandler - This HTTP handler sends events to the connected HTTP client.
 // Client should send prefix/suffix object name to match and events to watch as query parameters.
 func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "ListenBucketNotification")
+
     // Validate if bucket exists.
     objAPI := api.ObjectAPI()
     if objAPI == nil {
@@ -220,7 +224,7 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
         eventNames = append(eventNames, eventName)
     }

-    if _, err := objAPI.GetBucketInfo(bucketName); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
         errorIf(err, "Unable to get bucket info.")
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return

@@ -220,6 +220,8 @@ func bucketPolicyConditionMatch(conditions policy.ConditionKeyMap, statement pol
 // This implementation of the PUT operation uses the policy
 // subresource to add to or replace a policy on a bucket
 func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "PutBucketPolicy")
+
     objAPI := api.ObjectAPI()
     if objAPI == nil {
         writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@@ -235,7 +237,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
     bucket := vars["bucket"]

     // Before proceeding validate if bucket exists.
-    _, err := objAPI.GetBucketInfo(bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -273,7 +275,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
         return
     }

-    if err = objAPI.SetBucketPolicy(bucket, policyInfo); err != nil {
+    if err = objAPI.SetBucketPolicy(ctx, bucket, policyInfo); err != nil {
         err = errors.Cause(err)
         switch err.(type) {
         case NotImplemented:
@@ -298,6 +300,8 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
 // This implementation of the DELETE operation uses the policy
 // subresource to add to remove a policy on a bucket.
 func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "DeleteBucketPolicy")
+
     objAPI := api.ObjectAPI()
     if objAPI == nil {
         writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@@ -313,7 +317,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
     bucket := vars["bucket"]

     // Before proceeding validate if bucket exists.
-    _, err := objAPI.GetBucketInfo(bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
@@ -321,7 +325,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r

     // Delete bucket access policy, by passing an empty policy
     // struct.
-    if err := objAPI.DeleteBucketPolicy(bucket); err != nil {
+    if err := objAPI.DeleteBucketPolicy(ctx, bucket); err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }
@@ -339,6 +343,8 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
 // This operation uses the policy
 // subresource to return the policy of a specified bucket.
 func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
+    ctx := newContext(r, "GetBucketPolicy")
+
     objAPI := api.ObjectAPI()
     if objAPI == nil {
         writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@@ -354,14 +360,14 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
     bucket := vars["bucket"]

     // Before proceeding validate if bucket exists.
-    _, err := objAPI.GetBucketInfo(bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return
     }

     // Read bucket access policy.
-    policy, err := objAPI.GetBucketPolicy(bucket)
+    policy, err := objAPI.GetBucketPolicy(ctx, bucket)
     if err != nil {
         writeErrorResponse(w, toAPIErrorCode(err), r.URL)
         return

@@ -252,7 +252,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     credentials auth.Credentials, t *testing.T) {

     bucketName1 := fmt.Sprintf("%s-1", bucketName)
-    if err := obj.MakeBucketWithLocation(bucketName1, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName1, ""); err != nil {
         t.Fatal(err)
     }

@@ -82,7 +82,7 @@ func initBucketPolicies(objAPI ObjectLayer) (*bucketPolicies, error) {
     }

     // List buckets to proceed loading all notification configuration.
-    buckets, err := objAPI.ListBuckets()
+    buckets, err := objAPI.ListBuckets(nil)
     if err != nil {
         return nil, errors.Cause(err)
     }
@@ -118,7 +118,7 @@ func readBucketPolicyJSON(bucket string, objAPI ObjectLayer) (bucketPolicyReader
     policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)

     var buffer bytes.Buffer
-    err = objAPI.GetObject(minioMetaBucket, policyPath, 0, -1, &buffer, "")
+    err = objAPI.GetObject(nil, minioMetaBucket, policyPath, 0, -1, &buffer, "")
     if err != nil {
         if isErrObjectNotFound(err) || isErrIncompleteBody(err) {
             return nil, PolicyNotFound{Bucket: bucket}
@@ -152,7 +152,7 @@ func ReadBucketPolicy(bucket string, objAPI ObjectLayer) (policy.BucketAccessPol
 // if no policies are found.
 func removeBucketPolicy(bucket string, objAPI ObjectLayer) error {
     policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
-    err := objAPI.DeleteObject(minioMetaBucket, policyPath)
+    err := objAPI.DeleteObject(nil, minioMetaBucket, policyPath)
     if err != nil {
         err = errors.Cause(err)
         if _, ok := err.(ObjectNotFound); ok {
@@ -177,7 +177,7 @@ func writeBucketPolicy(bucket string, objAPI ObjectLayer, bpy policy.BucketAcces
         return errors.Cause(err)
     }

-    if _, err = objAPI.PutObject(minioMetaBucket, policyPath, hashReader, nil); err != nil {
+    if _, err = objAPI.PutObject(nil, minioMetaBucket, policyPath, hashReader, nil); err != nil {
         errorIf(err, "Unable to set policy for the bucket %s", bucket)
         return errors.Cause(err)
     }

@@ -49,10 +49,10 @@ func TestReadFSMetadata(t *testing.T) {
     bucketName := "bucket"
     objectName := "object"

-    if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Unexpected err: ", err)
     }
-    if _, err := obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
+    if _, err := obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
         t.Fatal("Unexpected err: ", err)
     }

@@ -84,10 +84,10 @@ func TestWriteFSMetadata(t *testing.T) {
     bucketName := "bucket"
     objectName := "object"

-    if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Unexpected err: ", err)
     }
-    if _, err := obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
+    if _, err := obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil); err != nil {
         t.Fatal("Unexpected err: ", err)
     }

@@ -17,6 +17,7 @@
 package cmd

 import (
+    "context"
     "encoding/hex"
     "encoding/json"
     "fmt"
@@ -120,7 +121,7 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {

 // ListMultipartUploads - lists all the uploadIDs for the specified object.
 // We do not support prefix based listing.
-func (fs *FSObjects) ListMultipartUploads(bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
+func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
     if err := checkListMultipartArgs(bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
         return result, toObjectErr(errors.Trace(err))
     }
@@ -202,7 +203,7 @@ func (fs *FSObjects) ListMultipartUploads(bucket, object, keyMarker, uploadIDMar
 // subsequent request each UUID is unique.
 //
 // Implements S3 compatible initiate multipart API.
-func (fs *FSObjects) NewMultipartUpload(bucket, object string, meta map[string]string) (string, error) {
+func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
     if err := checkNewMultipartArgs(bucket, object, fs); err != nil {
         return "", toObjectErr(err, bucket)
     }
@@ -238,7 +239,7 @@ func (fs *FSObjects) NewMultipartUpload(bucket, object string, meta map[string]s
 // CopyObjectPart - similar to PutObjectPart but reads data from an existing
 // object. Internally incoming data is written to '.minio.sys/tmp' location
 // and safely renamed to '.minio.sys/multipart' for reach parts.
-func (fs *FSObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
+func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
     startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {

     if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil {
@@ -247,7 +248,7 @@ func (fs *FSObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject,

     // Initialize pipe.
     go func() {
-        if gerr := fs.GetObject(srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
+        if gerr := fs.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
            if gerr = srcInfo.Writer.Close(); gerr != nil {
                errorIf(gerr, "Unable to read %s/%s.", srcBucket, srcObject)
                return
@@ -261,7 +262,7 @@ func (fs *FSObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject,
        }
     }()

-    partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, srcInfo.Reader)
+    partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader)
     if err != nil {
         return pi, toObjectErr(err, dstBucket, dstObject)
     }
@@ -273,7 +274,7 @@ func (fs *FSObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject,
 // an ongoing multipart transaction. Internally incoming data is
 // written to '.minio.sys/tmp' location and safely renamed to
 // '.minio.sys/multipart' for reach parts.
-func (fs *FSObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
+func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
     if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
         return pi, toObjectErr(errors.Trace(err), bucket)
     }
@@ -354,7 +355,7 @@ func (fs *FSObjects) PutObjectPart(bucket, object, uploadID string, partID int,
 // Implements S3 compatible ListObjectParts API. The resulting
 // ListPartsInfo structure is unmarshalled directly into XML and
 // replied back to the client.
-func (fs *FSObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
+func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (result ListPartsInfo, e error) {
     if err := checkListPartsArgs(bucket, object, fs); err != nil {
         return result, toObjectErr(errors.Trace(err))
     }
@@ -465,7 +466,7 @@ func (fs *FSObjects) ListObjectParts(bucket, object, uploadID string, partNumber
 // md5sums of all the parts.
 //
 // Implements S3 compatible Complete multipart API.
-func (fs *FSObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
+func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
     if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil {
         return oi, toObjectErr(err)
     }
@@ -650,7 +651,7 @@ func (fs *FSObjects) CompleteMultipartUpload(bucket string, object string, uploa
 // that this is an atomic idempotent operation. Subsequent calls have
 // no affect and further requests to the same uploadID would not be
 // honored.
-func (fs *FSObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
+func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
     if err := checkAbortMultipartArgs(bucket, object, fs); err != nil {
         return err
     }

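Changing the interface like this forces every backend (xl, fs, azure, sia) to be updated in lockstep, since a stale method signature no longer satisfies `ObjectLayer` and the build fails. A compile-time assertion makes that check explicit at the declaration site; the types below are stand-ins, not MinIO's:

```go
package main

import "context"

// Stand-ins for the real interface and implementation.
type objectLayer interface {
	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
}

type fsObjects struct{}

func (fs *fsObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	return nil
}

// Compile-time check: if a method signature drifts from the interface,
// this line stops the build with a clear error message.
var _ objectLayer = (*fsObjects)(nil)

func main() {}
```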
@@ -42,8 +42,8 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
     bucketName := "bucket"
     objectName := "object"

-    obj.MakeBucketWithLocation(bucketName, "")
-    uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil)
+    obj.MakeBucketWithLocation(nil, bucketName, "")
+    uploadID, err := obj.NewMultipartUpload(nil, bucketName, objectName, nil)
     if err != nil {
         t.Fatal("Unexpected err: ", err)
     }
@@ -58,7 +58,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
     globalServiceDoneCh <- struct{}{}

     // Check if upload id was already purged.
-    if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil {
+    if err = obj.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil {
         err = errors.Cause(err)
         if _, ok := err.(InvalidUploadID); !ok {
             t.Fatal("Unexpected err: ", err)
@@ -77,13 +77,13 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
     bucketName := "bucket"
     objectName := "object"

-    if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Cannot create bucket, err: ", err)
     }

     // Test with disk removed.
     fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-    if _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil {
+    if _, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"}); err != nil {
         if !isSameType(errors.Cause(err), BucketNotFound{}) {
             t.Fatal("Unexpected error ", err)
         }
@@ -108,11 +108,11 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
     data := []byte("12345")
     dataLen := int64(len(data))

-    if err = obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Cannot create bucket, err: ", err)
     }

-    uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+    uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
     if err != nil {
         t.Fatal("Unexpected error ", err)
     }
@@ -121,7 +121,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
     sha256sum := ""

     fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-    _, err = fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
+    _, err = fs.PutObjectPart(nil, bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum))
     if !isSameType(errors.Cause(err), BucketNotFound{}) {
         t.Fatal("Unexpected error ", err)
     }
@@ -139,11 +139,11 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
     objectName := "object"
     data := []byte("12345")

-    if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Cannot create bucket, err: ", err)
     }

-    uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+    uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
     if err != nil {
         t.Fatal("Unexpected error ", err)
     }
@@ -152,7 +152,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {

     parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}
     fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-    if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); err != nil {
+    if _, err := fs.CompleteMultipartUpload(nil, bucketName, objectName, uploadID, parts); err != nil {
         if !isSameType(errors.Cause(err), BucketNotFound{}) {
             t.Fatal("Unexpected error ", err)
         }
@@ -171,24 +171,24 @@ func TestCompleteMultipartUpload(t *testing.T) {
     objectName := "object"
     data := []byte("12345")

-    if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Cannot create bucket, err: ", err)
     }

-    uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+    uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
     if err != nil {
         t.Fatal("Unexpected error ", err)
     }

     md5Hex := getMD5Hash(data)

-    if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
+    if _, err := fs.PutObjectPart(nil, bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
         t.Fatal("Unexpected error ", err)
     }

     parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}}

-    if _, err := fs.CompleteMultipartUpload(bucketName, objectName, uploadID, parts); err != nil {
+    if _, err := fs.CompleteMultipartUpload(nil, bucketName, objectName, uploadID, parts); err != nil {
         t.Fatal("Unexpected error ", err)
     }
 }
@@ -205,22 +205,22 @@ func TestAbortMultipartUpload(t *testing.T) {
     objectName := "object"
     data := []byte("12345")

-    if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Cannot create bucket, err: ", err)
     }

-    uploadID, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+    uploadID, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
     if err != nil {
         t.Fatal("Unexpected error ", err)
     }

     md5Hex := getMD5Hash(data)

-    if _, err := fs.PutObjectPart(bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
+    if _, err := fs.PutObjectPart(nil, bucketName, objectName, uploadID, 1, mustGetHashReader(t, bytes.NewReader(data), 5, md5Hex, "")); err != nil {
         t.Fatal("Unexpected error ", err)
     }
     time.Sleep(time.Second) // Without Sleep on windows, the fs.AbortMultipartUpload() fails with "The process cannot access the file because it is being used by another process."
-    if err := fs.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil {
+    if err := fs.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil {
         t.Fatal("Unexpected error ", err)
     }
 }
@@ -237,17 +237,17 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
     bucketName := "bucket"
     objectName := "object"

-    if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
+    if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
         t.Fatal("Cannot create bucket, err: ", err)
     }

-    _, err := fs.NewMultipartUpload(bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
+    _, err := fs.NewMultipartUpload(nil, bucketName, objectName, map[string]string{"X-Amz-Meta-xid": "3f"})
     if err != nil {
         t.Fatal("Unexpected error ", err)
     }

     fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-    if _, err := fs.ListMultipartUploads(bucketName, objectName, "", "", "", 1000); err != nil {
+    if _, err := fs.ListMultipartUploads(nil, bucketName, objectName, "", "", "", 1000); err != nil {
         if !isSameType(errors.Cause(err), BucketNotFound{}) {
             t.Fatal("Unexpected error ", err)
         }

cmd/fs-v1.go (51 changed lines)
@@ -17,6 +17,7 @@
 package cmd

 import (
+    "context"
     "encoding/hex"
     "fmt"
     "io"
@@ -181,7 +182,7 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
 }

 // Shutdown - should be called when process shuts down.
-func (fs *FSObjects) Shutdown() error {
+func (fs *FSObjects) Shutdown(ctx context.Context) error {
     fs.fsFormatRlk.Close()

     // Cleanup and delete tmp uuid.
@@ -189,7 +190,7 @@ func (fs *FSObjects) Shutdown() error {
 }

 // StorageInfo - returns underlying storage statistics.
-func (fs *FSObjects) StorageInfo() StorageInfo {
+func (fs *FSObjects) StorageInfo(ctx context.Context) StorageInfo {
     info, err := getDiskInfo((fs.fsPath))
     errorIf(err, "Unable to get disk info %#v", fs.fsPath)
     storageInfo := StorageInfo{
@@ -203,12 +204,12 @@ func (fs *FSObjects) StorageInfo() StorageInfo {
 // Locking operations

 // ListLocks - List namespace locks held in object layer
-func (fs *FSObjects) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
+func (fs *FSObjects) ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
     return []VolumeLockInfo{}, NotImplemented{}
 }

 // ClearLocks - Clear namespace locks held in object layer
-func (fs *FSObjects) ClearLocks([]VolumeLockInfo) error {
+func (fs *FSObjects) ClearLocks(ctx context.Context, info []VolumeLockInfo) error {
     return NotImplemented{}
 }

@@ -241,7 +242,7 @@ func (fs *FSObjects) statBucketDir(bucket string) (os.FileInfo, error) {

 // MakeBucketWithLocation - create a new bucket, returns if it
 // already exists.
-func (fs *FSObjects) MakeBucketWithLocation(bucket, location string) error {
+func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
     bucketLock := fs.nsMutex.NewNSLock(bucket, "")
     if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
         return err
@@ -260,7 +261,7 @@ func (fs *FSObjects) MakeBucketWithLocation(bucket, location string) error {
 }

 // GetBucketInfo - fetch bucket metadata info.
-func (fs *FSObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
+func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
     bucketLock := fs.nsMutex.NewNSLock(bucket, "")
     if e := bucketLock.GetRLock(globalObjectTimeout); e != nil {
         return bi, e
@@ -280,7 +281,7 @@ func (fs *FSObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
 }

 // ListBuckets - list all s3 compatible buckets (directories) at fsPath.
-func (fs *FSObjects) ListBuckets() ([]BucketInfo, error) {
+func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
     if err := checkPathLength(fs.fsPath); err != nil {
         return nil, errors.Trace(err)
     }
@@ -321,7 +322,7 @@ func (fs *FSObjects) ListBuckets() ([]BucketInfo, error) {

 // DeleteBucket - delete a bucket and all the metadata associated
 // with the bucket including pending multipart, object metadata.
-func (fs *FSObjects) DeleteBucket(bucket string) error {
+func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string) error {
     bucketLock := fs.nsMutex.NewNSLock(bucket, "")
     if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
         return err
@@ -360,7 +361,7 @@ func (fs *FSObjects) DeleteBucket(bucket string) error {
 // CopyObject - copy object source object to destination object.
 // if source object and destination object are same we only
 // update metadata.
-func (fs *FSObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo) (oi ObjectInfo, e error) {
+func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo) (oi ObjectInfo, e error) {
     cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
     // Hold write lock on destination since in both cases
     // - if source and destination are same
@@ -449,7 +450,7 @@ func (fs *FSObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject strin
 //
 // startOffset indicates the starting read location of the object.
 // length indicates the total length of the object.
-func (fs *FSObjects) GetObject(bucket, object string, offset int64, length int64, writer io.Writer, etag string) (err error) {
+func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string) (err error) {
     if err = checkGetObjArgs(bucket, object); err != nil {
         return err
     }
@@ -586,7 +587,7 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
 }

 // GetObjectInfo - reads object metadata and replies back ObjectInfo.
-func (fs *FSObjects) GetObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
+func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
     // Lock the object before reading.
     objectLock := fs.nsMutex.NewNSLock(bucket, object)
     if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
@@ -629,7 +630,7 @@ func (fs *FSObjects) parentDirIsObject(bucket, parent string) bool {
 // until EOF, writes data directly to configured filesystem path.
 // Additionally writes `fs.json` which carries the necessary metadata
 // for future object operations.
-func (fs *FSObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
+func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
     if err := checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil {
         return ObjectInfo{}, err
     }
@@ -767,7 +768,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,

 // DeleteObject - deletes an object from a bucket, this operation is destructive
 // and there are no rollbacks supported.
-func (fs *FSObjects) DeleteObject(bucket, object string) error {
+func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) error {
     // Acquire a write lock before deleting the object.
     objectLock := fs.nsMutex.NewNSLock(bucket, object)
     if err := objectLock.GetLock(globalOperationTimeout); err != nil {
@@ -892,7 +893,7 @@ func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, err

 // ListObjects - list all objects at prefix upto maxKeys., optionally delimited by '/'. Maintains the list pool
 // state for future re-entrant list requests.
-func (fs *FSObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
+func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
     if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil {
         return loi, err
     }
@@ -1012,39 +1013,39 @@ func (fs *FSObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKe
 }

 // HealFormat - no-op for fs, Valid only for XL.
-func (fs *FSObjects) HealFormat(dryRun bool) (madmin.HealResultItem, error) {
+func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
     return madmin.HealResultItem{}, errors.Trace(NotImplemented{})
 }

 // HealObject - no-op for fs. Valid only for XL.
-func (fs *FSObjects) HealObject(bucket, object string, dryRun bool) (
+func (fs *FSObjects) HealObject(ctx context.Context, bucket, object string, dryRun bool) (
     res madmin.HealResultItem, err error) {
     return res, errors.Trace(NotImplemented{})
 }

 // HealBucket - no-op for fs, Valid only for XL.
-func (fs *FSObjects) HealBucket(bucket string, dryRun bool) ([]madmin.HealResultItem,
+func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem,
     error) {
     return nil, errors.Trace(NotImplemented{})
 }

 // ListObjectsHeal - list all objects to be healed. Valid only for XL
-func (fs *FSObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
+func (fs *FSObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
     return loi, errors.Trace(NotImplemented{})
 }

 // ListBucketsHeal - list all buckets to be healed. Valid only for XL
-func (fs *FSObjects) ListBucketsHeal() ([]BucketInfo, error) {
+func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
     return []BucketInfo{}, errors.Trace(NotImplemented{})
 }

 // SetBucketPolicy sets policy on bucket
-func (fs *FSObjects) SetBucketPolicy(bucket string, policy policy.BucketAccessPolicy) error {
+func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error {
     return persistAndNotifyBucketPolicyChange(bucket, false, policy, fs)
 }

 // GetBucketPolicy will get policy on bucket
-func (fs *FSObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
+func (fs *FSObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
     policy := fs.bucketPolicies.GetBucketPolicy(bucket)
     if reflect.DeepEqual(policy, emptyBucketPolicy) {
         return ReadBucketPolicy(bucket, fs)
@@ -1053,13 +1054,13 @@ func (fs *FSObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy,
 }

 // DeleteBucketPolicy deletes all policies on bucket
-func (fs *FSObjects) DeleteBucketPolicy(bucket string) error {
+func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
     return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, fs)
 }

 // ListObjectsV2 lists all blobs in bucket filtered by prefix
-func (fs *FSObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
-    loi, err := fs.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
+func (fs *FSObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
|
||||
loi, err := fs.ListObjects(ctx, bucket, prefix, continuationToken, delimiter, maxKeys)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
@ -1075,7 +1076,7 @@ func (fs *FSObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter
|
||||
}
|
||||
|
||||
// RefreshBucketPolicy refreshes cache policy with what's on disk.
|
||||
func (fs *FSObjects) RefreshBucketPolicy(bucket string) error {
|
||||
func (fs *FSObjects) RefreshBucketPolicy(ctx context.Context, bucket string) error {
|
||||
policy, err := ReadBucketPolicy(bucket, fs)
|
||||
|
||||
if err != nil {
|
||||
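With every exported FSObjects method now taking a context.Context as its leading argument, call sites change mechanically. A minimal sketch of the new calling convention as it might look inside package cmd (the NewFSObjectLayer constructor, the hash.NewReader signature, and the temp path are assumptions inferred from the surrounding code, not part of this diff):

	// Sketch: driving the context-threaded FS ObjectLayer end to end.
	func exampleFSUsage() error {
		fs, err := NewFSObjectLayer("/tmp/minio-fs") // assumed constructor
		if err != nil {
			return err
		}
		ctx := context.Background() // callers now own lifetime and cancellation

		if err = fs.MakeBucketWithLocation(ctx, "demo", ""); err != nil {
			return err
		}
		payload := []byte("hello")
		hr, err := hash.NewReader(bytes.NewReader(payload), int64(len(payload)), "", "")
		if err != nil {
			return err
		}
		if _, err = fs.PutObject(ctx, "demo", "greeting.txt", hr, nil); err != nil {
			return err
		}
		// Reads take the same leading context.
		var buf bytes.Buffer
		if err = fs.GetObject(ctx, "demo", "greeting.txt", 0, int64(len(payload)), &buf, ""); err != nil {
			return err
		}
		return fs.DeleteObject(ctx, "demo", "greeting.txt")
	}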

@ -43,11 +43,11 @@ func TestFSParentDirIsObject(t *testing.T) {
bucketName := "testbucket"
objectName := "object"

if err = obj.MakeBucketWithLocation(bucketName, ""); err != nil {
if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
t.Fatal(err)
}
objectContent := "12345"
objInfo, err := obj.PutObject(bucketName, objectName,
objInfo, err := obj.PutObject(nil, bucketName, objectName,
mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
if err != nil {
t.Fatal(err)
@ -136,23 +136,23 @@ func TestFSShutdown(t *testing.T) {
obj := initFSObjects(disk, t)
fs := obj.(*FSObjects)
objectContent := "12345"
obj.MakeBucketWithLocation(bucketName, "")
obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
obj.MakeBucketWithLocation(nil, bucketName, "")
obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
return fs, disk
}

// Test Shutdown with regular conditions
fs, disk := prepareTest()
if err := fs.Shutdown(); err != nil {
if err := fs.Shutdown(nil); err != nil {
t.Fatal("Cannot shutdown the FS object: ", err)
}
os.RemoveAll(disk)

// Test Shutdown with faulty disk
fs, disk = prepareTest()
fs.DeleteObject(bucketName, objectName)
fs.DeleteObject(nil, bucketName, objectName)
os.RemoveAll(disk)
if err := fs.Shutdown(); err != nil {
if err := fs.Shutdown(nil); err != nil {
t.Fatal("Got unexpected fs shutdown error: ", err)
}
}
@ -167,10 +167,10 @@ func TestFSGetBucketInfo(t *testing.T) {
fs := obj.(*FSObjects)
bucketName := "bucket"

obj.MakeBucketWithLocation(bucketName, "")
obj.MakeBucketWithLocation(nil, bucketName, "")

// Test with valid parameters
info, err := fs.GetBucketInfo(bucketName)
info, err := fs.GetBucketInfo(nil, bucketName)
if err != nil {
t.Fatal(err)
}
@ -179,7 +179,7 @@ func TestFSGetBucketInfo(t *testing.T) {
}

// Test with inexistent bucket
_, err = fs.GetBucketInfo("a")
_, err = fs.GetBucketInfo(nil, "a")
if !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("BucketNameInvalid error not returned")
}
@ -187,7 +187,7 @@ func TestFSGetBucketInfo(t *testing.T) {
// Check for buckets and should get disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())

_, err = fs.GetBucketInfo(bucketName)
_, err = fs.GetBucketInfo(nil, bucketName)
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("BucketNotFound error not returned")
}
@ -202,12 +202,12 @@ func TestFSPutObject(t *testing.T) {
bucketName := "bucket"
objectName := "1/2/3/4/object"

if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
t.Fatal(err)
}

// With a regular object.
_, err := obj.PutObject(bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err := obj.PutObject(nil, bucketName+"non-existent", objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@ -216,7 +216,7 @@ func TestFSPutObject(t *testing.T) {
}

// With a directory object.
_, err = obj.PutObject(bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
_, err = obj.PutObject(nil, bucketName+"non-existent", objectName+"/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, bucket doesn't exist")
}
@ -224,11 +224,11 @@ func TestFSPutObject(t *testing.T) {
t.Fatalf("Expected error type BucketNotFound, got %#v", err)
}

_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err != nil {
t.Fatal(err)
}
_, err = obj.PutObject(bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
_, err = obj.PutObject(nil, bucketName, objectName+"/1", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
@ -243,7 +243,7 @@ func TestFSPutObject(t *testing.T) {
}
}

_, err = obj.PutObject(bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
_, err = obj.PutObject(nil, bucketName, objectName+"/1/", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), nil)
if err == nil {
t.Fatal("Unexpected should fail here, backend corruption occurred")
}
@ -270,33 +270,33 @@ func TestFSDeleteObject(t *testing.T) {
bucketName := "bucket"
objectName := "object"

obj.MakeBucketWithLocation(bucketName, "")
obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
obj.MakeBucketWithLocation(nil, bucketName, "")
obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)

// Test with invalid bucket name
if err := fs.DeleteObject("fo", objectName); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
if err := fs.DeleteObject(nil, "fo", objectName); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with bucket does not exist
if err := fs.DeleteObject("foobucket", "fooobject"); !isSameType(errors.Cause(err), BucketNotFound{}) {
if err := fs.DeleteObject(nil, "foobucket", "fooobject"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with invalid object name
if err := fs.DeleteObject(bucketName, "\\"); !isSameType(errors.Cause(err), ObjectNameInvalid{}) {
if err := fs.DeleteObject(nil, bucketName, "\\"); !isSameType(errors.Cause(err), ObjectNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with object does not exist.
if err := fs.DeleteObject(bucketName, "foooobject"); !isSameType(errors.Cause(err), ObjectNotFound{}) {
if err := fs.DeleteObject(nil, bucketName, "foooobject"); !isSameType(errors.Cause(err), ObjectNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with valid condition
if err := fs.DeleteObject(bucketName, objectName); err != nil {
if err := fs.DeleteObject(nil, bucketName, objectName); err != nil {
t.Fatal("Unexpected error: ", err)
}

// Delete object should err disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if err := fs.DeleteObject(bucketName, objectName); err != nil {
if err := fs.DeleteObject(nil, bucketName, objectName); err != nil {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
@ -314,29 +314,29 @@ func TestFSDeleteBucket(t *testing.T) {
fs := obj.(*FSObjects)
bucketName := "bucket"

err := obj.MakeBucketWithLocation(bucketName, "")
err := obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
t.Fatal("Unexpected error: ", err)
}

// Test with an invalid bucket name
if err = fs.DeleteBucket("fo"); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
if err = fs.DeleteBucket(nil, "fo"); !isSameType(errors.Cause(err), BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with an inexistent bucket
if err = fs.DeleteBucket("foobucket"); !isSameType(errors.Cause(err), BucketNotFound{}) {
if err = fs.DeleteBucket(nil, "foobucket"); !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
// Test with a valid case
if err = fs.DeleteBucket(bucketName); err != nil {
if err = fs.DeleteBucket(nil, bucketName); err != nil {
t.Fatal("Unexpected error: ", err)
}

obj.MakeBucketWithLocation(bucketName, "")
obj.MakeBucketWithLocation(nil, bucketName, "")

// Delete bucket should get error disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
if err = fs.DeleteBucket(bucketName); err != nil {
if err = fs.DeleteBucket(nil, bucketName); err != nil {
if !isSameType(errors.Cause(err), BucketNotFound{}) {
t.Fatal("Unexpected error: ", err)
}
@ -353,7 +353,7 @@ func TestFSListBuckets(t *testing.T) {
fs := obj.(*FSObjects)

bucketName := "bucket"
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err)
}

@ -368,7 +368,7 @@ func TestFSListBuckets(t *testing.T) {
f.Close()

// Test list buckets to have only one entry.
buckets, err := fs.ListBuckets()
buckets, err := fs.ListBuckets(nil)
if err != nil {
t.Fatal("Unexpected error: ", err)
}
@ -379,7 +379,7 @@ func TestFSListBuckets(t *testing.T) {
// Test ListBuckets with disk not found.
fs.fsPath = filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())

if _, err := fs.ListBuckets(); err != nil {
if _, err := fs.ListBuckets(nil); err != nil {
if errors.Cause(err) != errDiskNotFound {
t.Fatal("Unexpected error: ", err)
}
@ -387,7 +387,7 @@ func TestFSListBuckets(t *testing.T) {

longPath := fmt.Sprintf("%0256d", 1)
fs.fsPath = longPath
if _, err := fs.ListBuckets(); err != nil {
if _, err := fs.ListBuckets(nil); err != nil {
if errors.Cause(err) != errFileNameTooLong {
t.Fatal("Unexpected error: ", err)
}
@ -400,7 +400,7 @@ func TestFSHealObject(t *testing.T) {
defer os.RemoveAll(disk)

obj := initFSObjects(disk, t)
_, err := obj.HealObject("bucket", "object", false)
_, err := obj.HealObject(nil, "bucket", "object", false)
if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error")
}
@ -412,7 +412,7 @@ func TestFSListObjectsHeal(t *testing.T) {
defer os.RemoveAll(disk)

obj := initFSObjects(disk, t)
_, err := obj.ListObjectsHeal("bucket", "prefix", "marker", "delimiter", 1000)
_, err := obj.ListObjectsHeal(nil, "bucket", "prefix", "marker", "delimiter", 1000)
if err == nil || !isSameType(errors.Cause(err), NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error")
}

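Note that the updated tests pass nil for the new context argument, which is safe today only because nothing dereferences the context yet. A non-nil equivalent, if one preferred to avoid nil contexts in new tests, would look like this (hypothetical variant, identical behavior):

	// Sketch: the same call with a non-nil context.
	ctx := context.Background()
	if err := obj.MakeBucketWithLocation(ctx, bucketName, ""); err != nil {
		t.Fatal(err)
	}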
@ -18,6 +18,7 @@ package azure

import (
"bytes"
"context"
"crypto/rand"
"encoding/base64"
"encoding/hex"
@ -404,17 +405,17 @@ func azureParseBlockID(blockID string) (partID, subPartNumber int, uploadID, md5

// Shutdown - save any gateway metadata to disk
// if necessary and reload upon next restart.
func (a *azureObjects) Shutdown() error {
func (a *azureObjects) Shutdown(ctx context.Context) error {
return nil
}

// StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo() (si minio.StorageInfo) {
func (a *azureObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
return si
}

// MakeBucketWithLocation - Create a new container on azure backend.
func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
container := a.client.GetContainerReference(bucket)
err := container.Create(&storage.CreateContainerOptions{
Access: storage.ContainerAccessTypePrivate,
@ -423,7 +424,7 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
}

// GetBucketInfo - Get bucket metadata.
func (a *azureObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, e error) {
func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) {
// Verify if bucket (container-name) is valid.
// IsValidBucketName has same restrictions as container names mentioned
// in azure documentation, so we will simply use the same function here.
@ -455,7 +456,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, e erro
}

// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.
func (a *azureObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
resp, err := a.client.ListContainers(storage.ListContainersParameters{})
if err != nil {
return nil, azureToObjectError(errors.Trace(err))
@ -474,14 +475,14 @@ func (a *azureObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
}

// DeleteBucket - delete a container on azure, uses Azure equivalent DeleteContainer.
func (a *azureObjects) DeleteBucket(bucket string) error {
func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string) error {
container := a.client.GetContainerReference(bucket)
return azureToObjectError(errors.Trace(container.Delete(nil)), bucket)
}

// ListObjects - lists all blobs on azure within a container filtered by prefix
// and marker, uses Azure equivalent ListBlobs.
func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
var objects []minio.ObjectInfo
var prefixes []string
container := a.client.GetContainerReference(bucket)
@ -532,14 +533,14 @@ func (a *azureObjects) ListObjects(bucket, prefix, marker, delimiter string, max
}

// ListObjectsV2 - list all blobs in Azure bucket filtered by prefix
func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
marker := continuationToken
if startAfter != "" {
marker = startAfter
}

var resultV1 minio.ListObjectsInfo
resultV1, err = a.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
resultV1, err = a.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return result, err
}
@ -558,7 +559,7 @@ func (a *azureObjects) ListObjectsV2(bucket, prefix, continuationToken, delimite
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (a *azureObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
// startOffset cannot be negative.
if startOffset < 0 {
return azureToObjectError(errors.Trace(minio.InvalidRange{}), bucket, object)
@ -589,7 +590,7 @@ func (a *azureObjects) GetObject(bucket, object string, startOffset int64, lengt

// GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo,
// uses Azure equivalent GetBlobProperties.
func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo minio.ObjectInfo, err error) {
func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo minio.ObjectInfo, err error) {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err = blob.GetProperties(nil)
if err != nil {
@ -610,7 +611,7 @@ func (a *azureObjects) GetObjectInfo(bucket, object string) (objInfo minio.Objec

// PutObject - Create a new blob with the incoming data,
// uses Azure equivalent CreateBlockBlobFromReader.
func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
blob.Metadata, blob.Properties, err = s3MetaToAzureProperties(metadata)
if err != nil {
@ -620,12 +621,12 @@ func (a *azureObjects) PutObject(bucket, object string, data *hash.Reader, metad
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
return a.GetObjectInfo(bucket, object)
return a.GetObjectInfo(ctx, bucket, object)
}

// CopyObject - Copies a blob from source container to destination container.
// Uses Azure equivalent CopyBlob API.
func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
srcBlobURL := a.client.GetContainerReference(srcBucket).GetBlobReference(srcObject).GetURL()
destBlob := a.client.GetContainerReference(destBucket).GetBlobReference(destObject)
azureMeta, props, err := s3MetaToAzureProperties(srcInfo.UserDefined)
@ -642,12 +643,12 @@ func (a *azureObjects) CopyObject(srcBucket, srcObject, destBucket, destObject s
if err != nil {
return objInfo, azureToObjectError(errors.Trace(err), srcBucket, srcObject)
}
return a.GetObjectInfo(destBucket, destObject)
return a.GetObjectInfo(ctx, destBucket, destObject)
}

// DeleteObject - Deletes a blob on azure container, uses Azure
// equivalent DeleteBlob API.
func (a *azureObjects) DeleteObject(bucket, object string) error {
func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string) error {
blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
err := blob.Delete(nil)
if err != nil {
@ -657,7 +658,7 @@ func (a *azureObjects) DeleteObject(bucket, object string) error {
}

// ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result.
func (a *azureObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result minio.ListMultipartsInfo, err error) {
func (a *azureObjects) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result minio.ListMultipartsInfo, err error) {
// It's decided not to support List Multipart Uploads, hence returning empty result.
return result, nil
}
@ -689,7 +690,7 @@ func (a *azureObjects) checkUploadIDExists(bucketName, objectName, uploadID stri
}

// NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
uploadID = mustGetAzureUploadID()
if err = a.checkUploadIDExists(bucket, object, uploadID); err == nil {
return "", errors.Trace(fmt.Errorf("Upload ID name collision"))
@ -711,7 +712,7 @@ func (a *azureObjects) NewMultipartUpload(bucket, object string, metadata map[st
}

// PutObjectPart - Use Azure equivalent PutBlockWithLength.
func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) {
func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info minio.PartInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return info, err
}
@ -753,7 +754,7 @@ func (a *azureObjects) PutObjectPart(bucket, object, uploadID string, partID int
}

// ListObjectParts - Use Azure equivalent GetBlockList.
func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) {
func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result minio.ListPartsInfo, err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return result, err
}
@ -839,7 +840,7 @@ func (a *azureObjects) ListObjectParts(bucket, object, uploadID string, partNumb
// AbortMultipartUpload - Not Implemented.
// There is no corresponding API in azure to abort an incomplete upload. The uncommitted blocks
// get deleted after one week.
func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) (err error) {
func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) {
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return err
}
@ -850,7 +851,7 @@ func (a *azureObjects) AbortMultipartUpload(bucket, object, uploadID string) (er
}

// CompleteMultipartUpload - Use Azure equivalent PutBlockList.
func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) {
func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart) (objInfo minio.ObjectInfo, err error) {
metadataObject := getAzureMetadataObjectName(object, uploadID)
if err = a.checkUploadIDExists(bucket, object, uploadID); err != nil {
return objInfo, err
@ -956,7 +957,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
return objInfo, azureToObjectError(errors.Trace(err), bucket, object)
}
}
return a.GetObjectInfo(bucket, object)
return a.GetObjectInfo(ctx, bucket, object)
}

// SetBucketPolicy - Azure supports three types of container policies:
@ -965,7 +966,7 @@ func (a *azureObjects) CompleteMultipartUpload(bucket, object, uploadID string,
// storage.ContainerAccessTypePrivate - none in minio terminology
// As the common denominator for minio and azure is readonly and none, we support
// these two policies at the bucket level.
func (a *azureObjects) SetBucketPolicy(bucket string, policyInfo policy.BucketAccessPolicy) error {
func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, policyInfo policy.BucketAccessPolicy) error {
var policies []minio.BucketAccessPolicy

for prefix, policy := range policy.GetPolicies(policyInfo.Statements, bucket, "") {
@ -994,7 +995,7 @@ func (a *azureObjects) SetBucketPolicy(bucket string, policyInfo policy.BucketAc
}

// GetBucketPolicy - Get the container ACL and convert it to canonical []bucketAccessPolicy
func (a *azureObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
policyInfo := policy.BucketAccessPolicy{Version: "2012-10-17"}
container := a.client.GetContainerReference(bucket)
perm, err := container.GetPermissions(nil)
@ -1013,7 +1014,7 @@ func (a *azureObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy
}

// DeleteBucketPolicy - Set the container ACL to "private"
func (a *azureObjects) DeleteBucketPolicy(bucket string) error {
func (a *azureObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
perm := storage.ContainerPermissions{
AccessType: storage.ContainerAccessTypePrivate,
AccessPolicies: nil,

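The gateway methods above accept the context but do not yet pass it to the Azure SDK, whose calls here are context-unaware. A minimal sketch of how a gateway method could still honor cancellation before issuing a blocking SDK call (an illustration only, not something this commit does):

	// Sketch: check the request context before a context-unaware SDK call.
	func (a *azureObjects) deleteObjectWithCancel(ctx context.Context, bucket, object string) error {
		if err := ctx.Err(); err != nil {
			return err // canceled or deadline exceeded; skip the network round trip
		}
		blob := a.client.GetContainerReference(bucket).GetBlobReference(object)
		return azureToObjectError(errors.Trace(blob.Delete(nil)), bucket, object)
	}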
@ -18,6 +18,8 @@ package sia

import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
@ -39,7 +41,6 @@ import (
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/sha256-simd"
)

const (
@ -298,17 +299,17 @@ func get(addr, call, apiPassword string) error {

// Shutdown saves any gateway metadata to disk
// if necessary and reload upon next restart.
func (s *siaObjects) Shutdown() error {
func (s *siaObjects) Shutdown(ctx context.Context) error {
return nil
}

// StorageInfo is not relevant to Sia backend.
func (s *siaObjects) StorageInfo() (si minio.StorageInfo) {
func (s *siaObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo) {
return si
}

// MakeBucket creates a new container on Sia backend.
func (s *siaObjects) MakeBucketWithLocation(bucket, location string) error {
func (s *siaObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(srcFile)

@ -326,7 +327,7 @@ func (s *siaObjects) MakeBucketWithLocation(bucket, location string) error {
}

// GetBucketInfo gets bucket metadata.
func (s *siaObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err error) {
func (s *siaObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))

@ -340,7 +341,7 @@ func (s *siaObjects) GetBucketInfo(bucket string) (bi minio.BucketInfo, err erro
}

// ListBuckets will detect and return existing buckets on Sia.
func (s *siaObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
func (s *siaObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
sObjs, serr := s.listRenterFiles("")
if serr != nil {
return buckets, serr
@ -370,14 +371,14 @@ func (s *siaObjects) ListBuckets() (buckets []minio.BucketInfo, err error) {
}

// DeleteBucket deletes a bucket on Sia.
func (s *siaObjects) DeleteBucket(bucket string) error {
func (s *siaObjects) DeleteBucket(ctx context.Context, bucket string) error {
sha256sum := sha256.Sum256([]byte(bucket))
var siaObj = path.Join(s.RootDir, bucket, hex.EncodeToString(sha256sum[:]))

return post(s.Address, "/renter/delete/"+siaObj, "", s.password)
}

func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
func (s *siaObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
siaObjs, siaErr := s.listRenterFiles(bucket)
if siaErr != nil {
return loi, siaErr
@ -410,7 +411,7 @@ func (s *siaObjects) ListObjects(bucket string, prefix string, marker string, de
return loi, nil
}

func (s *siaObjects) GetObject(bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
func (s *siaObjects) GetObject(ctx context.Context, bucket string, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
dstFile := path.Join(s.TempDir, minio.MustGetUUID())
defer os.Remove(dstFile)

@ -483,7 +484,7 @@ func (s *siaObjects) findSiaObject(bucket, object string) (siaObjectInfo, error)
}

// GetObjectInfo reads object info and replies back ObjectInfo
func (s *siaObjects) GetObjectInfo(bucket string, object string) (minio.ObjectInfo, error) {
func (s *siaObjects) GetObjectInfo(ctx context.Context, bucket string, object string) (minio.ObjectInfo, error) {
so, err := s.findSiaObject(bucket, object)
if err != nil {
return minio.ObjectInfo{}, err
@ -500,7 +501,7 @@ func (s *siaObjects) GetObjectInfo(bucket string, object string) (minio.ObjectIn
}

// PutObject creates a new object with the incoming data,
func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
func (s *siaObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo minio.ObjectInfo, err error) {
srcFile := path.Join(s.TempDir, minio.MustGetUUID())
writer, err := os.Create(srcFile)
if err != nil {
@ -529,7 +530,7 @@ func (s *siaObjects) PutObject(bucket string, object string, data *hash.Reader,
}

// DeleteObject deletes a blob in bucket
func (s *siaObjects) DeleteObject(bucket string, object string) error {
func (s *siaObjects) DeleteObject(ctx context.Context, bucket string, object string) error {
// Tell Sia daemon to delete the object
var siaObj = path.Join(s.RootDir, bucket, object)
return post(s.Address, "/renter/delete/"+siaObj, "", s.password)

@ -51,7 +51,7 @@ func LivenessCheckHandler(w http.ResponseWriter, r *http.Request) {
return
}
// If listing buckets fails, the server is having issues; send 503 Service Unavailable.
if _, err := objLayer.ListBuckets(); err != nil {
if _, err := objLayer.ListBuckets(nil); err != nil {
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
return
}

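A hedged sketch of exercising this liveness behavior from a test (the /minio/health/live path and the httptest wiring are assumptions for illustration; only the handler signature comes from this diff):

	// Sketch: drive LivenessCheckHandler directly and assert on the status code.
	req := httptest.NewRequest(http.MethodGet, "/minio/health/live", nil)
	rec := httptest.NewRecorder()
	LivenessCheckHandler(rec, req)
	if rec.Code != http.StatusOK && rec.Code != http.StatusServiceUnavailable {
		t.Fatalf("unexpected liveness status: %d", rec.Code)
	}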
@ -66,6 +66,6 @@ type OpsLockState struct {

// listLocksInfo - Fetches locks held on bucket, matching prefix held for longer than duration.
func listLocksInfo(bucket, prefix string, duration time.Duration) []VolumeLockInfo {
locksInfo, _ := newObjectLayerFn().ListLocks(bucket, prefix, duration)
locksInfo, _ := newObjectLayerFn().ListLocks(nil, bucket, prefix, duration)
return locksInfo
}

61
cmd/logger/log.go
Normal file
@ -0,0 +1,61 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package logger

import "context"

// Key used for ContextSet/Get
const contextKey = "reqInfo"

// KeyVal - appended to ReqInfo.Tags
type KeyVal struct {
Key string
Val string
}

// ReqInfo stores the request info.
type ReqInfo struct {
RemoteHost string // Client Host/IP
UserAgent string // User Agent
RequestID string // x-amz-request-id
API string // API name - GetObject PutObject NewMultipartUpload etc.
BucketName string // Bucket name
ObjectName string // Object name
Tags []KeyVal // Any additional info not accommodated by above fields
}

// AppendTags - appends key/val to ReqInfo.Tags
func (r *ReqInfo) AppendTags(key string, val string) {
if r == nil {
return
}
r.Tags = append(r.Tags, KeyVal{key, val})
}

// ContextSet sets ReqInfo in the context.
func ContextSet(ctx context.Context, req *ReqInfo) context.Context {
return context.WithValue(ctx, contextKey, req)
}

// ContextGet returns ReqInfo if set.
func ContextGet(ctx context.Context) *ReqInfo {
r, ok := ctx.Value(contextKey).(*ReqInfo)
if ok {
return r
}
return nil
}

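The new logger package is the payoff for all the context threading: request metadata set once at the edge can be recovered anywhere down the call chain. A minimal usage sketch (field values are illustrative):

	// Sketch: set ReqInfo at the request edge, read it deeper in the stack.
	ctx := logger.ContextSet(context.Background(), &logger.ReqInfo{
		API:        "GetObject", // illustrative values
		BucketName: "mybucket",
		ObjectName: "myobject",
	})

	// ...later, in any function the context was threaded through:
	if ri := logger.ContextGet(ctx); ri != nil {
		ri.AppendTags("etag", "abc123")
	}

Note that AppendTags tolerates a nil receiver, so deep call sites need not guard against a missing ReqInfo before tagging.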
@ -39,7 +39,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
bucketName := getRandomBucketName()
objectName := "test-object"
// create bucket.
err := obj.MakeBucketWithLocation(bucketName, "")
err := obj.MakeBucketWithLocation(nil, bucketName, "")
// Stop the test if creation of the bucket fails.
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
@ -68,7 +68,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -144,7 +144,7 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
}

for i, testCase := range testCases {
err = obj.GetObject(testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
}
@ -183,7 +183,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
// Setup for the tests.
bucketName := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(bucketName, "")
err := obj.MakeBucketWithLocation(nil, bucketName, "")
// Stop the test if creation of the bucket fails.
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
@ -209,7 +209,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -254,7 +254,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
}
}

err = obj.GetObject(testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
}
@ -293,7 +293,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
bucketName := getRandomBucketName()
objectName := "test-object"
// create bucket.
err := obj.MakeBucketWithLocation(bucketName, "")
err := obj.MakeBucketWithLocation(nil, bucketName, "")
// Stop the test if creation of the bucket fails.
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
@ -322,7 +322,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -407,7 +407,7 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
}

for i, testCase := range testCases {
err = obj.GetObject(testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, testCase.startOffset, testCase.length, testCase.writer, "")
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
}

@ -29,11 +29,11 @@ func TestGetObjectInfo(t *testing.T) {
// Testing GetObjectInfo().
func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
// This bucket is used for testing getObjectInfo operations.
err := obj.MakeBucketWithLocation("test-getobjectinfo", "")
err := obj.MakeBucketWithLocation(nil, "test-getobjectinfo", "")
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
_, err = obj.PutObject("test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
_, err = obj.PutObject(nil, "test-getobjectinfo", "Asia/asiapics.jpg", mustGetHashReader(t, bytes.NewBufferString("asiapics"), int64(len("asiapics")), "", ""), nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -71,7 +71,7 @@ func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
{"test-getobjectinfo", "Asia/asiapics.jpg", resultCases[0], nil, true},
}
for i, testCase := range testCases {
result, err := obj.GetObjectInfo(testCase.bucketName, testCase.objectName)
result, err := obj.GetObjectInfo(nil, testCase.bucketName, testCase.objectName)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
}

@ -175,7 +175,7 @@ func checkPutObjectArgs(bucket, object string, obj ObjectLayer, size int64) erro

// Checks whether bucket exists and returns appropriate error if not.
func checkBucketExist(bucket string, obj ObjectLayer) error {
_, err := obj.GetBucketInfo(bucket)
_, err := obj.GetBucketInfo(nil, bucket)
if err != nil {
return errors.Cause(err)
}

@ -17,6 +17,7 @@
package cmd

import (
"context"
"io"
"time"

@ -28,50 +29,50 @@ import (
// ObjectLayer implements primitives for object API layer.
type ObjectLayer interface {
// Storage operations.
Shutdown() error
StorageInfo() StorageInfo
Shutdown(context.Context) error
StorageInfo(context.Context) StorageInfo

// Bucket operations.
MakeBucketWithLocation(bucket string, location string) error
GetBucketInfo(bucket string) (bucketInfo BucketInfo, err error)
ListBuckets() (buckets []BucketInfo, err error)
DeleteBucket(bucket string) error
ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)
MakeBucketWithLocation(ctx context.Context, bucket string, location string) error
GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error)
ListBuckets(ctx context.Context) (buckets []BucketInfo, err error)
DeleteBucket(ctx context.Context, bucket string) error
ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)

// Object operations.
GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error)
PutObject(bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
CopyObject(srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error)
DeleteObject(bucket, object string) error
GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error)
GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error)
PutObject(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error)
CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error)
DeleteObject(ctx context.Context, bucket, object string) error

// Multipart operations.
ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error)
CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error)
NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error)
CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo) (info PartInfo, err error)
PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
AbortMultipartUpload(bucket, object, uploadID string) error
CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)
PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error)
ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error)
AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error
CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error)

// Healing operations.
HealFormat(dryRun bool) (madmin.HealResultItem, error)
HealBucket(bucket string, dryRun bool) ([]madmin.HealResultItem, error)
HealObject(bucket, object string, dryRun bool) (madmin.HealResultItem, error)
ListBucketsHeal() (buckets []BucketInfo, err error)
ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error)
HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error)
HealBucket(ctx context.Context, bucket string, dryRun bool) ([]madmin.HealResultItem, error)
HealObject(ctx context.Context, bucket, object string, dryRun bool) (madmin.HealResultItem, error)
ListBucketsHeal(ctx context.Context) (buckets []BucketInfo, err error)
ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error)

// Locking operations
ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
ClearLocks([]VolumeLockInfo) error
ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
ClearLocks(context.Context, []VolumeLockInfo) error

// Policy operations
SetBucketPolicy(string, policy.BucketAccessPolicy) error
GetBucketPolicy(string) (policy.BucketAccessPolicy, error)
RefreshBucketPolicy(string) error
DeleteBucketPolicy(string) error
SetBucketPolicy(context.Context, string, policy.BucketAccessPolicy) error
GetBucketPolicy(context.Context, string) (policy.BucketAccessPolicy, error)
RefreshBucketPolicy(context.Context, string) error
DeleteBucketPolicy(context.Context, string) error

// Supported operations check
IsNotificationSupported() bool

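With context.Context as the uniform leading parameter across the whole interface, a handler builds one request-scoped context and hands it to every ObjectLayer call. A minimal sketch of that pattern (the handler and its error handling are illustrative; the real handlers in this commit build the context via a newContext helper):

	// Sketch: one request-scoped context threaded through ObjectLayer calls.
	func exampleHandler(w http.ResponseWriter, r *http.Request) {
		ctx := logger.ContextSet(r.Context(), &logger.ReqInfo{
			API:        "ListObjects", // illustrative values
			RemoteHost: r.RemoteAddr,
			UserAgent:  r.UserAgent(),
		})

		objLayer := newObjectLayerFn()
		if objLayer == nil {
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		if _, err := objLayer.ListObjects(ctx, "mybucket", "", "", "", 1000); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		w.WriteHeader(http.StatusOK)
	}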
@ -44,7 +44,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
"empty-bucket",
}
for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(bucket, "")
err := obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -68,7 +68,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
}
for _, object := range testObjects {
md5Bytes := md5.Sum([]byte(object.content))
_, err = obj.PutObject(testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
_, err = obj.PutObject(nil, testBuckets[0], object.name, mustGetHashReader(t, bytes.NewBufferString(object.content),
int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), object.meta)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
@ -524,7 +524,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
}

for i, testCase := range testCases {
result, err := obj.ListObjects(testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, int(testCase.maxKeys))
result, err := obj.ListObjects(nil, testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimeter, int(testCase.maxKeys))
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, err.Error())
}
@ -565,7 +565,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t TestErrHandler) {
}
// Take ListObject treeWalk go-routine to completion, if available in the treewalk pool.
if result.IsTruncated {
_, err = obj.ListObjects(testCase.bucketName, testCase.prefix, result.NextMarker, testCase.delimeter, 1000)
_, err = obj.ListObjects(nil, testCase.bucketName, testCase.prefix, result.NextMarker, testCase.delimeter, 1000)
if err != nil {
t.Fatal(err)
}
@ -603,7 +603,7 @@ func BenchmarkListObjects(b *testing.B) {

bucket := "ls-benchmark-bucket"
// Create a bucket.
err = obj.MakeBucketWithLocation(bucket, "")
err = obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
b.Fatal(err)
}
@ -611,7 +611,7 @@ func BenchmarkListObjects(b *testing.B) {
// Insert objects to be listed and benchmarked later.
for i := 0; i < 20000; i++ {
key := "obj" + strconv.Itoa(i)
_, err = obj.PutObject(bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
_, err = obj.PutObject(nil, bucket, key, mustGetHashReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), nil)
if err != nil {
b.Fatal(err)
}
@ -621,7 +621,7 @@ func BenchmarkListObjects(b *testing.B) {

// List the buckets over and over and over.
for i := 0; i < b.N; i++ {
_, err = obj.ListObjects(bucket, "", "obj9000", "", -1)
_, err = obj.ListObjects(nil, bucket, "", "obj9000", "", -1)
if err != nil {
b.Fatal(err)
}

@ -39,14 +39,14 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
bucket := "minio-bucket"
object := "minio-object"

_, err := obj.NewMultipartUpload("--", object, nil)
_, err := obj.NewMultipartUpload(nil, "--", object, nil)
if err == nil {
t.Fatalf("%s: Expected to fail since bucket name is invalid.", instanceType)
}

errMsg := "Bucket not found: minio-bucket"
// operation expected to fail since the bucket on which NewMultipartUpload is being initiated doesn't exist.
_, err = obj.NewMultipartUpload(bucket, object, nil)
_, err = obj.NewMultipartUpload(nil, bucket, object, nil)
if err == nil {
t.Fatalf("%s: Expected to fail since the NewMultipartUpload is initialized on a non-existent bucket.", instanceType)
}
@ -55,23 +55,23 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
}

// Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucketWithLocation(bucket, "")
err = obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

_, err = obj.NewMultipartUpload(bucket, "\\", nil)
_, err = obj.NewMultipartUpload(nil, bucket, "\\", nil)
if err == nil {
t.Fatalf("%s: Expected to fail since object name is invalid.", instanceType)
}

uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}

err = obj.AbortMultipartUpload(bucket, object, uploadID)
err = obj.AbortMultipartUpload(nil, bucket, object, uploadID)
if err != nil {
switch err.(type) {
case InvalidUploadID:
@ -94,13 +94,13 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
object := "minio-object"

// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(bucket, "")
err := obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -119,7 +119,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
}
// Iterating over creatPartCases to generate multipart chunks.
for i, testCase := range abortTestCases {
err = obj.AbortMultipartUpload(testCase.bucketName, testCase.objName, testCase.uploadID)
err = obj.AbortMultipartUpload(nil, testCase.bucketName, testCase.objName, testCase.uploadID)
if testCase.expectedErrType == nil && err != nil {
t.Errorf("Test %d, unexpected err is received: %v, expected:%v\n", i+1, err, testCase.expectedErrType)
}
@ -140,18 +140,18 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
object := "minio-object"

// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(bucket, "")
err := obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

_, err = obj.NewMultipartUpload(bucket, object, nil)
_, err = obj.NewMultipartUpload(nil, bucket, object, nil)
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}

err = obj.AbortMultipartUpload(bucket, object, "abc")
err = obj.AbortMultipartUpload(nil, bucket, object, "abc")
|
||||
err = errors.Cause(err)
|
||||
switch err.(type) {
|
||||
case InvalidUploadID:
|
||||
@ -176,14 +176,14 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
|
||||
// objectNames[0].
|
||||
// uploadIds [0].
|
||||
// Create bucket before intiating NewMultipartUpload.
|
||||
err := obj.MakeBucketWithLocation(bucketNames[0], "")
|
||||
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "")
|
||||
if err != nil {
|
||||
// Failed to create newbucket, abort.
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
}
|
||||
|
||||
// Initiate Multipart Upload on the above created bucket.
|
||||
uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
|
||||
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil)
|
||||
if err != nil {
|
||||
// Failed to create NewMultipartUpload, abort.
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
@ -220,7 +220,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
|
||||
sha256sum := ""
|
||||
// Iterating over creatPartCases to generate multipart chunks.
|
||||
for _, testCase := range createPartCases {
|
||||
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
||||
_, err = obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
||||
if err != nil {
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
}
|
||||
@ -234,7 +234,7 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
|
||||
|
||||
// Object part upload should fail with quorum not available.
|
||||
testCase := createPartCases[len(createPartCases)-1]
|
||||
_, err = obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
||||
_, err = obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
|
||||
if err == nil {
|
||||
t.Fatalf("Test %s: expected to fail but passed instead", instanceType)
|
||||
}
|
||||
@ -257,19 +257,19 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
|
||||
object := "minio-object"
|
||||
|
||||
// Create bucket before intiating NewMultipartUpload.
|
||||
err := obj.MakeBucketWithLocation(bucket, "")
|
||||
err := obj.MakeBucketWithLocation(nil, bucket, "")
|
||||
if err != nil {
|
||||
// Failed to create newbucket, abort.
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
}
|
||||
// Initiate Multipart Upload on the above created bucket.
|
||||
uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
|
||||
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil)
|
||||
if err != nil {
|
||||
// Failed to create NewMultipartUpload, abort.
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
}
|
||||
// Creating a dummy bucket for tests.
|
||||
err = obj.MakeBucketWithLocation("unused-bucket", "")
|
||||
err = obj.MakeBucketWithLocation(nil, "unused-bucket", "")
|
||||
if err != nil {
|
||||
// Failed to create newbucket, abort.
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
@ -353,7 +353,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
|
||||
|
||||
// Validate all the test cases.
|
||||
for i, testCase := range testCases {
|
||||
actualInfo, actualErr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
|
||||
actualInfo, actualErr := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, testCase.inputSHA256))
|
||||
// All are test cases above are expected to fail.
|
||||
if actualErr != nil && testCase.shouldPass {
|
||||
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
|
||||
@ -393,13 +393,13 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(bucketNames[0], "")
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -411,7 +411,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0].
// uploadIds [1-3].
// Bucket to test for multiple upload IDs for a given object.
err = obj.MakeBucketWithLocation(bucketNames[1], "")
err = obj.MakeBucketWithLocation(nil, bucketNames[1], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -419,7 +419,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
for i := 0; i < 3; i++ {
// Initiate Multipart Upload on bucketNames[1] for the same object 3 times.
// Used to test the listing for the case of multiple uploadIDs for a given object.
uploadID, err = obj.NewMultipartUpload(bucketNames[1], objectNames[0], nil)
uploadID, err = obj.NewMultipartUpload(nil, bucketNames[1], objectNames[0], nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -432,7 +432,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// bucketnames[2].
// objectNames[0-2].
// uploadIds [4-9].
err = obj.MakeBucketWithLocation(bucketNames[2], "")
err = obj.MakeBucketWithLocation(nil, bucketNames[2], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -441,7 +441,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// Used to test the listing for the case of multiple objects for a given bucket.
for i := 0; i < 6; i++ {
var uploadID string
uploadID, err = obj.NewMultipartUpload(bucketNames[2], objectNames[i], nil)
uploadID, err = obj.NewMultipartUpload(nil, bucketNames[2], objectNames[i], nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -487,7 +487,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1215,7 +1215,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan

for i, testCase := range testCases {
// fmt.Println(i+1, testCase) // uncomment to peek into the test cases.
actualResult, actualErr := obj.ListMultipartUploads(testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
actualResult, actualErr := obj.ListMultipartUploads(nil, testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
}
@ -1267,13 +1267,13 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(bucketNames[0], "")
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -1307,7 +1307,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1427,7 +1427,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
}

for i, testCase := range testCases {
actualResult, actualErr := obj.ListObjectParts(testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
actualResult, actualErr := obj.ListObjectParts(nil, testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
}
@ -1511,13 +1511,13 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(bucketNames[0], "")
err := obj.MakeBucketWithLocation(nil, bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -1548,7 +1548,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
sha256sum := ""
// Iterating over createPartCases to generate multipart chunks.
for _, testCase := range createPartCases {
_, err := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
_, err := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error())
}
@ -1665,7 +1665,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
}

for i, testCase := range testCases {
actualResult, actualErr := obj.ListObjectParts(testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
actualResult, actualErr := obj.ListObjectParts(nil, testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr.Error())
}
@ -1757,13 +1757,13 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// objectNames[0].
// uploadIds [0].
// Create bucket before initiating NewMultipartUpload.
err = obj.MakeBucketWithLocation(bucketNames[0], "")
err = obj.MakeBucketWithLocation(nil, bucketNames[0], "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
}
// Initiate Multipart Upload on the above created bucket.
uploadID, err = obj.NewMultipartUpload(bucketNames[0], objectNames[0], map[string]string{"X-Amz-Meta-Id": "id"})
uploadID, err = obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], map[string]string{"X-Amz-Meta-Id": "id"})
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -1798,7 +1798,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
sha256sum := ""
// Iterating over parts to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
_, err = obj.PutObjectPart(nil, part.bucketName, part.objName, part.uploadID, part.PartID, mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, sha256sum))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
}
@ -1901,7 +1901,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
}

for i, testCase := range testCases {
actualResult, actualErr := obj.CompleteMultipartUpload(testCase.bucket, testCase.object, testCase.uploadID, testCase.parts)
actualResult, actualErr := obj.CompleteMultipartUpload(nil, testCase.bucket, testCase.object, testCase.uploadID, testCase.parts)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s", i+1, instanceType, actualErr)
}

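One detail worth flagging in the test hunks above: the tests pass nil for the new argument, which compiles because context.Context is an interface, but it stays safe only while no implementation dereferences the context. context.Background() is the conventional non-nil placeholder; a hypothetical helper (not part of this commit) that the tests could share:

// Hypothetical helper, not in the commit; the diff itself simply passes nil.
func testContext() context.Context {
    // Background() never cancels, which matches what nil implies here,
    // but unlike nil it can be queried via ctx.Done() or ctx.Err().
    return context.Background()
}
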
@ -46,14 +46,14 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
object := "minio-object"

// Create bucket.
err := obj.MakeBucketWithLocation(bucket, "")
err := obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

// Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation("unused-bucket", "")
err = obj.MakeBucketWithLocation(nil, "unused-bucket", "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -162,7 +162,7 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
}

for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
objInfo, actualErr := obj.PutObject(nil, testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], testCase.inputSHA256), testCase.inputMeta)
actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.expectedError == nil {
t.Errorf("Test %d: %s: Expected to pass, but failed with: error %s.", i+1, instanceType, actualErr.Error())
@ -197,14 +197,14 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
object := "minio-object"

// Create bucket.
err := obj.MakeBucketWithLocation(bucket, "")
err := obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

// Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation("unused-bucket", "")
err = obj.MakeBucketWithLocation(nil, "unused-bucket", "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -236,7 +236,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di

sha256sum := ""
for i, testCase := range testCases {
objInfo, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
objInfo, actualErr := obj.PutObject(nil, testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", i+1, instanceType, actualErr.Error())
@ -286,7 +286,7 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
InsufficientWriteQuorum{},
}

_, actualErr := obj.PutObject(testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
_, actualErr := obj.PutObject(nil, testCase.bucketName, testCase.objName, mustGetHashReader(t, bytes.NewReader(testCase.inputData), testCase.intputDataSize, testCase.inputMeta["etag"], sha256sum), testCase.inputMeta)
actualErr = errors.Cause(actualErr)
if actualErr != nil && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected to pass, but failed with: <ERROR> %s.", len(testCases)+1, instanceType, actualErr.Error())
@ -311,7 +311,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
object := "minio-object"

// Create bucket.
err := obj.MakeBucketWithLocation(bucket, "")
err := obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -319,7 +319,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk

data := []byte("hello, world")
// Create object.
_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
if err != nil {
// Failed to create object, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -345,14 +345,14 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
object := "minio-object"

// Create bucket.
err := obj.MakeBucketWithLocation(bucket, "")
err := obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
}

// Initiate Multipart Upload on the above created bucket.
uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -364,7 +364,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer.Write(fiveMBBytes)
etag1 := hex.EncodeToString(md5Writer.Sum(nil))
sha256sum := ""
_, err = obj.PutObjectPart(bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
_, err = obj.PutObjectPart(nil, bucket, object, uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), int64(len(fiveMBBytes)), etag1, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -375,7 +375,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
md5Writer = md5.New()
md5Writer.Write(data)
etag2 := hex.EncodeToString(md5Writer.Sum(nil))
_, err = obj.PutObjectPart(bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
_, err = obj.PutObjectPart(nil, bucket, object, uploadID, 2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), etag2, sha256sum))
if err != nil {
// Failed to upload object part, abort.
t.Fatalf("%s : %s", instanceType, err.Error())
@ -386,7 +386,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
{ETag: etag1, PartNumber: 1},
{ETag: etag2, PartNumber: 2},
}
_, err = obj.CompleteMultipartUpload(bucket, object, uploadID, parts)
_, err = obj.CompleteMultipartUpload(nil, bucket, object, uploadID, parts)
if err != nil {
// Failed to complete multipart upload, abort.
t.Fatalf("%s : %s", instanceType, err.Error())

@ -17,6 +17,7 @@
package cmd

import (
"context"
"net"
"net/http"
"strings"
@ -231,10 +232,10 @@ func isETagEqual(left, right string) bool {
// deleteObject is a convenient wrapper to delete an object; this
// is a common function to be called from object handlers and
// web handlers.
func deleteObject(obj ObjectLayer, bucket, object string, r *http.Request) (err error) {
func deleteObject(ctx context.Context, obj ObjectLayer, bucket, object string, r *http.Request) (err error) {

// Proceed to delete the object.
if err = obj.DeleteObject(bucket, object); err != nil {
if err = obj.DeleteObject(ctx, bucket, object); err != nil {
return err
}

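Per the commit message, the same signature change lands in the xl, fs, azure and sia backends. A representative sketch of how an implementation absorbs the parameter; fsObjects matches the fs backend's receiver naming, but the body and the internal helper are illustrative, not the literal fs code:

// Illustrative only: the backend accepts ctx for interface compatibility
// and can start honoring cancellation incrementally.
func (fs fsObjects) DeleteObject(ctx context.Context, bucket, object string) error {
    if err := ctx.Err(); err != nil {
        return err // request already cancelled or timed out; skip disk work
    }
    return fs.deleteObjectInternal(bucket, object) // hypothetical pre-existing path
}
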
@ -81,6 +81,8 @@ func errAllowableObjectNotFound(bucket string, r *http.Request) APIErrorCode {
// This implementation of the GET operation retrieves an object. To use GET,
// you must have READ access to the object.
func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "GetObject")

var object, bucket string
vars := mux.Vars(r)
bucket = vars["bucket"]
@ -98,7 +100,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return
}

objInfo, err := objectAPI.GetObjectInfo(bucket, object)
objInfo, err := objectAPI.GetObjectInfo(ctx, bucket, object)
if err != nil {
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
@ -169,7 +171,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
httpWriter := ioutil.WriteOnClose(writer)

// Reads the object at startOffset and writes to httpWriter.
if err = objectAPI.GetObject(bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
if err = objectAPI.GetObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
errorIf(err, "Unable to write to client.")
if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
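
newContext appears at the top of each handler but its definition is outside this excerpt. One plausible shape, assuming it derives a request-scoped context and tags it with the API name for logging or tracing; treat everything below as an assumption rather than the commit's actual helper:

// Plausible sketch only; the real newContext is defined elsewhere in the commit.
type apiNameKey struct{} // hypothetical unexported key type

func newContext(r *http.Request, api string) context.Context {
    // r.Context() is cancelled when the client disconnects, so backend
    // calls receiving this ctx can observe abandoned requests.
    return context.WithValue(r.Context(), apiNameKey{}, api)
}
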
@ -207,6 +209,8 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
// -----------
// The HEAD operation retrieves metadata from an object without returning the object itself.
func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "HeadObject")

var object, bucket string
vars := mux.Vars(r)
bucket = vars["bucket"]
@ -223,7 +227,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
return
}

objInfo, err := objectAPI.GetObjectInfo(bucket, object)
objInfo, err := objectAPI.GetObjectInfo(ctx, bucket, object)
if err != nil {
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
@ -310,6 +314,8 @@ func getCpObjMetadataFromHeader(header http.Header, userMeta map[string]string)
// This implementation of the PUT operation adds an object to a bucket
// while reading the object from another source.
func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "CopyObject")

vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject := vars["object"]
@ -348,7 +354,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
}

cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
srcInfo, err := objectAPI.GetObjectInfo(srcBucket, srcObject)
srcInfo, err := objectAPI.GetObjectInfo(ctx, srcBucket, srcObject)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -502,7 +508,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re

// Copy source object to destination, if source and destination
// object is same then only metadata is updated.
objInfo, err := objectAPI.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo)
objInfo, err := objectAPI.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo)
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -539,6 +545,8 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
// ----------
// This implementation of the PUT operation adds an object to a bucket.
func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "PutObject")

objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@ -684,7 +692,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
}

objInfo, err := objectAPI.PutObject(bucket, object, hashReader, metadata)
objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -722,6 +730,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req

// NewMultipartUploadHandler - New multipart upload.
func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "NewMultipartUpload")

var object, bucket string
vars := mux.Vars(r)
bucket = vars["bucket"]
@ -781,7 +791,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
metadata[k] = v
}

uploadID, err := objectAPI.NewMultipartUpload(bucket, object, metadata)
uploadID, err := objectAPI.NewMultipartUpload(ctx, bucket, object, metadata)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -796,6 +806,8 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r

// CopyObjectPartHandler - uploads a part by copying data from an existing object as data source.
func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "CopyObjectPart")

vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject := vars["object"]
@ -840,7 +852,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return
}

srcInfo, err := objectAPI.GetObjectInfo(srcBucket, srcObject)
srcInfo, err := objectAPI.GetObjectInfo(ctx, srcBucket, srcObject)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -898,7 +910,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
}
if objectAPI.IsEncryptionSupported() {
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(dstBucket, dstObject, uploadID, 0, 1)
li, err = objectAPI.ListObjectParts(ctx, dstBucket, dstObject, uploadID, 0, 1)
if err != nil {
pipeWriter.CloseWithError(err)
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -963,7 +975,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt

// Copy source object to destination, if source and destination
// object is same then only metadata is updated.
partInfo, err := objectAPI.CopyObjectPart(srcBucket, srcObject, dstBucket,
partInfo, err := objectAPI.CopyObjectPart(ctx, srcBucket, srcObject, dstBucket,
dstObject, uploadID, partID, startOffset, length, srcInfo)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -982,6 +994,8 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt

// PutObjectPartHandler - uploads an incoming part for an ongoing multipart operation.
func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "PutObjectPart")

vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
@ -1097,7 +1111,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http

if objectAPI.IsEncryptionSupported() {
var li ListPartsInfo
li, err = objectAPI.ListObjectParts(bucket, object, uploadID, 0, 1)
li, err = objectAPI.ListObjectParts(ctx, bucket, object, uploadID, 0, 1)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -1144,7 +1158,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
}
}

partInfo, err := objectAPI.PutObjectPart(bucket, object, uploadID, partID, hashReader)
partInfo, err := objectAPI.PutObjectPart(ctx, bucket, object, uploadID, partID, hashReader)
if err != nil {
// Verify if the underlying error is signature mismatch.
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -1159,6 +1173,8 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http

// AbortMultipartUploadHandler - Abort multipart upload
func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "AbortMultipartUpload")

vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
@ -1175,7 +1191,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
}

uploadID, _, _, _ := getObjectResources(r.URL.Query())
if err := objectAPI.AbortMultipartUpload(bucket, object, uploadID); err != nil {
if err := objectAPI.AbortMultipartUpload(ctx, bucket, object, uploadID); err != nil {
errorIf(err, "AbortMultipartUpload failed")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -1185,6 +1201,8 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,

// ListObjectPartsHandler - List object parts
func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "ListObjectParts")

vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
@ -1209,7 +1227,7 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht
writeErrorResponse(w, ErrInvalidMaxParts, r.URL)
return
}
listPartsInfo, err := objectAPI.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
listPartsInfo, err := objectAPI.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -1223,6 +1241,8 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht

// CompleteMultipartUploadHandler - Complete multipart upload.
func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "CompleteMultipartUpload")

vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
@ -1267,7 +1287,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
completeParts = append(completeParts, part)
}

objInfo, err := objectAPI.CompleteMultipartUpload(bucket, object, uploadID, completeParts)
objInfo, err := objectAPI.CompleteMultipartUpload(ctx, bucket, object, uploadID, completeParts)
if err != nil {
err = errors.Cause(err)
switch oErr := err.(type) {
@ -1319,6 +1339,8 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite

// DeleteObjectHandler - delete an object
func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, "DeleteObject")

vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
@ -1338,7 +1360,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
// Ignore delete object errors while replying to client, since we are
// supposed to reply only 204. Additionally log the error for
// investigation.
if err := deleteObject(objectAPI, bucket, object, r); err != nil {
if err := deleteObject(ctx, objectAPI, bucket, object, r); err != nil {
errorIf(err, "Unable to delete an object %s", pathJoin(bucket, object))
}
writeSuccessNoContent(w)

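Taken together, the handler hunks above follow a two-line recipe that makes the change easy to audit: create the context once at the top of the handler, then thread it through every ObjectLayer call in the body. Schematically (handler and API names are generic placeholders, not from the commit):

// Generic shape of each converted handler in this diff.
func (api objectAPIHandlers) SomeHandler(w http.ResponseWriter, r *http.Request) {
    ctx := newContext(r, "SomeAPI") // step 1: request-scoped context
    // ...validation and parsing unchanged...
    // step 2: pass ctx as the first argument to each ObjectLayer call.
}
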
@ -76,7 +76,7 @@ func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err := obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -223,7 +223,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err := obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err := obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -751,7 +751,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
t.Errorf("Test %d: %s: Object content differs from expected value.: %s", i+1, instanceType, string(actualContent))
continue
}
objInfo, err := obj.GetObjectInfo(testCase.bucketName, testCase.objectName)
objInfo, err := obj.GetObjectInfo(nil, testCase.bucketName, testCase.objectName)
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
}
@ -763,7 +763,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
t.Fatalf("Test %d: %s: ContentEncoding is set to \"%s\" which is unexpected, expected \"%s\"", i+1, instanceType, objInfo.ContentEncoding, expectedContentEncoding)
}
buffer := new(bytes.Buffer)
err = obj.GetObject(testCase.bucketName, testCase.objectName, 0, int64(testCase.dataLen), buffer, objInfo.ETag)
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, 0, int64(testCase.dataLen), buffer, objInfo.ETag)
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
}
@ -935,7 +935,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
buffer := new(bytes.Buffer)

// Fetch the object to check whether the content is same as the one uploaded via PutObject.
err = obj.GetObject(testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
}
@ -978,7 +978,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
if testCase.expectedRespStatus == http.StatusOK {
buffer := new(bytes.Buffer)
// Fetch the object to check whether the content is same as the one uploaded via PutObject.
err = obj.GetObject(testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
err = obj.GetObject(nil, testCase.bucketName, testCase.objectName, 0, int64(len(bytesData)), buffer, "")
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
}
@ -1058,7 +1058,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName,
_, err = obj.PutObject(nil, input.bucketName, input.objectName,
mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
@ -1072,7 +1072,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
// PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(bucketName, testObject, nil)
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -1117,7 +1117,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
})
}

result, err := obj.CompleteMultipartUpload(bucketName, testObject, uploadID, parts)
result, err := obj.CompleteMultipartUpload(nil, bucketName, testObject, uploadID, parts)
if err != nil {
t.Fatalf("Test: %s complete multipart upload failed: <ERROR> %v", instanceType, err)
}
@ -1126,7 +1126,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam
}

var buf bytes.Buffer
if err = obj.GetObject(bucketName, testObject, 0, int64(len(bytesData[0].byteData)), &buf, ""); err != nil {
if err = obj.GetObject(nil, bucketName, testObject, 0, int64(len(bytesData[0].byteData)), &buf, ""); err != nil {
t.Fatalf("Test: %s reading completed file failed: <ERROR> %v", instanceType, err)
}
if !bytes.Equal(buf.Bytes(), bytesData[0].byteData) {
@ -1169,7 +1169,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1182,7 +1182,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(bucketName, testObject, nil)
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -1398,7 +1398,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
// See if the new part has been uploaded.
// testing whether the copy was successful.
var results ListPartsInfo
results, err = obj.ListObjectParts(testCase.bucketName, testObject, testCase.uploadID, 0, 1)
results, err = obj.ListObjectParts(nil, testCase.bucketName, testObject, testCase.uploadID, 0, 1)
if err != nil {
t.Fatalf("Test %d: %s: Failed to look for copied object part: <ERROR> %s", i+1, instanceType, err)
}
@ -1506,7 +1506,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -1775,7 +1775,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
if rec.Code == http.StatusOK {
// See if the new object is formed.
// testing whether the copy was successful.
err = obj.GetObject(testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "")
err = obj.GetObject(nil, testCase.bucketName, testCase.newObjectName, 0, int64(len(bytesData[0].byteData)), buffers[0], "")
if err != nil {
t.Fatalf("Test %d: %s: Failed to fetch the copied object: <ERROR> %s", i+1, instanceType, err)
}
@ -1909,7 +1909,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Error decoding the recorded response Body")
}
// verify the uploadID by making an attempt to list parts.
_, err = obj.ListObjectParts(bucketName, objectName, multipartResponse.UploadID, 0, 1)
_, err = obj.ListObjectParts(nil, bucketName, objectName, multipartResponse.UploadID, 0, 1)
if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err)
}
@ -1961,7 +1961,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Error decoding the recorded response Body")
}
// verify the uploadID by making an attempt to list parts.
_, err = obj.ListObjectParts(bucketName, objectName, multipartResponse.UploadID, 0, 1)
_, err = obj.ListObjectParts(nil, bucketName, objectName, multipartResponse.UploadID, 0, 1)
if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err)
}
@ -2073,7 +2073,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam
wg.Wait()
// Validate the upload ID by an attempt to list parts using it.
for _, uploadID := range testUploads.uploads {
_, err := obj.ListObjectParts(bucketName, objectName, uploadID, 0, 1)
_, err := obj.ListObjectParts(nil, bucketName, objectName, uploadID, 0, 1)
if err != nil {
t.Fatalf("Invalid UploadID: <ERROR> %s", err)
}
@ -2101,7 +2101,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s

for i := 0; i < 2; i++ {
// initiate new multipart uploadID.
uploadID, err = obj.NewMultipartUpload(bucketName, objectName, nil)
uploadID, err = obj.NewMultipartUpload(nil, bucketName, objectName, nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -2142,7 +2142,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
}
// Iterating over parts to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
_, err = obj.PutObjectPart(nil, part.bucketName, part.objName, part.uploadID, part.PartID,
mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
@ -2450,7 +2450,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri

for i := 0; i < 2; i++ {
// initiate new multipart uploadID.
uploadID, err = obj.NewMultipartUpload(bucketName, objectName, nil)
uploadID, err = obj.NewMultipartUpload(nil, bucketName, objectName, nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -2491,7 +2491,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
}
// Iterating over parts to generate multipart chunks.
for _, part := range parts {
_, err = obj.PutObjectPart(part.bucketName, part.objName, part.uploadID, part.PartID,
_, err = obj.PutObjectPart(nil, part.bucketName, part.objName, part.uploadID, part.PartID,
mustGetHashReader(t, bytes.NewBufferString(part.inputReaderData), part.intputDataSize, part.inputMd5, ""))
if err != nil {
t.Fatalf("%s : %s", instanceType, err)
@ -2630,7 +2630,7 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string
// iterate through the above set of inputs and upload the object.
for i, input := range putObjectInputs {
// uploading the object.
_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData[""], ""), input.metaData)
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -2924,7 +2924,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
// PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(bucketName, testObject, nil)
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -3327,7 +3327,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
// PutObjectPart API HTTP Handler has to be tested in isolation,
// that is without any other handler being registered,
// That's why NewMultipartUpload is initiated using ObjectLayer.
uploadID, err := obj.NewMultipartUpload(bucketName, testObject, nil)
uploadID, err := obj.NewMultipartUpload(nil, bucketName, testObject, nil)
if err != nil {
// Failed to create NewMultipartUpload, abort.
t.Fatalf("Minio %s : <ERROR> %s", instanceType, err)
@ -3336,7 +3336,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
uploadIDCopy := uploadID

// create an object Part, will be used to test list object parts.
_, err = obj.PutObjectPart(bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
_, err = obj.PutObjectPart(nil, bucketName, testObject, uploadID, 1, mustGetHashReader(t, bytes.NewReader([]byte("hello")), int64(len("hello")), "5d41402abc4b2a76b9719d911017c592", ""))
if err != nil {
t.Fatalf("Minio %s : %s.", instanceType, err)
}

@ -77,7 +77,7 @@ func (s *ObjectLayerAPISuite) TestMakeBucket(t *testing.T) {

// Tests validate bucket creation.
func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation("bucket-unknown", "")
err := obj.MakeBucketWithLocation(nil, "bucket-unknown", "")
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -90,11 +90,11 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectCreation(t *testing.T) {

// Tests validate creation of part files during Multipart operation.
func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation("bucket", "")
err := obj.MakeBucketWithLocation(nil, "bucket", "")
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
uploadID, err := obj.NewMultipartUpload("bucket", "key", nil)
uploadID, err := obj.NewMultipartUpload(nil, "bucket", "key", nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -105,7 +105,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
expectedETaghex := getMD5Hash(data)

var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
calcPartInfo, err = obj.PutObjectPart(nil, "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBuffer(data), int64(len(data)), expectedETaghex, ""))
if err != nil {
t.Errorf("%s: <ERROR> %s", instanceType, err)
}
@ -117,7 +117,7 @@ func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErr
ETag: calcPartInfo.ETag,
})
}
objInfo, err := obj.CompleteMultipartUpload("bucket", "key", uploadID, completedParts.Parts)
objInfo, err := obj.CompleteMultipartUpload(nil, "bucket", "key", uploadID, completedParts.Parts)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -133,11 +133,11 @@ func (s *ObjectLayerAPISuite) TestMultipartObjectAbort(t *testing.T) {

// Tests validate abortion of Multipart operation.
func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation("bucket", "")
err := obj.MakeBucketWithLocation(nil, "bucket", "")
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
uploadID, err := obj.NewMultipartUpload("bucket", "key", nil)
uploadID, err := obj.NewMultipartUpload(nil, "bucket", "key", nil)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -155,7 +155,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan

metadata["md5"] = expectedETaghex
var calcPartInfo PartInfo
calcPartInfo, err = obj.PutObjectPart("bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
calcPartInfo, err = obj.PutObjectPart(nil, "bucket", "key", uploadID, i, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), expectedETaghex, ""))
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -164,7 +164,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
}
parts[i] = expectedETaghex
}
err = obj.AbortMultipartUpload("bucket", "key", uploadID)
err = obj.AbortMultipartUpload(nil, "bucket", "key", uploadID)
if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err)
}
@ -178,7 +178,7 @@ func (s *ObjectLayerAPISuite) TestMultipleObjectCreation(t *testing.T) {
|
||||
// Tests validate object creation.
|
||||
func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
objects := make(map[string][]byte)
|
||||
err := obj.MakeBucketWithLocation("bucket", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "bucket", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -196,7 +196,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
|
||||
metadata := make(map[string]string)
|
||||
metadata["etag"] = expectedETaghex
|
||||
var objInfo ObjectInfo
|
||||
objInfo, err = obj.PutObject("bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
|
||||
objInfo, err = obj.PutObject(nil, "bucket", key, mustGetHashReader(t, bytes.NewBufferString(randomString), int64(len(randomString)), metadata["etag"], ""), metadata)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -207,7 +207,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
|
||||
|
||||
for key, value := range objects {
|
||||
var byteBuffer bytes.Buffer
|
||||
err = obj.GetObject("bucket", key, 0, int64(len(value)), &byteBuffer, "")
|
||||
err = obj.GetObject(nil, "bucket", key, 0, int64(len(value)), &byteBuffer, "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -215,7 +215,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
|
||||
t.Errorf("%s: Mismatch of GetObject data with the expected one.", instanceType)
|
||||
}
|
||||
|
||||
objInfo, err := obj.GetObjectInfo("bucket", key)
|
||||
objInfo, err := obj.GetObjectInfo(nil, "bucket", key)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -233,8 +233,8 @@ func (s *ObjectLayerAPISuite) TestPaging(t *testing.T) {
|
||||
|
||||
// Tests validate creation of objects and the order of listing using various filters for ListObjects operation.
|
||||
func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
obj.MakeBucketWithLocation("bucket", "")
|
||||
result, err := obj.ListObjects("bucket", "", "", "", 0)
|
||||
obj.MakeBucketWithLocation(nil, "bucket", "")
|
||||
result, err := obj.ListObjects(nil, "bucket", "", "", "", 0)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -249,12 +249,12 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// check before paging occurs.
|
||||
for i := 0; i < 5; i++ {
|
||||
key := "obj" + strconv.Itoa(i)
|
||||
_, err = obj.PutObject("bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
result, err = obj.ListObjects("bucket", "", "", "", 5)
|
||||
result, err = obj.ListObjects(nil, "bucket", "", "", "", 5)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -269,11 +269,11 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// check after paging occurs pages work.
|
||||
for i := 6; i <= 10; i++ {
|
||||
key := "obj" + strconv.Itoa(i)
|
||||
_, err = obj.PutObject("bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", key, mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
result, err = obj.ListObjects("bucket", "obj", "", "", 5)
|
||||
result, err = obj.ListObjects(nil, "bucket", "obj", "", "", 5)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -286,15 +286,15 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
}
|
||||
// check paging with prefix at end returns less objects.
|
||||
{
|
||||
_, err = obj.PutObject("bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "newPrefix", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
_, err = obj.PutObject("bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "newPrefix2", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
result, err = obj.ListObjects("bucket", "new", "", "", 5)
|
||||
result, err = obj.ListObjects(nil, "bucket", "new", "", "", 5)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -305,7 +305,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
|
||||
// check ordering of pages.
|
||||
{
|
||||
result, err = obj.ListObjects("bucket", "", "", "", 1000)
|
||||
result, err = obj.ListObjects(nil, "bucket", "", "", "", 1000)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -328,15 +328,15 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
|
||||
// check delimited results with delimiter and prefix.
|
||||
{
|
||||
_, err = obj.PutObject("bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "this/is/delimited", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
_, err = obj.PutObject("bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "this/is/also/a/delimited/file", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
result, err = obj.ListObjects("bucket", "this/is/", "", "/", 10)
|
||||
result, err = obj.ListObjects(nil, "bucket", "this/is/", "", "/", 10)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -350,7 +350,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
|
||||
// check delimited results with delimiter without prefix.
|
||||
{
|
||||
result, err = obj.ListObjects("bucket", "", "", "/", 1000)
|
||||
result, err = obj.ListObjects(nil, "bucket", "", "", "/", 1000)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -378,7 +378,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// check results with Marker.
|
||||
{
|
||||
|
||||
result, err = obj.ListObjects("bucket", "", "newPrefix", "", 3)
|
||||
result, err = obj.ListObjects(nil, "bucket", "", "newPrefix", "", 3)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -394,7 +394,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
}
|
||||
// check ordering of results with prefix.
|
||||
{
|
||||
result, err = obj.ListObjects("bucket", "obj", "", "", 1000)
|
||||
result, err = obj.ListObjects(nil, "bucket", "obj", "", "", 1000)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -416,7 +416,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
}
|
||||
// check ordering of results with prefix and no paging.
|
||||
{
|
||||
result, err = obj.ListObjects("bucket", "new", "", "", 5)
|
||||
result, err = obj.ListObjects(nil, "bucket", "new", "", "", 5)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -436,27 +436,27 @@ func (s *ObjectLayerAPISuite) TestObjectOverwriteWorks(t *testing.T) {
|
||||
|
||||
// Tests validate overwriting of an existing object.
|
||||
func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
err := obj.MakeBucketWithLocation("bucket", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "bucket", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
uploadContent := "The list of parts was not in ascending order. The parts list must be specified in order by part number."
|
||||
length := int64(len(uploadContent))
|
||||
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
uploadContent = "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
|
||||
length = int64(len(uploadContent))
|
||||
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
var bytesBuffer bytes.Buffer
|
||||
err = obj.GetObject("bucket", "object", 0, length, &bytesBuffer, "")
|
||||
err = obj.GetObject(nil, "bucket", "object", 0, length, &bytesBuffer, "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -472,7 +472,7 @@ func (s *ObjectLayerAPISuite) TestNonExistantBucketOperations(t *testing.T) {
|
||||
|
||||
// Tests validate that bucket operation on non-existent bucket fails.
|
||||
func testNonExistantBucketOperations(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
_, err := obj.PutObject("bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
|
||||
_, err := obj.PutObject(nil, "bucket1", "object", mustGetHashReader(t, bytes.NewBufferString("one"), int64(len("one")), "", ""), nil)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error but found nil")
|
||||
}
|
||||
@ -488,11 +488,11 @@ func (s *ObjectLayerAPISuite) TestBucketRecreateFails(t *testing.T) {
|
||||
|
||||
// Tests validate that recreation of the bucket fails.
|
||||
func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
err := obj.MakeBucketWithLocation("string", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "string", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
err = obj.MakeBucketWithLocation("string", "")
|
||||
err = obj.MakeBucketWithLocation(nil, "string", "")
|
||||
if err == nil {
|
||||
t.Fatalf("%s: Expected error but found nil.", instanceType)
|
||||
}
|
||||
@ -513,17 +513,17 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
length := int64(len(content))
|
||||
readerEOF := newTestReaderEOF(content)
|
||||
readerNoEOF := newTestReaderNoEOF(content)
|
||||
err := obj.MakeBucketWithLocation("bucket", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "bucket", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
var bytesBuffer1 bytes.Buffer
|
||||
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, readerEOF, length, "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
err = obj.GetObject("bucket", "object", 0, length, &bytesBuffer1, "")
|
||||
err = obj.GetObject(nil, "bucket", "object", 0, length, &bytesBuffer1, "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -532,11 +532,11 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
}
|
||||
|
||||
var bytesBuffer2 bytes.Buffer
|
||||
_, err = obj.PutObject("bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "object", mustGetHashReader(t, readerNoEOF, length, "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
err = obj.GetObject("bucket", "object", 0, length, &bytesBuffer2, "")
|
||||
err = obj.GetObject(nil, "bucket", "object", 0, length, &bytesBuffer2, "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -552,7 +552,7 @@ func (s *ObjectLayerAPISuite) TestPutObjectInSubdir(t *testing.T) {
|
||||
|
||||
// Tests validate PutObject with subdirectory prefix.
|
||||
func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
err := obj.MakeBucketWithLocation("bucket", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "bucket", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -560,13 +560,13 @@ func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandle
|
||||
uploadContent := `The specified multipart upload does not exist. The upload ID might be invalid, or the multipart
|
||||
upload might have been aborted or completed.`
|
||||
length := int64(len(uploadContent))
|
||||
_, err = obj.PutObject("bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "dir1/dir2/object", mustGetHashReader(t, bytes.NewBufferString(uploadContent), length, "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
var bytesBuffer bytes.Buffer
|
||||
err = obj.GetObject("bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "")
|
||||
err = obj.GetObject(nil, "bucket", "dir1/dir2/object", 0, length, &bytesBuffer, "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -584,7 +584,7 @@ func (s *ObjectLayerAPISuite) TestListBuckets(t *testing.T) {
|
||||
// Tests validate ListBuckets.
|
||||
func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
// test empty list.
|
||||
buckets, err := obj.ListBuckets()
|
||||
buckets, err := obj.ListBuckets(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -593,12 +593,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
}
|
||||
|
||||
// add one and test exists.
|
||||
err = obj.MakeBucketWithLocation("bucket1", "")
|
||||
err = obj.MakeBucketWithLocation(nil, "bucket1", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
buckets, err = obj.ListBuckets()
|
||||
buckets, err = obj.ListBuckets(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -607,12 +607,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
}
|
||||
|
||||
// add two and test exists.
|
||||
err = obj.MakeBucketWithLocation("bucket2", "")
|
||||
err = obj.MakeBucketWithLocation(nil, "bucket2", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
buckets, err = obj.ListBuckets()
|
||||
buckets, err = obj.ListBuckets(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -621,12 +621,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
}
|
||||
|
||||
// add three and test exists + prefix.
|
||||
err = obj.MakeBucketWithLocation("bucket22", "")
|
||||
err = obj.MakeBucketWithLocation(nil, "bucket22", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
buckets, err = obj.ListBuckets()
|
||||
buckets, err = obj.ListBuckets(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -645,15 +645,15 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler
|
||||
// if implementation contains a map, order of map keys will vary.
|
||||
// this ensures they return in the same order each time.
|
||||
// add one and test exists.
|
||||
err := obj.MakeBucketWithLocation("bucket1", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "bucket1", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
err = obj.MakeBucketWithLocation("bucket2", "")
|
||||
err = obj.MakeBucketWithLocation(nil, "bucket2", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
buckets, err := obj.ListBuckets()
|
||||
buckets, err := obj.ListBuckets(nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
@ -676,7 +676,7 @@ func (s *ObjectLayerAPISuite) TestListObjectsTestsForNonExistantBucket(t *testin
|
||||
|
||||
// Tests validate that ListObjects operation on a non-existent bucket fails as expected.
|
||||
func testListObjectsTestsForNonExistantBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
result, err := obj.ListObjects("bucket", "", "", "", 1000)
|
||||
result, err := obj.ListObjects(nil, "bucket", "", "", "", 1000)
|
||||
if err == nil {
|
||||
t.Fatalf("%s: Expected error but found nil.", instanceType)
|
||||
}
|
||||
@ -698,12 +698,12 @@ func (s *ObjectLayerAPISuite) TestNonExistantObjectInBucket(t *testing.T) {
|
||||
|
||||
// Tests validate that GetObject fails on a non-existent bucket as expected.
|
||||
func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
err := obj.MakeBucketWithLocation("bucket", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "bucket", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
||||
_, err = obj.GetObjectInfo("bucket", "dir1")
|
||||
_, err = obj.GetObjectInfo(nil, "bucket", "dir1")
|
||||
if err == nil {
|
||||
t.Fatalf("%s: Expected error but found nil", instanceType)
|
||||
}
|
||||
@ -726,13 +726,13 @@ func (s *ObjectLayerAPISuite) TestGetDirectoryReturnsObjectNotFound(t *testing.T
|
||||
// Tests validate that GetObject on an existing directory fails as expected.
|
||||
func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
bucketName := "bucket"
|
||||
err := obj.MakeBucketWithLocation(bucketName, "")
|
||||
err := obj.MakeBucketWithLocation(nil, bucketName, "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
content := "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
|
||||
length := int64(len(content))
|
||||
_, err = obj.PutObject(bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil)
|
||||
_, err = obj.PutObject(nil, bucketName, "dir1/dir3/object", mustGetHashReader(t, bytes.NewBufferString(content), length, "", ""), nil)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
@ -753,7 +753,7 @@ func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string,
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
_, expectedErr := obj.GetObjectInfo(bucketName, testCase.dir)
|
||||
_, expectedErr := obj.GetObjectInfo(nil, bucketName, testCase.dir)
|
||||
if expectedErr != nil {
|
||||
expectedErr = errors.Cause(expectedErr)
|
||||
if expectedErr.Error() != testCase.err.Error() {
|
||||
@ -770,17 +770,17 @@ func (s *ObjectLayerAPISuite) TestContentType(t *testing.T) {
|
||||
|
||||
// Test content-type.
|
||||
func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
|
||||
err := obj.MakeBucketWithLocation("bucket", "")
|
||||
err := obj.MakeBucketWithLocation(nil, "bucket", "")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
|
||||
// Test empty.
|
||||
_, err = obj.PutObject("bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, "bucket", "minio.png", mustGetHashReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
objInfo, err := obj.GetObjectInfo("bucket", "minio.png")
|
||||
objInfo, err := obj.GetObjectInfo(nil, "bucket", "minio.png")
|
||||
if err != nil {
|
||||
t.Fatalf("%s: <ERROR> %s", instanceType, err)
|
||||
}
|
||||
|
@ -138,7 +138,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
||||
// objectNames[0].
|
||||
// uploadIds [0].
|
||||
// Create bucket before initiating NewMultipartUpload.
|
||||
err = obj.MakeBucketWithLocation(bucketName, "")
|
||||
err = obj.MakeBucketWithLocation(nil, bucketName, "")
|
||||
if err != nil {
|
||||
// Failed to create newbucket, abort.
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
@ -229,7 +229,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
|
||||
}
|
||||
// When the operation is successful, check if sending metadata is successful too
|
||||
if rec.Code == http.StatusNoContent {
|
||||
objInfo, err := obj.GetObjectInfo(bucketName, testCase.objectName+"/upload.txt")
|
||||
objInfo, err := obj.GetObjectInfo(nil, bucketName, testCase.objectName+"/upload.txt")
|
||||
if err != nil {
|
||||
t.Error("Unexpected error: ", err)
|
||||
}
|
||||
@ -448,7 +448,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
|
||||
curTime := UTCNow()
|
||||
curTimePlus5Min := curTime.Add(time.Minute * 5)
|
||||
|
||||
err = obj.MakeBucketWithLocation(bucketName, "")
|
||||
err = obj.MakeBucketWithLocation(nil, bucketName, "")
|
||||
if err != nil {
|
||||
// Failed to create newbucket, abort.
|
||||
t.Fatalf("%s : %s", instanceType, err.Error())
|
||||
@ -482,7 +482,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
|
||||
}
|
||||
|
||||
// Get the uploaded object info
|
||||
info, err := obj.GetObjectInfo(bucketName, targetObj)
|
||||
info, err := obj.GetObjectInfo(nil, bucketName, targetObj)
|
||||
if err != nil {
|
||||
t.Error("Unexpected error: ", err)
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func printStartupMessage(apiEndPoints []string) {
|
||||
// Object layer is initialized then print StorageInfo.
|
||||
objAPI := newObjectLayerFn()
|
||||
if objAPI != nil {
|
||||
printStorageInfo(objAPI.StorageInfo())
|
||||
printStorageInfo(objAPI.StorageInfo(nil))
|
||||
}
|
||||
|
||||
// Prints credential, region and browser access.
|
||||
|
@ -46,7 +46,7 @@ func handleSignals() {
|
||||
errorIf(err, "Unable to shutdown http server")
|
||||
|
||||
if objAPI := newObjectLayerFn(); objAPI != nil {
|
||||
oerr = objAPI.Shutdown()
|
||||
oerr = objAPI.Shutdown(nil)
|
||||
errorIf(oerr, "Unable to shutdown object layer")
|
||||
}
|
||||
|
||||
@ -59,7 +59,7 @@ func handleSignals() {
|
||||
errorIf(err, "http server exited abnormally")
|
||||
var oerr error
|
||||
if objAPI := newObjectLayerFn(); objAPI != nil {
|
||||
oerr = objAPI.Shutdown()
|
||||
oerr = objAPI.Shutdown(nil)
|
||||
errorIf(oerr, "Unable to shutdown object layer")
|
||||
}
|
||||
|
||||
|
@ -180,14 +180,14 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
xl := obj.(*xlObjects)
|
||||
xlDisks := xl.storageDisks
|
||||
|
||||
err := obj.MakeBucketWithLocation(bucket, globalMinioDefaultRegion)
|
||||
err := obj.MakeBucketWithLocation(nil, bucket, globalMinioDefaultRegion)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to make a bucket %v", err)
|
||||
}
|
||||
|
||||
// Object for test case 1 - No StorageClass defined, no MetaData in PutObject
|
||||
object1 := "object1"
|
||||
_, err = obj.PutObject(bucket, object1, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
||||
_, err = obj.PutObject(nil, bucket, object1, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
@ -198,7 +198,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
object2 := "object2"
|
||||
metadata2 := make(map[string]string)
|
||||
metadata2["x-amz-storage-class"] = reducedRedundancyStorageClass
|
||||
_, err = obj.PutObject(bucket, object2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2)
|
||||
_, err = obj.PutObject(nil, bucket, object2, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
@ -209,7 +209,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
object3 := "object3"
|
||||
metadata3 := make(map[string]string)
|
||||
metadata3["x-amz-storage-class"] = standardStorageClass
|
||||
_, err = obj.PutObject(bucket, object3, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3)
|
||||
_, err = obj.PutObject(nil, bucket, object3, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata3)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
@ -225,7 +225,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
Scheme: "EC",
|
||||
}
|
||||
|
||||
_, err = obj.PutObject(bucket, object4, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4)
|
||||
_, err = obj.PutObject(nil, bucket, object4, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata4)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
@ -243,7 +243,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
Scheme: "EC",
|
||||
}
|
||||
|
||||
_, err = obj.PutObject(bucket, object5, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5)
|
||||
_, err = obj.PutObject(nil, bucket, object5, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata5)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
@ -261,7 +261,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
Scheme: "EC",
|
||||
}
|
||||
|
||||
_, err = obj.PutObject(bucket, object6, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6)
|
||||
_, err = obj.PutObject(nil, bucket, object6, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata6)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
@ -279,7 +279,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
|
||||
Scheme: "EC",
|
||||
}
|
||||
|
||||
_, err = obj.PutObject(bucket, object7, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7)
|
||||
_, err = obj.PutObject(nil, bucket, object7, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), metadata7)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to putObject %v", err)
|
||||
}
|
||||
|
@ -1766,7 +1766,7 @@ func initAPIHandlerTest(obj ObjectLayer, endpoints []string) (string, http.Handl
|
||||
bucketName := getRandomBucketName()
|
||||
|
||||
// Create bucket.
|
||||
err := obj.MakeBucketWithLocation(bucketName, "")
|
||||
err := obj.MakeBucketWithLocation(nil, bucketName, "")
|
||||
if err != nil {
|
||||
// failed to create newbucket, return err.
|
||||
return "", nil, err
|
||||
@ -1872,7 +1872,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
|
||||
Version: "1.0",
|
||||
Statements: []policy.Statement{policyFunc(bucketName, "")},
|
||||
}
|
||||
obj.SetBucketPolicy(bucketName, bp)
|
||||
obj.SetBucketPolicy(nil, bucketName, bp)
|
||||
// now call the handler again with the unsigned/anonymous request, it should be accepted.
|
||||
rec = httptest.NewRecorder()
|
||||
|
||||
|
16
cmd/utils.go
16
cmd/utils.go
@ -18,6 +18,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
@ -34,6 +35,8 @@ import (
|
||||
"time"
|
||||
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/pkg/profile"
|
||||
)
|
||||
|
||||
@ -314,3 +317,16 @@ func ceilFrac(numerator, denominator int64) (ceil int64) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newContext(r *http.Request, api string) context.Context {
|
||||
vars := mux.Vars(r)
|
||||
bucket := vars["bucket"]
|
||||
object := vars["object"]
|
||||
prefix := vars["prefix"]
|
||||
|
||||
if prefix != "" {
|
||||
object = prefix
|
||||
}
|
||||
|
||||
return logger.ContextSet(context.Background(), &logger.ReqInfo{r.RemoteAddr, r.Header.Get("user-agent"), "", api, bucket, object, nil})
|
||||
}
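A sketch of the intended request-path usage of this helper; the handler name and the plain http.Error responses are illustrative stand-ins, not part of this diff, while newContext, newObjectLayerFn and ListBuckets(ctx) appear elsewhere in this commit:

// Hypothetical handler showing how the request-scoped context flows
// from newContext into the object layer.
func exampleHandler(w http.ResponseWriter, r *http.Request) {
	// Tag the request with remote address, user agent, API name,
	// bucket and object for the logger.
	ctx := newContext(r, "ExampleAPI")
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		http.Error(w, "object layer not initialized", http.StatusServiceUnavailable)
		return
	}
	// Thread the same ctx through every object-layer call for this request.
	buckets, err := objAPI.ListBuckets(ctx)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "buckets: %d\n", len(buckets))
}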

@ -106,7 +106,7 @@ func (web *webAPIHandlers) StorageInfo(r *http.Request, args *AuthRPCArgs, reply
if !isHTTPRequestValid(r) {
return toJSONError(errAuthentication)
}
reply.StorageInfo = objectAPI.StorageInfo()
reply.StorageInfo = objectAPI.StorageInfo(nil)
reply.UIVersion = browser.UIVersion
return nil
}
@ -131,7 +131,7 @@ func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, rep
return toJSONError(errInvalidBucketName)
}

if err := objectAPI.MakeBucketWithLocation(args.BucketName, globalServerConfig.GetRegion()); err != nil {
if err := objectAPI.MakeBucketWithLocation(nil, args.BucketName, globalServerConfig.GetRegion()); err != nil {
return toJSONError(err, args.BucketName)
}

@ -154,7 +154,7 @@ func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs,
return toJSONError(errAuthentication)
}

err := objectAPI.DeleteBucket(args.BucketName)
err := objectAPI.DeleteBucket(nil, args.BucketName)
if err != nil {
return toJSONError(err, args.BucketName)
}
@ -187,7 +187,7 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
if authErr != nil {
return toJSONError(authErr)
}
buckets, err := objectAPI.ListBuckets()
buckets, err := objectAPI.ListBuckets(nil)
if err != nil {
return toJSONError(err)
}
@ -256,7 +256,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
default:
return errAuthentication
}
lo, err := objectAPI.ListObjects(args.BucketName, args.Prefix, args.Marker, slashSeparator, 1000)
lo, err := objectAPI.ListObjects(nil, args.BucketName, args.Prefix, args.Marker, slashSeparator, 1000)
if err != nil {
return &json2.Error{Message: err.Error()}
}
@ -313,7 +313,7 @@ next:
for _, objectName := range args.Objects {
// If not a directory, remove the object.
if !hasSuffix(objectName, slashSeparator) && objectName != "" {
if err = deleteObject(objectAPI, args.BucketName, objectName, r); err != nil {
if err = deleteObject(nil, objectAPI, args.BucketName, objectName, r); err != nil {
break next
}
continue
@ -323,13 +323,13 @@ next:
marker := ""
for {
var lo ListObjectsInfo
lo, err = objectAPI.ListObjects(args.BucketName, objectName, marker, "", 1000)
lo, err = objectAPI.ListObjects(nil, args.BucketName, objectName, marker, "", 1000)
if err != nil {
break next
}
marker = lo.NextMarker
for _, obj := range lo.Objects {
err = deleteObject(objectAPI, args.BucketName, obj.Name, r)
err = deleteObject(nil, objectAPI, args.BucketName, obj.Name, r)
if err != nil {
break next
}
@ -562,7 +562,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
return
}

objInfo, err := objectAPI.PutObject(bucket, object, hashReader, metadata)
objInfo, err := objectAPI.PutObject(nil, bucket, object, hashReader, metadata)
if err != nil {
writeWebErrorResponse(w, err)
return
@ -598,7 +598,7 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
// Add content disposition.
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object)))

if err := objectAPI.GetObject(bucket, object, 0, -1, w, ""); err != nil {
if err := objectAPI.GetObject(nil, bucket, object, 0, -1, w, ""); err != nil {
/// No need to print error, response writer already written to.
return
}
@ -647,7 +647,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
for _, object := range args.Objects {
// Writes compressed object file to the response.
zipit := func(objectName string) error {
info, err := objectAPI.GetObjectInfo(args.BucketName, objectName)
info, err := objectAPI.GetObjectInfo(nil, args.BucketName, objectName)
if err != nil {
return err
}
@ -662,7 +662,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
writeWebErrorResponse(w, errUnexpected)
return err
}
return objectAPI.GetObject(args.BucketName, objectName, 0, info.Size, writer, "")
return objectAPI.GetObject(nil, args.BucketName, objectName, 0, info.Size, writer, "")
}

if !hasSuffix(object, slashSeparator) {
@ -678,7 +678,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
// date to the response writer.
marker := ""
for {
lo, err := objectAPI.ListObjects(args.BucketName, pathJoin(args.Prefix, object), marker, "", 1000)
lo, err := objectAPI.ListObjects(nil, args.BucketName, pathJoin(args.Prefix, object), marker, "", 1000)
if err != nil {
return
}
@ -719,7 +719,7 @@ func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolic
return toJSONError(errAuthentication)
}

var policyInfo, err = objectAPI.GetBucketPolicy(args.BucketName)
var policyInfo, err = objectAPI.GetBucketPolicy(nil, args.BucketName)
if err != nil {
_, ok := errors.Cause(err).(BucketPolicyNotFound)
if !ok {
@ -760,7 +760,7 @@ func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllB
if !isHTTPRequestValid(r) {
return toJSONError(errAuthentication)
}
var policyInfo, err = objectAPI.GetBucketPolicy(args.BucketName)
var policyInfo, err = objectAPI.GetBucketPolicy(nil, args.BucketName)
if err != nil {
_, ok := errors.Cause(err).(PolicyNotFound)
if !ok {
@ -804,7 +804,7 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
}
}

var policyInfo, err = objectAPI.GetBucketPolicy(args.BucketName)
var policyInfo, err = objectAPI.GetBucketPolicy(nil, args.BucketName)
if err != nil {
if _, ok := errors.Cause(err).(PolicyNotFound); !ok {
return toJSONError(err, args.BucketName)
@ -815,14 +815,14 @@ func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolic
policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketP, args.BucketName, args.Prefix)

if len(policyInfo.Statements) == 0 {
if err = objectAPI.DeleteBucketPolicy(args.BucketName); err != nil {
if err = objectAPI.DeleteBucketPolicy(nil, args.BucketName); err != nil {
return toJSONError(err, args.BucketName)
}
return nil
}

// Parse validate and save bucket policy.
if err := objectAPI.SetBucketPolicy(args.BucketName, policyInfo); err != nil {
if err := objectAPI.SetBucketPolicy(nil, args.BucketName, policyInfo); err != nil {
return toJSONError(err, args.BucketName)
}

@ -340,7 +340,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
}

bucketName := getRandomBucketName()
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
t.Fatalf("failed to create bucket: %s (%s)", err.Error(), instanceType)
}
@ -368,7 +368,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
for _, test := range testCases {
if test.initWithObject {
data := bytes.NewBufferString("hello")
_, err = obj.PutObject(test.bucketName, "object", mustGetHashReader(t, data, int64(data.Len()), "", ""), nil)
_, err = obj.PutObject(nil, test.bucketName, "object", mustGetHashReader(t, data, int64(data.Len()), "", ""), nil)
// _, err = obj.PutObject(test.bucketName, "object", int64(data.Len()), data, nil, "")
if err != nil {
t.Fatalf("could not put object to %s, %s", test.bucketName, err.Error())
@ -405,7 +405,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH

// If we created the bucket with an object, now delete the object to cleanup.
if test.initWithObject {
err = obj.DeleteObject(test.bucketName, "object")
err = obj.DeleteObject(nil, test.bucketName, "object")
if err != nil {
t.Fatalf("could not delete object, %s", err.Error())
}
@ -417,7 +417,7 @@ func testDeleteBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrH
continue
}

err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
// failed to create new bucket, abort.
t.Fatalf("failed to create new bucket (%s): %s", instanceType, err.Error())
@ -445,7 +445,7 @@ func testListBucketsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa

bucketName := getRandomBucketName()
// Create bucket.
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -496,7 +496,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
objectSize := 1 * humanize.KiByte

// Create bucket.
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -504,7 +504,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa

data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)

if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
@ -558,7 +558,7 @@ func testListObjectsWebHandler(obj ObjectLayer, instanceType string, t TestErrHa
Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")},
}

obj.SetBucketPolicy(bucketName, policy)
obj.SetBucketPolicy(nil, bucketName, policy)

// Unauthenticated ListObjects with READ bucket policy should succeed.
err, reply = test("")
@ -590,7 +590,7 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH
objectSize := 1 * humanize.KiByte

// Create bucket.
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -598,14 +598,14 @@ func testRemoveObjectWebHandler(obj ObjectLayer, instanceType string, t TestErrH

data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}

objectName = "a/object"
metadata = map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
@ -893,7 +893,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
return rec.Code
}
// Create bucket.
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -906,7 +906,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
}

var byteBuffer bytes.Buffer
err = obj.GetObject(bucketName, objectName, 0, int64(len(content)), &byteBuffer, "")
err = obj.GetObject(nil, bucketName, objectName, 0, int64(len(content)), &byteBuffer, "")
if err != nil {
t.Fatalf("Failed, %v", err)
}
@ -932,7 +932,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler
Statements: []policy.Statement{getWriteOnlyObjectStatement(bucketName, "")},
}

obj.SetBucketPolicy(bucketName, bp)
obj.SetBucketPolicy(nil, bucketName, bp)

// Unauthenticated upload with WRITE policy should succeed.
code = test("", true)
@ -978,7 +978,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
}

// Create bucket.
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -986,7 +986,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl

content := []byte("temporary file's content")
metadata := map[string]string{"etag": "01ce59706106fe5e02e7f55fffda7f34"}
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(content), int64(len(content)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
@ -1039,7 +1039,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl
Statements: []policy.Statement{getReadOnlyObjectStatement(bucketName, "")},
}

obj.SetBucketPolicy(bucketName, bp)
obj.SetBucketPolicy(nil, bucketName, bp)

// Unauthenticated download with READ policy should succeed.
code, bodyContent = test("")
@ -1072,15 +1072,15 @@ func testWebHandlerDownloadZip(obj ObjectLayer, instanceType string, t TestErrHa
fileThree := "cccccccccccccc"

// Create bucket.
err = obj.MakeBucketWithLocation(bucket, "")
err = obj.MakeBucketWithLocation(nil, bucket, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
}

obj.PutObject(bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
obj.PutObject(bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
obj.PutObject(bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)
obj.PutObject(nil, bucket, "a/one", mustGetHashReader(t, strings.NewReader(fileOne), int64(len(fileOne)), "", ""), nil)
obj.PutObject(nil, bucket, "a/b/two", mustGetHashReader(t, strings.NewReader(fileTwo), int64(len(fileTwo)), "", ""), nil)
obj.PutObject(nil, bucket, "a/c/three", mustGetHashReader(t, strings.NewReader(fileThree), int64(len(fileThree)), "", ""), nil)

test := func(token string) (int, []byte) {
rec := httptest.NewRecorder()
@ -1157,7 +1157,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH
objectSize := 1 * humanize.KiByte

// Create bucket.
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
// failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err)
@ -1165,7 +1165,7 @@ func testWebPresignedGetHandler(obj ObjectLayer, instanceType string, t TestErrH

data := bytes.Repeat([]byte("a"), objectSize)
metadata := map[string]string{"etag": "c9a34cfc85d982698c6ac89f76071abd"}
_, err = obj.PutObject(bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
_, err = obj.PutObject(nil, bucketName, objectName, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), metadata["etag"], ""), metadata)
if err != nil {
t.Fatalf("Was not able to upload an object, %v", err)
}
@ -1258,7 +1258,7 @@ func testWebGetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE
rec := httptest.NewRecorder()

bucketName := getRandomBucketName()
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err)
}

@ -1336,7 +1336,7 @@ func testWebListAllBucketPoliciesHandler(obj ObjectLayer, instanceType string, t
rec := httptest.NewRecorder()

bucketName := getRandomBucketName()
if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil {
if err := obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err)
}

@ -1434,7 +1434,7 @@ func testWebSetBucketPolicyHandler(obj ObjectLayer, instanceType string, t TestE

// Create a bucket
bucketName := getRandomBucketName()
if err = obj.MakeBucketWithLocation(bucketName, ""); err != nil {
if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
t.Fatal("Unexpected error: ", err)
}

@ -1674,7 +1674,7 @@ func TestWebObjectLayerFaultyDisks(t *testing.T) {
defer removeRoots(fsDirs)

bucketName := "mybucket"
err = obj.MakeBucketWithLocation(bucketName, "")
err = obj.MakeBucketWithLocation(nil, bucketName, "")
if err != nil {
t.Fatal("Cannot make bucket:", err)
}

115 cmd/xl-sets.go
@ -17,6 +17,7 @@
package cmd

import (
"context"
"fmt"
"hash/crc32"
"io"
@ -241,11 +242,11 @@ func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesP
}

// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *xlSets) StorageInfo() StorageInfo {
func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo {
var storageInfo StorageInfo
storageInfo.Backend.Type = Erasure
for _, set := range s.sets {
lstorageInfo := set.StorageInfo()
lstorageInfo := set.StorageInfo(ctx)
storageInfo.Total = storageInfo.Total + lstorageInfo.Total
storageInfo.Free = storageInfo.Free + lstorageInfo.Free
storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks + lstorageInfo.Backend.OnlineDisks
@ -310,13 +311,13 @@ func (s *xlSets) StorageInfo() StorageInfo {

// Shutdown shutsdown all erasure coded sets in parallel
// returns error upon first error.
func (s *xlSets) Shutdown() error {
func (s *xlSets) Shutdown(ctx context.Context) error {
g := errgroup.WithNErrs(len(s.sets))

for index := range s.sets {
index := index
g.Go(func() error {
return s.sets[index].Shutdown()
return s.sets[index].Shutdown(ctx)
}, index)
}

@ -332,14 +333,14 @@ func (s *xlSets) Shutdown() error {
// MakeBucketLocation - creates a new bucket across all sets simultaneously
// even if one of the sets fail to create buckets, we proceed to undo a
// successful operation.
func (s *xlSets) MakeBucketWithLocation(bucket, location string) error {
func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
g := errgroup.WithNErrs(len(s.sets))

// Create buckets in parallel across all sets.
for index := range s.sets {
index := index
g.Go(func() error {
return s.sets[index].MakeBucketWithLocation(bucket, location)
return s.sets[index].MakeBucketWithLocation(ctx, bucket, location)
}, index)
}
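Shutdown, MakeBucketWithLocation and DeleteBucket in this file all share the same fan-out shape; a hypothetical condensation of the pattern (errgroup is the helper the file already uses, while the wrapper function and the []error return shape of Wait are assumptions):

// Hypothetical condensed form of the per-set fan-out used above: one
// goroutine per set, errors collected positionally.
func fanOutSets(sets []*xlObjects, op func(*xlObjects) error) []error {
	g := errgroup.WithNErrs(len(sets))
	for index := range sets {
		index := index // capture the loop variable for the closure
		g.Go(func() error {
			return op(sets[index])
		}, index)
	}
	return g.Wait() // one error slot per set; nil means that set succeeded
}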

@ -367,7 +368,7 @@ func undoMakeBucketSets(bucket string, sets []*xlObjects, errs []error) {
index := index
if errs[index] == nil {
g.Go(func() error {
return sets[index].DeleteBucket(bucket)
return sets[index].DeleteBucket(nil, bucket)
}, index)
}
}
@ -403,13 +404,13 @@ func (s *xlSets) getHashedSet(input string) (set *xlObjects) {
}
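Only the tail of getHashedSet appears in this hunk; judging by the hash/crc32 import above, a bucket or object name deterministically selects one set. A guess at the shape, labeled hypothetical and not the actual implementation:

// Hypothetical sketch only: hash the input and index into s.sets so the
// same name always lands on the same set. The real function may differ.
func (s *xlSets) getHashedSetSketch(input string) (set *xlObjects) {
	sum := crc32.Checksum([]byte(input), crc32.MakeTable(crc32.Castagnoli))
	return s.sets[sum%uint32(len(s.sets))]
}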
|
||||
|
||||
// GetBucketInfo - returns bucket info from one of the erasure coded set.
|
||||
func (s *xlSets) GetBucketInfo(bucket string) (bucketInfo BucketInfo, err error) {
|
||||
return s.getHashedSet(bucket).GetBucketInfo(bucket)
|
||||
func (s *xlSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
|
||||
return s.getHashedSet(bucket).GetBucketInfo(ctx, bucket)
|
||||
}
|
||||
|
||||
// ListObjectsV2 lists all objects in bucket filtered by prefix
|
||||
func (s *xlSets) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
|
||||
loi, err := s.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
|
||||
func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
|
||||
loi, err := s.ListObjects(ctx, bucket, prefix, continuationToken, delimiter, maxKeys)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
@ -425,12 +426,12 @@ func (s *xlSets) ListObjectsV2(bucket, prefix, continuationToken, delimiter stri
|
||||
}
|
||||
|
||||
// SetBucketPolicy persists the new policy on the bucket.
func (s *xlSets) SetBucketPolicy(bucket string, policy policy.BucketAccessPolicy) error {
func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error {
	return persistAndNotifyBucketPolicyChange(bucket, false, policy, s)
}

// GetBucketPolicy will return a policy on a bucket
func (s *xlSets) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
	// fetch bucket policy from cache.
	bpolicy := s.bucketPolicies.GetBucketPolicy(bucket)
	if reflect.DeepEqual(bpolicy, emptyBucketPolicy) {
@ -440,12 +441,12 @@ func (s *xlSets) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, erro
}

// DeleteBucketPolicy deletes all policies on bucket
func (s *xlSets) DeleteBucketPolicy(bucket string) error {
func (s *xlSets) DeleteBucketPolicy(ctx context.Context, bucket string) error {
	return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, s)
}

// RefreshBucketPolicy refreshes policy cache from disk
func (s *xlSets) RefreshBucketPolicy(bucket string) error {
func (s *xlSets) RefreshBucketPolicy(ctx context.Context, bucket string) error {
	policy, err := ReadBucketPolicy(bucket, s)
	if err != nil {
		if reflect.DeepEqual(policy, emptyBucketPolicy) {
@ -469,14 +470,14 @@ func (s *xlSets) IsEncryptionSupported() bool {
// DeleteBucket - deletes a bucket on all sets simultaneously;
// even if one of the sets fails to delete the bucket, we proceed to
// undo the successful operations.
func (s *xlSets) DeleteBucket(bucket string) error {
func (s *xlSets) DeleteBucket(ctx context.Context, bucket string) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Delete buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].DeleteBucket(bucket)
			return s.sets[index].DeleteBucket(ctx, bucket)
		}, index)
	}

@ -508,7 +509,7 @@ func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) {
		index := index
		if errs[index] == nil {
			g.Go(func() error {
				return sets[index].MakeBucketWithLocation(bucket, "")
				return sets[index].MakeBucketWithLocation(nil, bucket, "")
			}, index)
		}
	}
@ -519,42 +520,42 @@ func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) {
// List all buckets from one of the sets; we are not doing merge
// sort here just for simplification. As per design it is assumed
// that all buckets are present on all sets.
func (s *xlSets) ListBuckets() (buckets []BucketInfo, err error) {
func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	// Always lists from the same set signified by the empty string.
	return s.getHashedSet("").ListBuckets()
	return s.getHashedSet("").ListBuckets(ctx)
}

// --- Object Operations ---

// GetObject - reads an object from the hashedSet based on the object name.
func (s *xlSets) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
	return s.getHashedSet(object).GetObject(bucket, object, startOffset, length, writer, etag)
func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
	return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag)
}

// PutObject - writes an object to hashedSet based on the object name.
func (s *xlSets) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).PutObject(bucket, object, data, metadata)
func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).PutObject(ctx, bucket, object, data, metadata)
}

// GetObjectInfo - reads object metadata from the hashedSet based on the object name.
func (s *xlSets) GetObjectInfo(bucket, object string) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).GetObjectInfo(bucket, object)
func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object)
}

// DeleteObject - deletes an object from the hashedSet based on the object name.
func (s *xlSets) DeleteObject(bucket string, object string) (err error) {
	return s.getHashedSet(object).DeleteObject(bucket, object)
func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string) (err error) {
	return s.getHashedSet(object).DeleteObject(ctx, bucket, object)
}

// CopyObject - copies objects from one hashedSet to another hashedSet, on server side.
func (s *xlSets) CopyObject(srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo) (objInfo ObjectInfo, err error) {
	srcSet := s.getHashedSet(srcObject)
	destSet := s.getHashedSet(destObject)

	// Check if this request is only a metadata update.
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(destBucket, destObject))
	if cpSrcDstSame && srcInfo.metadataOnly {
		return srcSet.CopyObject(srcBucket, srcObject, destBucket, destObject, srcInfo)
		return srcSet.CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo)
	}

	// Hold write lock on destination since in both cases
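Every delegating method above routes to a target set via getHashedSet(object). The real hash function lives elsewhere in xl-sets.go and is not part of this diff; crc32 stands in below purely to illustrate the name-to-set routing:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// getHashedSetIndex maps an object name onto one of setCount erasure sets.
// minio's actual hash is not shown in this diff, so crc32 is used here
// only for illustration.
func getHashedSetIndex(input string, setCount int) int {
	return int(crc32.ChecksumIEEE([]byte(input))) % setCount
}

func main() {
	for _, object := range []string{"photos/a.jpg", "photos/b.jpg", "logs/1.txt"} {
		fmt.Printf("%-14s -> set %d\n", object, getHashedSetIndex(object, 4))
	}
}
```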
@ -672,7 +673,7 @@ func listDirSetsFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, sets ...
// ListObjects - implements listing of objects across sets; each set is independently
// listed and the results are subsequently merged, lexically sorted, inside listDirSetsFactory().
// The values received through the walk channel thus arrive properly lexically sorted.
func (s *xlSets) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) {
	// validate all the inputs for listObjects
	if err = checkListObjsArgs(bucket, prefix, marker, delimiter, s); err != nil {
		return result, err
@ -766,26 +767,26 @@ func (s *xlSets) ListObjects(bucket, prefix, marker, delimiter string, maxKeys i
	return result, nil
}

func (s *xlSets) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
func (s *xlSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	// In list multipart uploads we are going to treat the input prefix as the object,
	// which means that we are not supporting directory navigation.
	return s.getHashedSet(prefix).ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
	return s.getHashedSet(prefix).ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}

// Initiate a new multipart upload on a hashedSet based on object name.
func (s *xlSets) NewMultipartUpload(bucket, object string, metadata map[string]string) (uploadID string, err error) {
	return s.getHashedSet(object).NewMultipartUpload(bucket, object, metadata)
func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
	return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, metadata)
}

// Copies a part of an object from a source hashedSet to a destination hashedSet.
func (s *xlSets) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo) (partInfo PartInfo, err error) {

	srcSet := s.getHashedSet(srcObject)
	destSet := s.getHashedSet(destObject)

	go func() {
		if gerr := srcSet.GetObject(srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
		if gerr := srcSet.GetObject(ctx, srcBucket, srcObject, startOffset, length, srcInfo.Writer, srcInfo.ETag); gerr != nil {
			if gerr = srcInfo.Writer.Close(); gerr != nil {
				errorIf(gerr, "Unable to read the object `%s/%s`.", srcBucket, srcObject)
				return
@ -797,27 +798,27 @@ func (s *xlSets) CopyObjectPart(srcBucket, srcObject, destBucket, destObject str
		}
	}()

	return destSet.PutObjectPart(destBucket, destObject, uploadID, partID, srcInfo.Reader)
	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.Reader)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (s *xlSets) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
	return s.getHashedSet(object).PutObjectPart(bucket, object, uploadID, partID, data)
func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
	return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data)
}

// ListObjectParts - lists all uploaded parts of an object in hashedSet.
func (s *xlSets) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
	return s.getHashedSet(object).ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListPartsInfo, err error) {
	return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
}

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (s *xlSets) AbortMultipartUpload(bucket, object, uploadID string) error {
	return s.getHashedSet(object).AbortMultipartUpload(bucket, object, uploadID)
func (s *xlSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	return s.getHashedSet(object).AbortMultipartUpload(ctx, bucket, object, uploadID)
}

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (s *xlSets) CompleteMultipartUpload(bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).CompleteMultipartUpload(bucket, object, uploadID, uploadedParts)
func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts)
}

/*
@ -911,7 +912,7 @@ func formatsToDrivesInfo(endpoints EndpointList, formats []*formatXLV3, sErrs []

// HealFormat - heals missing `format.json` on fresh or corrupted
// disks (missing format.json but still carrying erasure coded data).
func (s *xlSets) HealFormat(dryRun bool) (madmin.HealResultItem, error) {
func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
	// Acquire lock on format.json
	formatLock := s.getHashedSet(formatConfigFile).nsMutex.NewNSLock(minioMetaBucket, formatConfigFile)
	if err := formatLock.GetLock(globalHealingTimeout); err != nil {
@ -1041,7 +1042,7 @@ func (s *xlSets) HealFormat(dryRun bool) (madmin.HealResultItem, error) {
}

// HealBucket - heals inconsistent buckets and bucket metadata on all sets.
func (s *xlSets) HealBucket(bucket string, dryRun bool) (results []madmin.HealResultItem, err error) {
func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun bool) (results []madmin.HealResultItem, err error) {
	// Initialize heal result info
	res := madmin.HealResultItem{
		Type: madmin.HealItemBucket,
@ -1052,7 +1053,7 @@ func (s *xlSets) HealBucket(bucket string, dryRun bool) (results []madmin.HealRe

	for _, s := range s.sets {
		var setResults []madmin.HealResultItem
		setResults, _ = s.HealBucket(bucket, dryRun)
		setResults, _ = s.HealBucket(ctx, bucket, dryRun)
		for _, setResult := range setResults {
			if setResult.Type == madmin.HealItemBucket {
				for _, v := range setResult.Before.Drives {
@ -1108,12 +1109,12 @@ func (s *xlSets) HealBucket(bucket string, dryRun bool) (results []madmin.HealRe
}

// HealObject - heals an inconsistent object on a hashedSet based on the object name.
func (s *xlSets) HealObject(bucket, object string, dryRun bool) (madmin.HealResultItem, error) {
	return s.getHashedSet(object).HealObject(bucket, object, dryRun)
func (s *xlSets) HealObject(ctx context.Context, bucket, object string, dryRun bool) (madmin.HealResultItem, error) {
	return s.getHashedSet(object).HealObject(ctx, bucket, object, dryRun)
}

// Lists all buckets which need healing.
func (s *xlSets) ListBucketsHeal() ([]BucketInfo, error) {
func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
	listBuckets := []BucketInfo{}
	var healBuckets = map[string]BucketInfo{}
	for _, set := range s.sets {
@ -1308,7 +1309,7 @@ func (s *xlSets) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
}

// This is not implemented yet; it will be implemented later to comply with the Admin API refactor.
func (s *xlSets) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	if err = checkListObjsArgs(bucket, prefix, marker, delimiter, s); err != nil {
		return loi, err
	}
@ -1343,10 +1344,10 @@ func (s *xlSets) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKe
}

// ListLocks from all sets, aggregate them and return.
func (s *xlSets) ListLocks(bucket, prefix string, duration time.Duration) (lockInfo []VolumeLockInfo, err error) {
func (s *xlSets) ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) (lockInfo []VolumeLockInfo, err error) {
	for _, set := range s.sets {
		var setLockInfo []VolumeLockInfo
		setLockInfo, err = set.ListLocks(bucket, prefix, duration)
		setLockInfo, err = set.ListLocks(ctx, bucket, prefix, duration)
		if err != nil {
			return nil, err
		}
@ -1356,9 +1357,9 @@ func (s *xlSets) ListLocks(bucket, prefix string, duration time.Duration) (lockI
}

// Clear all requested locks on all sets.
func (s *xlSets) ClearLocks(lockInfo []VolumeLockInfo) error {
func (s *xlSets) ClearLocks(ctx context.Context, lockInfo []VolumeLockInfo) error {
	for _, set := range s.sets {
		set.ClearLocks(lockInfo)
		set.ClearLocks(ctx, lockInfo)
	}
	return nil
}
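ListLocks and ClearLocks iterate over every set. Plumbing ctx through these loops is what later allows them to stop early when a request is cancelled; a sketch of that eventual pattern (the diffed code does not yet inspect ctx):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// listLocksAll mirrors the ListLocks aggregation loop above, with the
// early exit on ctx.Done() that the context plumbing makes possible later.
func listLocksAll(ctx context.Context, sets []string) ([]string, error) {
	var out []string
	for _, s := range sets {
		select {
		case <-ctx.Done():
			return nil, ctx.Err() // client went away; stop aggregating
		default:
		}
		out = append(out, "lock@"+s)
	}
	return out, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond) // simulate a slow request
	if _, err := listLocksAll(ctx, []string{"set1", "set2"}); err != nil {
		fmt.Println("aggregation stopped:", err)
	}
}
```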
@ -123,7 +123,7 @@ func TestStorageInfoSets(t *testing.T) {
	}

	// Get storage info first attempt.
	disks16Info := objLayer.StorageInfo()
	disks16Info := objLayer.StorageInfo(nil)

	// This test assumes homogeneity between all disks,
	// i.e. if we lose one disk the effective storage
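Throughout the updated tests, nil is passed for the new context argument. That only works because none of these implementations dereference the context yet; context.TODO() is the conventional placeholder while plumbing is incomplete, as in this small stand-in:

```go
package main

import (
	"context"
	"fmt"
)

// storageInfo stands in for ObjectLayer.StorageInfo in the tests above.
// It never inspects ctx, which is why the tests get away with passing nil.
func storageInfo(ctx context.Context) (total, free uint64) {
	return 16, 16
}

func main() {
	// Prefer context.TODO() over nil: it is non-nil, so callees may
	// safely call ctx.Done() or ctx.Value() without panicking.
	total, free := storageInfo(context.TODO())
	fmt.Println(total, free)
}
```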
@ -17,6 +17,7 @@
package cmd

import (
	"context"
	"reflect"
	"sort"
	"sync"
@ -34,7 +35,7 @@ var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)
/// Bucket operations

// MakeBucketWithLocation - make a bucket.
func (xl xlObjects) MakeBucketWithLocation(bucket, location string) error {
func (xl xlObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
	// Verify if bucket is valid.
	if !IsValidBucketName(bucket) {
		return errors.Trace(BucketNameInvalid{Bucket: bucket})
@ -150,7 +151,7 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
}

// GetBucketInfo - returns BucketInfo for a bucket.
func (xl xlObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
func (xl xlObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
	bucketLock := xl.nsMutex.NewNSLock(bucket, "")
	if e := bucketLock.GetRLock(globalObjectTimeout); e != nil {
		return bi, e
@ -209,7 +210,7 @@ func (xl xlObjects) listBuckets() (bucketsInfo []BucketInfo, err error) {
}

// ListBuckets - lists all the buckets, sorted by name.
func (xl xlObjects) ListBuckets() ([]BucketInfo, error) {
func (xl xlObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
	bucketInfos, err := xl.listBuckets()
	if err != nil {
		return nil, toObjectErr(err)
@ -220,7 +221,7 @@ func (xl xlObjects) ListBuckets() ([]BucketInfo, error) {
}

// DeleteBucket - deletes a bucket.
func (xl xlObjects) DeleteBucket(bucket string) error {
func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string) error {

	bucketLock := xl.nsMutex.NewNSLock(bucket, "")
	if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
@ -282,12 +283,12 @@ func (xl xlObjects) DeleteBucket(bucket string) error {
}

// SetBucketPolicy sets policy on bucket
func (xl xlObjects) SetBucketPolicy(bucket string, policy policy.BucketAccessPolicy) error {
func (xl xlObjects) SetBucketPolicy(ctx context.Context, bucket string, policy policy.BucketAccessPolicy) error {
	return persistAndNotifyBucketPolicyChange(bucket, false, policy, xl)
}

// GetBucketPolicy will get policy on bucket
func (xl xlObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
func (xl xlObjects) GetBucketPolicy(ctx context.Context, bucket string) (policy.BucketAccessPolicy, error) {
	// fetch bucket policy from cache.
	bpolicy := xl.bucketPolicies.GetBucketPolicy(bucket)
	if reflect.DeepEqual(bpolicy, emptyBucketPolicy) {
@ -297,12 +298,12 @@ func (xl xlObjects) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, e
}

// DeleteBucketPolicy deletes all policies on bucket
func (xl xlObjects) DeleteBucketPolicy(bucket string) error {
func (xl xlObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
	return persistAndNotifyBucketPolicyChange(bucket, true, emptyBucketPolicy, xl)
}

// RefreshBucketPolicy refreshes policy cache from disk
func (xl xlObjects) RefreshBucketPolicy(bucket string) error {
func (xl xlObjects) RefreshBucketPolicy(ctx context.Context, bucket string) error {
	policy, err := ReadBucketPolicy(bucket, xl)

	if err != nil {
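GetBucketInfo and DeleteBucket take a namespace lock with a global timeout before touching disks. nsMutex's GetLock/GetRLock are timeout-based rather than context-aware; the sketch below shows how such a guard could also observe cancellation once a context is available. This is a hypothetical helper, not minio's API:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errOperationTimedOut = errors.New("operation timed out")

// acquire tries to obtain a lock token within timeout, giving up early
// if ctx is cancelled first.
func acquire(ctx context.Context, lock chan struct{}, timeout time.Duration) error {
	select {
	case lock <- struct{}{}: // token acquired
		return nil
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(timeout):
		return errOperationTimedOut
	}
}

func main() {
	lock := make(chan struct{}, 1)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := acquire(ctx, lock, 500*time.Millisecond); err != nil {
		fmt.Println("lock:", err)
		return
	}
	defer func() { <-lock }() // release the token
	fmt.Println("bucket metadata read under lock")
}
```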
@ -43,11 +43,11 @@ func TestXLParentDirIsObject(t *testing.T) {
	bucketName := "testbucket"
	objectName := "object"

	if err = obj.MakeBucketWithLocation(bucketName, ""); err != nil {
	if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
		t.Fatal(err)
	}
	objectContent := "12345"
	objInfo, err := obj.PutObject(bucketName, objectName,
	objInfo, err := obj.PutObject(nil, bucketName, objectName,
		mustGetHashReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), nil)
	if err != nil {
		t.Fatal(err)

@ -198,15 +198,15 @@ func TestListOnlineDisks(t *testing.T) {
	// Prepare bucket/object backend for the tests below.

	// Cleanup from previous test.
	obj.DeleteObject(bucket, object)
	obj.DeleteBucket(bucket)
	obj.DeleteObject(nil, bucket, object)
	obj.DeleteBucket(nil, bucket)

	err = obj.MakeBucketWithLocation("bucket", "")
	err = obj.MakeBucketWithLocation(nil, "bucket", "")
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}
@ -298,12 +298,12 @@ func TestDisksWithAllParts(t *testing.T) {
	xl := obj.(*xlObjects)
	xlDisks := xl.storageDisks

	err = obj.MakeBucketWithLocation("bucket", "")
	err = obj.MakeBucketWithLocation(nil, "bucket", "")
	if err != nil {
		t.Fatalf("Failed to make a bucket %v", err)
	}

	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""), nil)
	if err != nil {
		t.Fatalf("Failed to putObject %v", err)
	}
@ -17,6 +17,7 @@
package cmd

import (
	"context"
	"fmt"
	"path"
	"sync"
@ -25,14 +26,14 @@ import (
	"github.com/minio/minio/pkg/madmin"
)

func (xl xlObjects) HealFormat(dryRun bool) (madmin.HealResultItem, error) {
func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
	return madmin.HealResultItem{}, errors.Trace(NotImplemented{})
}

// Heals a bucket if it doesn't exist on one of the disks; additionally
// it also heals the missing entries for the bucket metadata files
// `policy.json, notification.xml, listeners.json`.
func (xl xlObjects) HealBucket(bucket string, dryRun bool) (
func (xl xlObjects) HealBucket(ctx context.Context, bucket string, dryRun bool) (
	results []madmin.HealResultItem, err error) {

	if err = checkBucketExist(bucket, xl); err != nil {
@ -167,7 +168,7 @@ func healBucketMetadata(xl xlObjects, bucket string, dryRun bool) (
	results []madmin.HealResultItem, err error) {

	healBucketMetaFn := func(metaPath string) error {
		result, healErr := xl.HealObject(minioMetaBucket, metaPath, dryRun)
		result, healErr := xl.HealObject(nil, minioMetaBucket, metaPath, dryRun)
		// If object is not found, no result to add.
		if isErrObjectNotFound(healErr) {
			return nil
@ -496,7 +497,7 @@ func healObject(storageDisks []StorageAPI, bucket string, object string,
// FIXME: If an object was deleted and one disk was down,
// and later the disk comes back up again, heal on the object
// should delete it.
func (xl xlObjects) HealObject(bucket, object string, dryRun bool) (hr madmin.HealResultItem, err error) {
func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, dryRun bool) (hr madmin.HealResultItem, err error) {

	// FIXME: Metadata is read again in the healObject() call below.
	// Read metadata files from all the disks
@ -47,14 +47,14 @@ func TestUndoMakeBucket(t *testing.T) {
	}

	bucketName := getRandomBucketName()
	if err = obj.MakeBucketWithLocation(bucketName, ""); err != nil {
	if err = obj.MakeBucketWithLocation(nil, bucketName, ""); err != nil {
		t.Fatal(err)
	}
	xl := obj.(*xlObjects)
	undoMakeBucket(xl.storageDisks, bucketName)

	// Validate if bucket was deleted properly.
	_, err = obj.GetBucketInfo(bucketName)
	_, err = obj.GetBucketInfo(nil, bucketName)
	if err != nil {
		err = errors.Cause(err)
		switch err.(type) {
@ -91,21 +91,21 @@ func TestHealObjectXL(t *testing.T) {
	object := "object"
	data := bytes.Repeat([]byte("a"), 5*1024*1024)

	err = obj.MakeBucketWithLocation(bucket, "")
	err = obj.MakeBucketWithLocation(nil, bucket, "")
	if err != nil {
		t.Fatalf("Failed to make a bucket - %v", err)
	}

	// Create an object with multiple parts uploaded in decreasing
	// part number.
	uploadID, err := obj.NewMultipartUpload(bucket, object, nil)
	uploadID, err := obj.NewMultipartUpload(nil, bucket, object, nil)
	if err != nil {
		t.Fatalf("Failed to create a multipart upload - %v", err)
	}

	var uploadedParts []CompletePart
	for _, partID := range []int{2, 1} {
		pInfo, err1 := obj.PutObjectPart(bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""))
		pInfo, err1 := obj.PutObjectPart(nil, bucket, object, uploadID, partID, mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""))
		if err1 != nil {
			t.Fatalf("Failed to upload a part - %v", err1)
		}
@ -115,7 +115,7 @@ func TestHealObjectXL(t *testing.T) {
		})
	}

	_, err = obj.CompleteMultipartUpload(bucket, object, uploadID, uploadedParts)
	_, err = obj.CompleteMultipartUpload(nil, bucket, object, uploadID, uploadedParts)
	if err != nil {
		t.Fatalf("Failed to complete multipart upload - %v", err)
	}
@ -128,7 +128,7 @@ func TestHealObjectXL(t *testing.T) {
		t.Fatalf("Failed to delete a file - %v", err)
	}

	_, err = obj.HealObject(bucket, object, false)
	_, err = obj.HealObject(nil, bucket, object, false)
	if err != nil {
		t.Fatalf("Failed to heal object - %v", err)
	}
@ -144,7 +144,7 @@ func TestHealObjectXL(t *testing.T) {
	}

	// Try healing now, expect to receive errXLReadQuorum.
	_, err = obj.HealObject(bucket, object, false)
	_, err = obj.HealObject(nil, bucket, object, false)
	// since majority of xl.jsons are not available, object quorum can't be read properly and error will be errXLReadQuorum
	if errors.Cause(err) != errXLReadQuorum {
		t.Errorf("Expected %v but received %v", errXLReadQuorum, err)
@ -16,12 +16,14 @@

package cmd

import "context"

// This is not implemented/needed anymore, look for xl-sets.ListBucketsHeal()
func (xl xlObjects) ListBucketsHeal() ([]BucketInfo, error) {
func (xl xlObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
	return nil, nil
}

// This is not implemented/needed anymore, look for xl-sets.ListObjectsHeal()
func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
func (xl xlObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
	return loi, nil
}
@ -17,6 +17,7 @@
package cmd

import (
	"context"
	"sort"

	"github.com/minio/minio/pkg/errors"
@ -147,7 +148,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
}

// ListObjects - list all objects at prefix, delimited by '/'.
func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
func (xl xlObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
	if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil {
		return loi, err
	}
@ -39,7 +39,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
	bucketName := getRandomBucketName()
	objectName := "test-object"
	// create bucket.
	err := obj.MakeBucketWithLocation(bucketName, "")
	err := obj.MakeBucketWithLocation(nil, bucketName, "")
	// Stop the test if creation of the bucket fails.
	if err != nil {
		t.Fatalf("%s : %s", instanceType, err.Error())
@ -68,7 +68,7 @@ func testXLReadStat(obj ObjectLayer, instanceType string, disks []string, t *tes
	// iterate through the above set of inputs and upload the object.
	for i, input := range putObjectInputs {
		// uploading the object.
		_, err = obj.PutObject(input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
		_, err = obj.PutObject(nil, input.bucketName, input.objectName, mustGetHashReader(t, bytes.NewBuffer(input.textData), input.contentLength, input.metaData["etag"], ""), input.metaData)
		// if object upload fails stop the test.
		if err != nil {
			t.Fatalf("Put Object case %d: Error uploading object: <ERROR> %v", i+1, err)
@ -114,13 +114,13 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
	// objectNames[0].
	// uploadIds [0].
	// Create bucket before initiating NewMultipartUpload.
	err := obj.MakeBucketWithLocation(bucketNames[0], "")
	err := obj.MakeBucketWithLocation(nil, bucketNames[0], "")
	if err != nil {
		// Failed to create new bucket, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
	}
	// Initiate Multipart Upload on the above created bucket.
	uploadID, err := obj.NewMultipartUpload(bucketNames[0], objectNames[0], nil)
	uploadID, err := obj.NewMultipartUpload(nil, bucketNames[0], objectNames[0], nil)
	if err != nil {
		// Failed to create NewMultipartUpload, abort.
		t.Fatalf("%s : %s", instanceType, err.Error())
@ -151,7 +151,7 @@ func testXLReadMetaParts(obj ObjectLayer, instanceType string, disks []string, t
	sha256sum := ""
	// Iterating over createPartCases to generate multipart chunks.
	for _, testCase := range createPartCases {
		_, perr := obj.PutObjectPart(testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
		_, perr := obj.PutObjectPart(nil, testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetHashReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.intputDataSize, testCase.inputMd5, sha256sum))
		if perr != nil {
			t.Fatalf("%s : %s", instanceType, perr)
		}
@ -17,6 +17,7 @@
package cmd

import (
	"context"
	"encoding/hex"
	"fmt"
	"path"
@ -138,7 +139,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
// not support prefix based listing, this is a deliberate attempt
// towards simplification of multipart APIs.
// The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
func (xl xlObjects) ListMultipartUploads(bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
	if err := checkListMultipartArgs(bucket, object, keyMarker, uploadIDMarker, delimiter, xl); err != nil {
		return result, err
	}
@ -236,7 +237,7 @@ func (xl xlObjects) newMultipartUpload(bucket string, object string, meta map[st
// subsequent request each UUID is unique.
//
// Implements S3 compatible initiate multipart API.
func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]string) (string, error) {
func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string) (string, error) {
	if err := checkNewMultipartArgs(bucket, object, xl); err != nil {
		return "", err
	}
@ -252,7 +253,7 @@ func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]st
// data is read from an existing object.
//
// Implements S3 compatible Upload Part Copy API.
func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {
func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo) (pi PartInfo, e error) {
	// Hold read locks on source object only if we are
	// going to read data from source object.
	objectSRLock := xl.nsMutex.NewNSLock(srcBucket, srcObject)
@ -279,7 +280,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
		}
	}()

	partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, srcInfo.Reader)
	partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.Reader)
	if err != nil {
		return pi, toObjectErr(err, dstBucket, dstObject)
	}
@ -293,7 +294,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (pi PartInfo, e error) {
	if err := checkPutObjectPartArgs(bucket, object, xl); err != nil {
		return pi, err
	}
@ -545,13 +546,47 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
	return result, nil
}

// ListObjectParts - lists all previously uploaded parts for a given
// object and uploadID. Takes additional input of part-number-marker
// to indicate where the listing should begin from.
//
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is unmarshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
	if err := checkListPartsArgs(bucket, object, xl); err != nil {
		return lpi, err
	}
	// Hold the lock so that two parallel complete-multipart-uploads
	// do not leave a stale uploads.json behind.
	objectMPartPathLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket, pathJoin(bucket, object))
	if err := objectMPartPathLock.GetRLock(globalListingTimeout); err != nil {
		return lpi, errors.Trace(err)
	}
	defer objectMPartPathLock.RUnlock()
	// Hold lock so that there is no competing
	// abort-multipart-upload or complete-multipart-upload.
	uploadIDLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket,
		pathJoin(bucket, object, uploadID))
	if err := uploadIDLock.GetLock(globalListingTimeout); err != nil {
		return lpi, err
	}
	defer uploadIDLock.Unlock()

	if !xl.isUploadIDExists(bucket, object, uploadID) {
		return lpi, errors.Trace(InvalidUploadID{UploadID: uploadID})
	}
	result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
	return result, err
}

// CompleteMultipartUpload - completes an ongoing multipart
// transaction after receiving all the parts indicated by the client.
// Returns an md5sum calculated by concatenating all the individual
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart) (oi ObjectInfo, e error) {
	if err := checkCompleteMultipartArgs(bucket, object, xl); err != nil {
		return oi, err
	}
@ -775,7 +810,7 @@ func (xl xlObjects) cleanupUploadedParts(uploadIDPath string, writeQuorum int) e
// Implements S3 compatible Abort multipart API; a slight difference is
// that this is an atomic idempotent operation. Subsequent calls have
// no effect and further requests to the same uploadID would not be honored.
func (xl xlObjects) AbortMultipartUpload(bucket, object, uploadID string) error {
func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	if err := checkAbortMultipartArgs(bucket, object, xl); err != nil {
		return err
	}
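The new ListObjectParts acquires two locks: first a read lock on the object's multipart path (guarding uploads.json), then a write lock on the specific uploadID. Abort and complete take them in the same order, which is what rules out deadlock; a toy illustration of the invariant:

```go
package main

import (
	"fmt"
	"sync"
)

// lockPair models the two locks taken by ListObjectParts above. Keeping
// the acquisition order fixed across list/complete/abort is what
// prevents deadlock between concurrent multipart operations.
type lockPair struct {
	path     sync.RWMutex // guards uploads.json for bucket/object
	uploadID sync.Mutex   // guards one upload's state
}

func (l *lockPair) withOrderedLocks(fn func()) {
	l.path.RLock() // always first
	defer l.path.RUnlock()
	l.uploadID.Lock() // always second
	defer l.uploadID.Unlock()
	fn()
}

func main() {
	var l lockPair
	l.withOrderedLocks(func() { fmt.Println("listing parts") })
}
```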
@ -50,8 +50,8 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
	bucketName := "bucket"
	objectName := "object"

	obj.MakeBucketWithLocation(bucketName, "")
	uploadID, err := obj.NewMultipartUpload(bucketName, objectName, nil)
	obj.MakeBucketWithLocation(nil, bucketName, "")
	uploadID, err := obj.NewMultipartUpload(nil, bucketName, objectName, nil)
	if err != nil {
		t.Fatal("Unexpected err: ", err)
	}
@ -66,7 +66,52 @@ func TestXLCleanupStaleMultipartUploads(t *testing.T) {
	globalServiceDoneCh <- struct{}{}

	// Check if upload id was already purged.
	if err = obj.AbortMultipartUpload(bucketName, objectName, uploadID); err != nil {
	if err = obj.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil {
		err = errors.Cause(err)
		if _, ok := err.(InvalidUploadID); !ok {
			t.Fatal("Unexpected err: ", err)
		}
	}
}

// Tests cleanup of stale upload ids.
func TestXLCleanupMultipartUpload(t *testing.T) {
	// Initialize configuration
	root, err := newTestConfig(globalMinioDefaultRegion)
	if err != nil {
		t.Fatalf("%s", err)
	}
	defer os.RemoveAll(root)

	// Create an instance of xl backend
	obj, fsDirs, err := prepareXL16()
	if err != nil {
		t.Fatal(err)
	}
	// Defer cleanup of backend directories
	defer removeRoots(fsDirs)

	xl := obj.(*xlObjects)

	// Close the go-routine, we are going to
	// manually start it and test in this test case.
	globalServiceDoneCh <- struct{}{}

	bucketName := "bucket"
	objectName := "object"

	obj.MakeBucketWithLocation(nil, bucketName, "")
	uploadID, err := obj.NewMultipartUpload(nil, bucketName, objectName, nil)
	if err != nil {
		t.Fatal("Unexpected err: ", err)
	}

	if err = cleanupStaleMultipartUpload(bucketName, 0, obj, xl.listMultipartUploadsCleanup); err != nil {
		t.Fatal("Unexpected err: ", err)
	}

	// Check if upload id was already purged.
	if err = obj.AbortMultipartUpload(nil, bucketName, objectName, uploadID); err != nil {
		err = errors.Cause(err)
		if _, ok := err.(InvalidUploadID); !ok {
			t.Fatal("Unexpected err: ", err)
@ -17,6 +17,7 @@
package cmd

import (
	"context"
	"encoding/hex"
	"io"
	"path"
@ -79,7 +80,7 @@ func (xl xlObjects) prepareFile(bucket, object string, size int64, onlineDisks [
// CopyObject - copies the source object to the destination object;
// if the source object and destination object are the same we only
// update the metadata.
func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo) (oi ObjectInfo, e error) {
func (xl xlObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo) (oi ObjectInfo, e error) {
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

	// Read metadata associated with the object from all disks.
@ -167,7 +168,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
//
// startOffset indicates the starting read location of the object.
// length indicates the total length of the object.
func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
func (xl xlObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
	// Lock the object before reading.
	objectLock := xl.nsMutex.NewNSLock(bucket, object)
	if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
@ -341,7 +342,7 @@ func (xl xlObjects) getObjectInfoDir(bucket, object string) (oi ObjectInfo, err
}

// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (xl xlObjects) GetObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
	// Lock the object before reading.
	objectLock := xl.nsMutex.NewNSLock(bucket, object)
	if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
@ -490,7 +491,7 @@ func renameObject(disks []StorageAPI, srcBucket, srcObject, dstBucket, dstObject
// until EOF, erasure codes the data across all disks and additionally
// writes `xl.json` which carries the necessary metadata for future
// object operations.
func (xl xlObjects) PutObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
func (xl xlObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, err error) {
	// Validate put object input args.
	if err = checkPutObjectArgs(bucket, object, xl, data.Size()); err != nil {
		return ObjectInfo{}, err
@ -777,7 +778,7 @@ func (xl xlObjects) deleteObject(bucket, object string) error {
// DeleteObject - deletes an object; this call doesn't necessarily return
// an error, as it is not necessary for the handler to reply back a
// response to the client request.
func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
	// Acquire a write lock before deleting the object.
	objectLock := xl.nsMutex.NewNSLock(bucket, object)
	if perr := objectLock.GetLock(globalOperationTimeout); perr != nil {
@ -811,8 +812,8 @@ func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
}

// ListObjectsV2 lists all blobs in bucket filtered by prefix
func (xl xlObjects) ListObjectsV2(bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	loi, err := xl.ListObjects(bucket, prefix, continuationToken, delimiter, maxKeys)
func (xl xlObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	loi, err := xl.ListObjects(ctx, bucket, prefix, continuationToken, delimiter, maxKeys)
	if err != nil {
		return result, err
	}
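GetObject streams the byte range [startOffset, startOffset+length) of an object into the supplied writer. A sketch of that contract over an in-memory blob, with range validation simplified relative to the real implementation:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// getRange writes blob[startOffset : startOffset+length] to w, mirroring
// the startOffset/length semantics of GetObject above.
func getRange(blob []byte, startOffset, length int64, w io.Writer) error {
	if startOffset < 0 || length < 0 || startOffset+length > int64(len(blob)) {
		return fmt.Errorf("invalid range [%d, %d)", startOffset, startOffset+length)
	}
	_, err := w.Write(blob[startOffset : startOffset+length])
	return err
}

func main() {
	var buf bytes.Buffer
	if err := getRange([]byte("hello world"), 6, 5, &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // world
}
```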
@ -43,23 +43,23 @@ func TestRepeatPutObjectPart(t *testing.T) {
	// cleaning up of temporary test directories
	defer removeRoots(disks)

	err = objLayer.MakeBucketWithLocation("bucket1", "")
	err = objLayer.MakeBucketWithLocation(nil, "bucket1", "")
	if err != nil {
		t.Fatal(err)
	}

	uploadID, err := objLayer.NewMultipartUpload("bucket1", "mpartObj1", nil)
	uploadID, err := objLayer.NewMultipartUpload(nil, "bucket1", "mpartObj1", nil)
	if err != nil {
		t.Fatal(err)
	}
	fiveMBBytes := bytes.Repeat([]byte("a"), 5*humanize.MiByte)
	md5Hex := getMD5Hash(fiveMBBytes)
	_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
	_, err = objLayer.PutObjectPart(nil, "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
	if err != nil {
		t.Fatal(err)
	}
	// PutObjectPart should succeed even if part already exists. ref: https://github.com/minio/minio/issues/1930
	_, err = objLayer.PutObjectPart("bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
	_, err = objLayer.PutObjectPart(nil, "bucket1", "mpartObj1", uploadID, 1, mustGetHashReader(t, bytes.NewReader(fiveMBBytes), 5*humanize.MiByte, md5Hex, ""))
	if err != nil {
		t.Fatal(err)
	}
@ -85,18 +85,18 @@ func TestXLDeleteObjectBasic(t *testing.T) {
	}

	// Make bucket for Test 7 to pass
	err = xl.MakeBucketWithLocation("bucket", "")
	err = xl.MakeBucketWithLocation(nil, "bucket", "")
	if err != nil {
		t.Fatal(err)
	}

	// Create object "obj" under bucket "bucket" for Test 7 to pass
	_, err = xl.PutObject("bucket", "obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	_, err = xl.PutObject(nil, "bucket", "obj", mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	if err != nil {
		t.Fatalf("XL Object upload failed: <ERROR> %s", err)
	}
	for i, test := range testCases {
		actualErr := xl.DeleteObject(test.bucket, test.object)
		actualErr := xl.DeleteObject(nil, test.bucket, test.object)
		actualErr = errors.Cause(actualErr)
		if test.expectedErr != nil && actualErr != test.expectedErr {
			t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr)
@ -121,7 +121,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
	xl := obj.(*xlObjects)

	// Create "bucket"
	err = obj.MakeBucketWithLocation("bucket", "")
	err = obj.MakeBucketWithLocation(nil, "bucket", "")
	if err != nil {
		t.Fatal(err)
	}
@ -129,7 +129,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
	bucket := "bucket"
	object := "object"
	// Create object "obj" under bucket "bucket".
	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	if err != nil {
		t.Fatal(err)
	}
@ -138,13 +138,13 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
	for i := range xl.storageDisks[:7] {
		xl.storageDisks[i] = newNaughtyDisk(xl.storageDisks[i], nil, errFaultyDisk)
	}
	err = obj.DeleteObject(bucket, object)
	err = obj.DeleteObject(nil, bucket, object)
	if err != nil {
		t.Fatal(err)
	}

	// Create "obj" under "bucket".
	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	if err != nil {
		t.Fatal(err)
	}
@ -152,7 +152,7 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
	// Remove one more disk to 'lose' quorum, by setting it to nil.
	xl.storageDisks[7] = nil
	xl.storageDisks[8] = nil
	err = obj.DeleteObject(bucket, object)
	err = obj.DeleteObject(nil, bucket, object)
	err = errors.Cause(err)
	// since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error
	if err != toObjectErr(errXLReadQuorum, bucket, object) {
@ -172,7 +172,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
	xl := obj.(*xlObjects)

	// Create "bucket"
	err = obj.MakeBucketWithLocation("bucket", "")
	err = obj.MakeBucketWithLocation(nil, "bucket", "")
	if err != nil {
		t.Fatal(err)
	}
@ -180,7 +180,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
	bucket := "bucket"
	object := "object"
	// Create "object" under "bucket".
	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	if err != nil {
		t.Fatal(err)
	}
@ -203,7 +203,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
		}
	}
	// Fetch object from store.
	err = xl.GetObject(bucket, object, 0, int64(len("abcd")), ioutil.Discard, "")
	err = xl.GetObject(nil, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "")
	err = errors.Cause(err)
	if err != toObjectErr(errXLReadQuorum, bucket, object) {
		t.Errorf("Expected getObject to fail with %v, but failed with %v", toObjectErr(errXLReadQuorum, bucket, object), err)
@ -223,7 +223,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
	xl := obj.(*xlObjects)

	// Create "bucket"
	err = obj.MakeBucketWithLocation("bucket", "")
	err = obj.MakeBucketWithLocation(nil, "bucket", "")
	if err != nil {
		t.Fatal(err)
	}
@ -231,7 +231,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
	bucket := "bucket"
	object := "object"
	// Create "object" under "bucket".
	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	if err != nil {
		t.Fatal(err)
	}
@ -254,7 +254,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
		}
	}
	// Upload new content to same object "object"
	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), nil)
	err = errors.Cause(err)
	if err != toObjectErr(errXLWriteQuorum, bucket, object) {
		t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err)
@ -280,7 +280,7 @@ func TestHealing(t *testing.T) {
	xl := obj.(*xlObjects)

	// Create "bucket"
	err = obj.MakeBucketWithLocation("bucket", "")
	err = obj.MakeBucketWithLocation(nil, "bucket", "")
	if err != nil {
		t.Fatal(err)
	}
@ -295,7 +295,7 @@ func TestHealing(t *testing.T) {
		t.Fatal(err)
	}

	_, err = obj.PutObject(bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil)
	_, err = obj.PutObject(nil, bucket, object, mustGetHashReader(t, bytes.NewReader(data), length, "", ""), nil)
	if err != nil {
		t.Fatal(err)
	}
@ -313,7 +313,7 @@ func TestHealing(t *testing.T) {
		t.Fatal(err)
	}

	_, err = xl.HealObject(bucket, object, false)
	_, err = xl.HealObject(nil, bucket, object, false)
	if err != nil {
		t.Fatal(err)
	}
@ -337,7 +337,7 @@ func TestHealing(t *testing.T) {
		t.Fatal(err)
	}

	_, err = xl.HealObject(bucket, object, false)
	_, err = xl.HealObject(nil, bucket, object, false)
	if err != nil {
		t.Fatal(err)
	}
@ -359,7 +359,7 @@ func TestHealing(t *testing.T) {
		t.Fatal(err)
	}
	// This would create the bucket.
	_, err = xl.HealBucket(bucket, false)
	_, err = xl.HealBucket(nil, bucket, false)
	if err != nil {
		t.Fatal(err)
	}
@ -17,6 +17,7 @@
package cmd

import (
	"context"
	"sort"
	"time"

@ -56,7 +57,7 @@ type xlObjects struct {
var xlTreeWalkIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errVolumeNotFound, errFileNotFound)

// Shutdown function for object storage interface.
func (xl xlObjects) Shutdown() error {
func (xl xlObjects) Shutdown(ctx context.Context) error {
	// Add any object layer shutdown activities here.
	for _, disk := range xl.getDisks() {
		// This closes storage rpc client connections if any.
@ -72,7 +73,7 @@ func (xl xlObjects) Shutdown() error {
// Locking operations

// List namespace locks held in object layer
func (xl xlObjects) ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
func (xl xlObjects) ListLocks(ctx context.Context, bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error) {
	xl.nsMutex.lockMapMutex.Lock()
	defer xl.nsMutex.lockMapMutex.Unlock()
	// Fetch current time once instead of fetching system time for every lock.
@ -118,7 +119,7 @@ func (xl xlObjects) ListLocks(bucket, prefix string, duration time.Duration) ([]
}

// Clear namespace locks held in object layer
func (xl xlObjects) ClearLocks(volLocks []VolumeLockInfo) error {
func (xl xlObjects) ClearLocks(ctx context.Context, volLocks []VolumeLockInfo) error {
	// Remove lock matching bucket/prefix held longer than duration.
	for _, volLock := range volLocks {
		xl.nsMutex.ForceUnlock(volLock.Bucket, volLock.Object)
@ -215,6 +216,6 @@ func getStorageInfo(disks []StorageAPI) StorageInfo {
}

// StorageInfo - returns underlying storage statistics.
func (xl xlObjects) StorageInfo() StorageInfo {
func (xl xlObjects) StorageInfo(ctx context.Context) StorageInfo {
	return getStorageInfo(xl.getDisks())
}
@ -37,7 +37,7 @@ func TestStorageInfo(t *testing.T) {
	}

	// Get storage info first attempt.
	disks16Info := objLayer.StorageInfo()
	disks16Info := objLayer.StorageInfo(nil)

	// This test assumes homogeneity between all disks,
	// i.e. if we lose one disk the effective storage