site replication: fix healing of bucket deletes. (#15377)

This PR changes the handling of bucket deletes for site-replicated
setups: deleted-bucket state is held on to until the delete has
synced to all the clusters participating in site replication.
Poorna 2022-07-25 17:51:32 -07:00 committed by GitHub
parent e4b51235f8
commit 426c902b87
55 changed files with 1946 additions and 320 deletions
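
For orientation before the per-file hunks: the old BucketOptions used by MakeBucketWithLocation becomes MakeBucketOptions (gaining a CreatedAt field), and a new, smaller BucketOptions is threaded through GetBucketInfo/ListBuckets. The sketch below is inferred from how the fields are used in the hunks that follow; it is not the verbatim definition from the interface file, which this view does not include.

    // Hedged reconstruction, inferred from usage in this PR; the real definitions
    // may carry extra fields. Requires "time" from the standard library.
    type MakeBucketOptions struct {
        Location          string    // bucket location constraint
        LockEnabled       bool      // object locking requested at creation
        VersioningEnabled bool      // versioning requested at creation
        ForceCreate       bool      // create even if local state disagrees
        CreatedAt         time.Time // peer-supplied creation time for site replication
    }

    type BucketOptions struct {
        Deleted bool // when true, lookups also report buckets held in the ".deleted" marker area
    }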

View File

@@ -80,7 +80,7 @@ func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.
     }
     // Before proceeding validate if bucket exists.
-    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
@@ -142,7 +142,7 @@ func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.
     }
     // Before proceeding validate if bucket exists.
-    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return

View File

@@ -69,7 +69,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
     vars := mux.Vars(r)
     bucket := pathClean(vars["bucket"])
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -131,7 +131,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *
     vars := mux.Vars(r)
     bucket := pathClean(vars["bucket"])
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -173,7 +173,7 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http.
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -301,7 +301,7 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt
     }
     if bucket != "" {
         // Check if bucket exists.
-        if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+        if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
             writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
             return
         }
@@ -340,7 +340,7 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -390,13 +390,13 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
     )
     if bucket != "" {
         // Check if bucket exists.
-        if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+        if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
             writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
             return
         }
         buckets = append(buckets, BucketInfo{Name: bucket})
     } else {
-        buckets, err = objectAPI.ListBuckets(ctx)
+        buckets, err = objectAPI.ListBuckets(ctx, BucketOptions{})
         if err != nil {
             writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
             return
@@ -684,7 +684,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
         return
     }
     if _, ok := bucketMap[bucket]; !ok {
-        opts := BucketOptions{
+        opts := MakeBucketOptions{
             LockEnabled: config.ObjectLockEnabled == "Enabled",
         }
         err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
@@ -750,7 +750,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
         return
     }
     if _, ok := bucketMap[bucket]; !ok {
-        err = objectAPI.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+        err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
         if err != nil {
             if _, ok := err.(BucketExists); !ok {
                 writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
@@ -817,7 +817,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
     }
     // create bucket if it does not exist yet.
     if _, ok := bucketMap[bucket]; !ok {
-        err = objectAPI.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+        err = objectAPI.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
         if err != nil {
             if _, ok := err.(BucketExists); !ok {
                 writeErrorResponse(ctx, w, importError(ctx, err, file.Name, bucket), r.URL)
@@ -1089,7 +1089,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http.
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }

View File

@@ -24,6 +24,8 @@ import (
     "io"
     "io/ioutil"
     "net/http"
+    "strings"
+    "time"

     "github.com/gorilla/mux"
     "github.com/minio/madmin-go"
@@ -116,19 +118,38 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
         _, isLockEnabled := r.Form["lockEnabled"]
         _, isVersioningEnabled := r.Form["versioningEnabled"]
         _, isForceCreate := r.Form["forceCreate"]
-        opts := BucketOptions{
+        createdAtStr := strings.TrimSpace(r.Form.Get("createdAt"))
+        createdAt, cerr := time.Parse(time.RFC3339Nano, createdAtStr)
+        if cerr != nil {
+            createdAt = timeSentinel
+        }
+        opts := MakeBucketOptions{
            Location:          r.Form.Get("location"),
            LockEnabled:       isLockEnabled,
            VersioningEnabled: isVersioningEnabled,
            ForceCreate:       isForceCreate,
+           CreatedAt:         createdAt,
        }
        err = globalSiteReplicationSys.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
    case madmin.ConfigureReplBktOp:
        err = globalSiteReplicationSys.PeerBucketConfigureReplHandler(ctx, bucket)
    case madmin.DeleteBucketBktOp:
-       err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, false)
+       _, noRecreate := r.Form["noRecreate"]
+       err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
+           Force:      false,
+           NoRecreate: noRecreate,
+           SRDeleteOp: getSRBucketDeleteOp(true),
+       })
    case madmin.ForceDeleteBucketBktOp:
-       err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, true)
+       _, noRecreate := r.Form["noRecreate"]
+       err = globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
+           Force:      true,
+           NoRecreate: noRecreate,
+           SRDeleteOp: getSRBucketDeleteOp(true),
+       })
+   case madmin.PurgeDeletedBucketOp:
+       globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
    }
    if err != nil {
        logger.LogIf(ctx, err)
@@ -436,6 +457,7 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) {
    opts.Users = q.Get("users") == "true"
    opts.Entity = madmin.GetSREntityType(q.Get("entity"))
    opts.EntityValue = q.Get("entityvalue")
+   opts.ShowDeleted = q.Get("showDeleted") == "true"
    return
 }
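
The new form fields parsed above are supplied by the peer that forwards the bucket operation; that caller is not part of the hunks shown here. A hypothetical helper illustrating the round-trip (field names taken from the handler above, everything else assumed; needs "net/url" and "time"):

    // Hypothetical illustration only; the real peer client lives elsewhere in the PR.
    func peerBucketOpForm(bucket string, createdAt time.Time, noRecreate bool) url.Values {
        form := url.Values{}
        form.Set("bucket", bucket)
        // Original creation time, preserved across sites and parsed as RFC3339Nano above.
        form.Set("createdAt", createdAt.UTC().Format(time.RFC3339Nano))
        if noRecreate {
            // The handler only checks for the key's presence, not its value.
            form.Set("noRecreate", "true")
        }
        return form
    }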

View File

@@ -1177,7 +1177,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
         return buckets[i].Name < buckets[j].Name
     })
 } else {
-    buckets, err = objectAPI.ListBuckets(ctx)
+    buckets, err = objectAPI.ListBuckets(ctx, BucketOptions{})
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
         return

View File

@@ -1288,7 +1288,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
 }

 func makeObjectPerfBucket(ctx context.Context, objectAPI ObjectLayer, bucketName string) (bucketExists bool, err error) {
-    if err = objectAPI.MakeBucketWithLocation(ctx, bucketName, BucketOptions{}); err != nil {
+    if err = objectAPI.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{}); err != nil {
         if _, ok := err.(BucketExists); !ok {
             // Only BucketExists error can be ignored.
             return false, err
@@ -1302,6 +1302,7 @@ func deleteObjectPerfBucket(objectAPI ObjectLayer) {
     objectAPI.DeleteBucket(context.Background(), globalObjectPerfBucket, DeleteBucketOptions{
         Force:      true,
         NoRecreate: true,
+        SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
     })
 }
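
getSRBucketDeleteOp(...) shows up here and in the bucket handlers below as the way callers tag a delete for site replication. Its definition is not part of the hunks shown; a minimal sketch consistent with how it is used, MarkDelete when site replication is active and a no-op marker otherwise, could look like this:

    // Sketch only: the names SRDeleteOp and MarkDelete come from this diff; the
    // type's underlying kind and the non-replicated value (called NoOp here) are assumptions.
    type SRBucketDeleteOp string

    const (
        MarkDelete SRBucketDeleteOp = "MarkDelete"
        NoOp       SRBucketDeleteOp = "NoOp"
    )

    func getSRBucketDeleteOp(isSiteReplicated bool) SRBucketDeleteOp {
        if isSiteReplicated {
            return MarkDelete
        }
        return NoOp
    }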

View File

@@ -822,7 +822,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error {
         return h.healBucket(objAPI, h.bucket, bucketsOnly)
     }

-    buckets, err := objAPI.ListBuckets(h.ctx)
+    buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{})
     if err != nil {
         return errFnHealFromAPIErr(h.ctx, err)
     }

View File

@@ -318,8 +318,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
     ctx = lkctx.Context()
     defer locker.Unlock(lkctx.Cancel)

-    buckets, _ := z.ListBuckets(ctx)
+    buckets, _ := z.ListBuckets(ctx, BucketOptions{})
     // Buckets data are dispersed in multiple zones/sets, make
     // sure to heal all bucket metadata configuration.
     buckets = append(buckets, BucketInfo{

View File

@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
+    err = obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
     if err != nil {
         b.Fatal(err)
     }
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
     object := getRandomObjectName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
+    err = obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
     if err != nil {
         b.Fatal(err)
     }
@@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
     if err != nil {
         b.Fatal(err)
     }

View File

@@ -65,7 +65,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
     }
     // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -155,7 +155,7 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r
     // Check if bucket exists
     var err error
-    if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err = objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -198,7 +198,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
     // Check if bucket exists
     var err error
-    if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err = objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }

View File

@@ -201,7 +201,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
     getBucketInfo := objectAPI.GetBucketInfo
-    if _, err := getBucketInfo(ctx, bucket); err != nil {
+    if _, err := getBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -332,7 +332,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
     } else {
         // Invoke the list buckets.
         var err error
-        bucketsInfo, err = listBuckets(ctx)
+        bucketsInfo, err = listBuckets(ctx, BucketOptions{})
         if err != nil {
             writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
             return
@@ -447,7 +447,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
     checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, "")

     // Before proceeding validate if bucket exists.
-    _, err := objectAPI.GetBucketInfo(ctx, bucket)
+    _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
@@ -746,7 +746,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
         return
     }

-    opts := BucketOptions{
+    opts := MakeBucketOptions{
         Location:    location,
         LockEnabled: objectLockEnabled,
         ForceCreate: forceCreate,
@@ -765,7 +765,11 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
     }

     if err = globalDNSConfig.Put(bucket); err != nil {
-        objectAPI.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{Force: false, NoRecreate: true})
+        objectAPI.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{
+            Force:      false,
+            NoRecreate: true,
+            SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
+        })
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -1165,7 +1169,7 @@ func (api objectAPIHandlers) GetBucketPolicyStatusHandler(w http.ResponseWriter,
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -1228,7 +1232,7 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
     getBucketInfo := objectAPI.GetBucketInfo
-    if _, err := getBucketInfo(ctx, bucket); err != nil {
+    if _, err := getBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponseHeadersOnly(w, toAPIError(ctx, err))
         return
     }
@@ -1305,7 +1309,10 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
     deleteBucket := objectAPI.DeleteBucket

     // Attempt to delete bucket.
-    if err := deleteBucket(ctx, bucket, DeleteBucketOptions{Force: forceDelete}); err != nil {
+    if err := deleteBucket(ctx, bucket, DeleteBucketOptions{
+        Force:      forceDelete,
+        SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
+    }); err != nil {
         apiErr := toAPIError(ctx, err)
         if _, ok := err.(BucketNotEmpty); ok {
             if globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket) {
@@ -1472,7 +1479,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }

View File

@@ -62,7 +62,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
     }
     // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -121,7 +121,7 @@ func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r
     }
     // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -163,7 +163,7 @@ func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter,
     }
     // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }

View File

@@ -560,7 +560,7 @@ func (r *RestoreObjectRequest) validate(ctx context.Context, objAPI ObjectLayer)
     }
     // Check if bucket exists.
     if !r.OutputLocation.IsEmpty() {
-        if _, err := objAPI.GetBucketInfo(ctx, r.OutputLocation.S3.BucketName); err != nil {
+        if _, err := objAPI.GetBucketInfo(ctx, r.OutputLocation.S3.BucketName, BucketOptions{}); err != nil {
             return err
         }
         if r.OutputLocation.S3.Prefix == "" {

View File

@@ -105,10 +105,8 @@ type BucketMetadata struct {
 // newBucketMetadata creates BucketMetadata with the supplied name and Created to Now.
 func newBucketMetadata(name string) BucketMetadata {
-    now := UTCNow()
     return BucketMetadata{
         Name: name,
-        Created: now,
         notificationConfig: &event.Config{
             XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
         },
@@ -121,6 +119,17 @@ func newBucketMetadata(name string) BucketMetadata {
     }
 }

+// SetCreatedAt preserves the CreatedAt time for bucket across sites in site replication. It defaults to
+// creation time of bucket on this cluster in all other cases.
+func (b *BucketMetadata) SetCreatedAt(createdAt time.Time) {
+    if b.Created.IsZero() {
+        b.Created = UTCNow()
+    }
+    if !createdAt.IsZero() {
+        b.Created = createdAt.UTC()
+    }
+}
+
 // Load - loads the metadata of bucket by name from ObjectLayer api.
 // If an error is returned the returned metadata will be default initialized.
 func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
@@ -160,17 +169,19 @@ func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket strin
     if err != nil && !errors.Is(err, errConfigNotFound) {
         return b, err
     }
+    if err == nil {
+        b.defaultTimestamps()
+    }
     // Old bucket without bucket metadata. Hence we migrate existing settings.
     if err := b.convertLegacyConfigs(ctx, objectAPI); err != nil {
         return b, err
     }
     // migrate unencrypted remote targets
-    if err = b.migrateTargetConfig(ctx, objectAPI); err != nil {
+    if err := b.migrateTargetConfig(ctx, objectAPI); err != nil {
         return b, err
     }
-    b.defaultTimestamps()
     return b, nil
 }
@@ -345,6 +356,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
             b.BucketTargetsConfigJSON = configData
         }
     }
+    b.defaultTimestamps()
     if err := b.Save(ctx, objectAPI); err != nil {
         return err
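
The net effect of SetCreatedAt plus the new CreatedAt field on MakeBucketOptions: a locally created bucket stamps its own clock, while a bucket created on behalf of a peer keeps the originating site's timestamp. A small usage sketch (names from this diff, the surrounding wiring assumed):

    // Sketch of the intended semantics; "opts" is a MakeBucketOptions as used in
    // the erasure-server-pool hunks further down.
    func applyBucketCreateTime(opts MakeBucketOptions) BucketMetadata {
        meta := newBucketMetadata("photos")
        // Zero CreatedAt => UTCNow(); peer-supplied CreatedAt => preserved, normalized to UTC.
        meta.SetCreatedAt(opts.CreatedAt)
        return meta
    }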

View File

@@ -60,7 +60,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
         return
     }

-    _, err := objAPI.GetBucketInfo(ctx, bucketName)
+    _, err := objAPI.GetBucketInfo(ctx, bucketName, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
@@ -132,7 +132,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
         return
     }

-    _, err := objectAPI.GetBucketInfo(ctx, bucketName)
+    _, err := objectAPI.GetBucketInfo(ctx, bucketName, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return

View File

@@ -61,7 +61,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
     }
     // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -145,7 +145,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
     }
     // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -191,7 +191,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
     }
     // Check if bucket exists.
-    if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }

View File

@@ -128,7 +128,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute
         defer wg.Done()
         // Sync start.
         <-start
-        if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
+        if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, MakeBucketOptions{}); err != nil {
             if _, ok := err.(BucketExists); !ok {
                 t.Logf("unexpected error: %T: %v", err, err)
                 return
@@ -163,7 +163,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
     credentials auth.Credentials, t *testing.T,
 ) {
     bucketName1 := fmt.Sprintf("%s-1", bucketName)
-    if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
+    if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, MakeBucketOptions{}); err != nil {
         t.Fatal(err)
     }

View File

@@ -55,7 +55,7 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
         return
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -118,7 +118,7 @@ func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWr
         return
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -157,7 +157,7 @@ func (api objectAPIHandlers) DeleteBucketReplicationConfigHandler(w http.Respons
         return
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -198,7 +198,7 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseW
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -267,7 +267,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseW
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }
@@ -356,7 +356,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.Response
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }

View File

@@ -143,7 +143,7 @@ func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r
     }
     // Check if bucket exists.
-    if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
+    if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
     }

View File

@@ -52,7 +52,7 @@ func (api objectAPIHandlers) GetBucketWebsiteHandler(w http.ResponseWriter, r *h
     }
     // Validate if bucket exists, before proceeding further...
-    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
@@ -84,7 +84,7 @@ func (api objectAPIHandlers) GetBucketAccelerateHandler(w http.ResponseWriter, r
     }
     // Validate if bucket exists, before proceeding further...
-    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
@@ -117,7 +117,7 @@ func (api objectAPIHandlers) GetBucketRequestPaymentHandler(w http.ResponseWrite
     }
     // Validate if bucket exists, before proceeding further...
-    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
@@ -151,7 +151,7 @@ func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *h
     }
     // Validate if bucket exists, before proceeding further...
-    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return
@@ -189,7 +189,7 @@ func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http
     }
     // Validate if bucket exists, before proceeding further...
-    _, err := objAPI.GetBucketInfo(ctx, bucket)
+    _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
     if err != nil {
         writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
         return

View File

@@ -35,7 +35,7 @@ var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)
 // Bucket operations

 // MakeBucket - make a bucket.
-func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
+func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
     defer NSUpdated(bucket, slashSeparator)

     // Verify if bucket is valid.
@@ -94,7 +94,7 @@ func undoDeleteBucket(storageDisks []StorageAPI, bucket string) {
 }

 // getBucketInfo - returns the BucketInfo from one of the load balanced disks.
-func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) {
+func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string, opts BucketOptions) (bucketInfo BucketInfo, err error) {
     storageDisks := er.getDisks()

     g := errgroup.WithNErrs(len(storageDisks))
@@ -108,9 +108,17 @@ func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (
         }
         volInfo, err := storageDisks[index].StatVol(ctx, bucketName)
         if err != nil {
+            if opts.Deleted {
+                dvi, derr := storageDisks[index].StatVol(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucketName))
+                if derr != nil {
+                    return err
+                }
+                bucketsInfo[index] = BucketInfo{Name: bucketName, Deleted: dvi.Created}
+                return nil
+            }
             return err
         }
-        bucketsInfo[index] = BucketInfo(volInfo)
+        bucketsInfo[index] = BucketInfo{Name: volInfo.Name, Created: volInfo.Created}
         return nil
     }, index)
 }
@@ -131,8 +139,8 @@ func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (
 }

 // GetBucketInfo - returns BucketInfo for a bucket.
-func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
-    bucketInfo, err := er.getBucketInfo(ctx, bucket)
+func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bi BucketInfo, e error) {
+    bucketInfo, err := er.getBucketInfo(ctx, bucket, opts)
     if err != nil {
         return bi, toObjectErr(err, bucket)
     }
@@ -191,12 +199,58 @@ func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, opts D
         if purgedDangling {
             err = nil
         }
+        if opts.SRDeleteOp == MarkDelete {
+            er.markDelete(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
+        }
     }

     return toObjectErr(err, bucket)
 }

+// markDelete creates a vol entry in .minio.sys/buckets/.deleted until site replication
+// syncs the delete to peers
+func (er erasureObjects) markDelete(ctx context.Context, bucket, prefix string) error {
+    storageDisks := er.getDisks()
+    g := errgroup.WithNErrs(len(storageDisks))
+    // Make a volume entry on all underlying storage disks.
+    for index := range storageDisks {
+        index := index
+        if storageDisks[index] == nil {
+            continue
+        }
+        g.Go(func() error {
+            if err := storageDisks[index].MakeVol(ctx, pathJoin(bucket, prefix)); err != nil {
+                if errors.Is(err, errVolumeExists) {
+                    return nil
+                }
+                return err
+            }
+            return nil
+        }, index)
+    }
+    err := reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, er.defaultWQuorum())
+    return toObjectErr(err, bucket)
+}
+
+// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
+// syncs the delete to peers OR on a new MakeBucket call.
+func (er erasureObjects) purgeDelete(ctx context.Context, bucket, prefix string) error {
+    storageDisks := er.getDisks()
+    g := errgroup.WithNErrs(len(storageDisks))
+    // Make a volume entry on all underlying storage disks.
+    for index := range storageDisks {
+        index := index
+        g.Go(func() error {
+            if storageDisks[index] != nil {
+                return storageDisks[index].DeleteVol(ctx, pathJoin(bucket, prefix), true)
+            }
+            return errDiskNotFound
+        }, index)
+    }
+    err := reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, er.defaultWQuorum())
+    return toObjectErr(err, bucket)
+}
+
 // IsNotificationSupported returns whether bucket notification is applicable for this layer.
 func (er erasureObjects) IsNotificationSupported() bool {
     return true
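
Both markDelete and purgeDelete operate on a per-bucket marker volume rather than on the bucket itself. Assuming the usual values of the constants involved (minioMetaBucket = ".minio.sys", bucketMetaPrefix = "buckets"; ".deleted" for deletedBucketsPrefix is inferred from the comments above), the marker for a bucket named "photos" lives at:

    // Illustration of the marker path that markDelete creates and purgeDelete removes.
    marker := pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, "photos")
    // => ".minio.sys/buckets/.deleted/photos"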

View File

@@ -176,7 +176,7 @@ func TestListOnlineDisks(t *testing.T) {
     }

     bucket := "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket %v", err)
     }
@@ -351,7 +351,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
     }

     bucket := "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket %v", err)
     }
@@ -472,7 +472,7 @@ func TestDisksWithAllParts(t *testing.T) {
     z := obj.(*erasureServerPools)
     s := z.serverPools[0].sets[0]
     erasureDisks := s.getDisks()
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket %v", err)
     }

View File

@@ -236,7 +236,7 @@ func TestHealing(t *testing.T) {
     er := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -359,7 +359,7 @@ func TestHealingDanglingObject(t *testing.T) {
     object := getRandomObjectName()
     data := bytes.Repeat([]byte("a"), 128*1024)

-    err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = objLayer.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }
@@ -554,7 +554,7 @@ func TestHealCorrectQuorum(t *testing.T) {
     data := bytes.Repeat([]byte("a"), 5*1024*1024)
     var opts ObjectOptions

-    err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = objLayer.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }
@@ -685,7 +685,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
     data := bytes.Repeat([]byte("a"), 5*1024*1024)
     var opts ObjectOptions

-    err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = objLayer.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }
@@ -861,7 +861,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
     data := bytes.Repeat([]byte("a"), 5*1024*1024)
     var opts ObjectOptions

-    err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = objLayer.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }
@@ -1004,7 +1004,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
     data := bytes.Repeat([]byte("a"), 5*1024*1024)
     var opts ObjectOptions

-    err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = objLayer.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }
@@ -1161,7 +1161,7 @@ func TestHealObjectErasure(t *testing.T) {
     data := bytes.Repeat([]byte("a"), 5*1024*1024)
     var opts ObjectOptions

-    err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }
@@ -1258,7 +1258,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
     object := "empty-dir/"
     var opts ObjectOptions

-    err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }
@@ -1359,7 +1359,7 @@ func TestHealLastDataShard(t *testing.T) {
     }
     var opts ObjectOptions
-    err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket - %v", err)
     }

View File

@@ -53,7 +53,7 @@ func TestRepeatPutObjectPart(t *testing.T) {
     defer objLayer.Shutdown(context.Background())
     defer removeRoots(disks)

-    err = objLayer.MakeBucketWithLocation(ctx, "bucket1", BucketOptions{})
+    err = objLayer.MakeBucketWithLocation(ctx, "bucket1", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -101,7 +101,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) {
     }
     defer xl.Shutdown(context.Background())

-    err = xl.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = xl.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -155,7 +155,7 @@ func TestDeleteObjectsVersioned(t *testing.T) {
         {bucketName, "dir/obj1"},
     }

-    err = obj.MakeBucketWithLocation(ctx, bucketName, BucketOptions{
+    err = obj.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{
         VersioningEnabled: true,
     })
     if err != nil {
@@ -244,7 +244,7 @@ func TestErasureDeleteObjectsErasureSet(t *testing.T) {
         {bucketName, "obj_4"},
     }

-    err := erasureSets.MakeBucketWithLocation(ctx, bucketName, BucketOptions{})
+    err := erasureSets.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -305,7 +305,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
     xl := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -374,7 +374,7 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) {
     xl := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -434,7 +434,7 @@ func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) {
     xl := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -505,7 +505,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
     xl := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -614,7 +614,7 @@ func TestHeadObjectNoQuorum(t *testing.T) {
     xl := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -692,7 +692,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
     xl := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -755,7 +755,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) {
     xl := z.serverPools[0].sets[0]

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -824,7 +824,7 @@ func TestPutObjectSmallInlineData(t *testing.T) {
     object := "object"

     // Create "bucket"
-    err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err = obj.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -892,7 +892,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
     ctx, cancel := context.WithCancel(GlobalContext)
     defer cancel()

-    err := obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+    err := obj.MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("Failed to make a bucket %v", err)
     }
@@ -1091,7 +1091,7 @@ func TestGetObjectInlineNotInline(t *testing.T) {
     defer removeRoots(fsDirs)

     // Create a testbucket
-    err = objLayer.MakeBucketWithLocation(ctx, "testbucket", BucketOptions{})
+    err = objLayer.MakeBucketWithLocation(ctx, "testbucket", MakeBucketOptions{})
     if err != nil {
         t.Fatal(err)
     }
@@ -1157,7 +1157,7 @@ func TestGetObjectWithOutdatedDisks(t *testing.T) {
     for i, testCase := range testCases {
         // Step 1: create a bucket
-        err = z.MakeBucketWithLocation(ctx, testCase.bucket, BucketOptions{VersioningEnabled: testCase.versioned})
+        err = z.MakeBucketWithLocation(ctx, testCase.bucket, MakeBucketOptions{VersioningEnabled: testCase.versioned})
         if err != nil {
             t.Fatalf("Test %d: Failed to create a bucket: %v", i+1, err)
         }

View File

@@ -1119,7 +1119,7 @@ func (z *erasureServerPools) StartDecommission(ctx context.Context, idx int) (er
         return errInvalidArgument
     }

-    buckets, err := z.ListBuckets(ctx)
+    buckets, err := z.ListBuckets(ctx, BucketOptions{})
     if err != nil {
         return err
     }
@@ -1157,7 +1157,7 @@ func (z *erasureServerPools) StartDecommission(ctx context.Context, idx int) (er
         pathJoin(minioMetaBucket, bucketMetaPrefix),
     } {
         var bucketExists BucketExists
-        if err = z.MakeBucketWithLocation(ctx, metaBucket, BucketOptions{}); err != nil {
+        if err = z.MakeBucketWithLocation(ctx, metaBucket, MakeBucketOptions{}); err != nil {
             if !errors.As(err, &bucketExists) {
                 return err
             }

View File

@@ -613,7 +613,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, upd
     var results []dataUsageCache
     var firstErr error

-    allBuckets, err := z.ListBuckets(ctx)
+    allBuckets, err := z.ListBuckets(ctx, BucketOptions{})
     if err != nil {
         return err
     }
@@ -718,7 +718,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, upd
 // MakeBucketWithLocation - creates a new bucket across all serverPools simultaneously
 // even if one of the sets fail to create buckets, we proceed all the successful
 // operations.
-func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
+func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
     g := errgroup.WithNErrs(len(z.serverPools))

     // Lock the bucket name before creating.
@@ -758,6 +758,7 @@ func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket
     // If it doesn't exist we get a new, so ignore errors
     meta := newBucketMetadata(bucket)
+    meta.SetCreatedAt(opts.CreatedAt)
     if opts.LockEnabled {
         meta.VersioningConfigXML = enabledBucketVersioningConfig
         meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
@@ -1539,9 +1540,9 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket
 }

 // GetBucketInfo - returns bucket info from one of the erasure coded serverPools.
-func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
+func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bucketInfo BucketInfo, err error) {
     if z.SinglePool() {
-        bucketInfo, err = z.serverPools[0].GetBucketInfo(ctx, bucket)
+        bucketInfo, err = z.serverPools[0].GetBucketInfo(ctx, bucket, opts)
         if err != nil {
             return bucketInfo, err
         }
@@ -1552,7 +1553,7 @@ func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string) (
         return bucketInfo, nil
     }
     for _, pool := range z.serverPools {
-        bucketInfo, err = pool.GetBucketInfo(ctx, bucket)
+        bucketInfo, err = pool.GetBucketInfo(ctx, bucket, opts)
         if err != nil {
             if isErrBucketNotFound(err) {
                 continue
@@ -1626,7 +1627,11 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op
     // Purge the entire bucket metadata entirely.
     z.renameAll(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, bucket))

+    // If site replication is configured, hold on to deleted bucket state until sites sync
+    switch opts.SRDeleteOp {
+    case MarkDelete:
+        z.markDelete(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
+    }
     // Success.
     return nil
 }
@@ -1644,6 +1649,27 @@ func (z *erasureServerPools) renameAll(ctx context.Context, bucket, prefix strin
     }
 }

+// markDelete will create a directory of deleted bucket in .minio.sys/buckets/.deleted across all disks
+// in situations where the deleted bucket needs to be held on to until all sites are in sync for
+// site replication
+func (z *erasureServerPools) markDelete(ctx context.Context, bucket, prefix string) {
+    for _, servers := range z.serverPools {
+        for _, set := range servers.sets {
+            set.markDelete(ctx, bucket, prefix)
+        }
+    }
+}
+
+// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
+// syncs the delete to peers.
+func (z *erasureServerPools) purgeDelete(ctx context.Context, bucket, prefix string) {
+    for _, servers := range z.serverPools {
+        for _, set := range servers.sets {
+            set.purgeDelete(ctx, bucket, prefix)
+        }
+    }
+}
+
 // This function is used to undo a successful DeleteBucket operation.
 func undoDeleteBucketServerPools(ctx context.Context, bucket string, serverPools []*erasureSets, errs []error) {
     g := errgroup.WithNErrs(len(serverPools))
@@ -1653,7 +1679,7 @@ func undoDeleteBucketServerPools(ctx context.Context, bucket string, serverPools
         index := index
         g.Go(func() error {
             if errs[index] == nil {
-                return serverPools[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{})
+                return serverPools[index].MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
             }
             return nil
         }, index)
@@ -1665,15 +1691,15 @@ func undoDeleteBucketServerPools(ctx context.Context, bucket string, serverPools
 // List all buckets from one of the serverPools, we are not doing merge
 // sort here just for simplification. As per design it is assumed
 // that all buckets are present on all serverPools.
-func (z *erasureServerPools) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
+func (z *erasureServerPools) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
     if z.SinglePool() {
-        buckets, err = z.serverPools[0].ListBuckets(ctx)
+        buckets, err = z.serverPools[0].ListBuckets(ctx, opts)
     } else {
         for idx, pool := range z.serverPools {
             if z.IsSuspended(idx) {
                 continue
             }
-            buckets, err = pool.ListBuckets(ctx)
+            buckets, err = pool.ListBuckets(ctx, opts)
             if err != nil {
                 logger.LogIf(ctx, err)
                 continue
@@ -1685,9 +1711,9 @@ func (z *erasureServerPools) ListBuckets(ctx context.Context) (buckets []BucketI
         return nil, err
     }
     for i := range buckets {
-        meta, err := globalBucketMetadataSys.Get(buckets[i].Name)
+        createdAt, err := globalBucketMetadataSys.CreatedAt(buckets[i].Name)
         if err == nil {
-            buckets[i].Created = meta.Created
+            buckets[i].Created = createdAt
         }
     }
     return buckets, nil
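
ListBuckets now asks the metadata subsystem only for the creation time instead of fetching the whole BucketMetadata value. The accessor itself is not in the hunks shown; a sketch of what it presumably amounts to, built on the Get call the old code used (the real method may be implemented differently):

    // Assumed shape of the new accessor; not taken verbatim from the PR.
    func (sys *BucketMetadataSys) CreatedAt(bucket string) (time.Time, error) {
        meta, err := sys.Get(bucket)
        if err != nil {
            return time.Time{}, err
        }
        return meta.Created, nil
    }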

View File

@ -27,6 +27,7 @@ import (
"net/http" "net/http"
"reflect" "reflect"
"sort" "sort"
"strings"
"sync" "sync"
"time" "time"
@ -689,7 +690,7 @@ func (s *erasureSets) Shutdown(ctx context.Context) error {
// MakeBucketLocation - creates a new bucket across all sets simultaneously, // MakeBucketLocation - creates a new bucket across all sets simultaneously,
// then return the first encountered error // then return the first encountered error
func (s *erasureSets) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { func (s *erasureSets) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
g := errgroup.WithNErrs(len(s.sets)) g := errgroup.WithNErrs(len(s.sets))
// Create buckets in parallel across all sets. // Create buckets in parallel across all sets.
@ -760,8 +761,8 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
} }
// GetBucketInfo - returns bucket info from one of the erasure coded set. // GetBucketInfo - returns bucket info from one of the erasure coded set.
func (s *erasureSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) { func (s *erasureSets) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bucketInfo BucketInfo, err error) {
return s.getHashedSet("").GetBucketInfo(ctx, bucket) return s.getHashedSet("").GetBucketInfo(ctx, bucket, opts)
} }
// IsNotificationSupported returns whether bucket notification is applicable for this layer. // IsNotificationSupported returns whether bucket notification is applicable for this layer.
@ -825,7 +826,7 @@ func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObj
index := index index := index
g.Go(func() error { g.Go(func() error {
if errs[index] == nil { if errs[index] == nil {
return sets[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{}) return sets[index].MakeBucketWithLocation(ctx, bucket, MakeBucketOptions{})
} }
return nil return nil
}, index) }, index)
@ -837,7 +838,7 @@ func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObj
// List all buckets from one of the set, we are not doing merge // List all buckets from one of the set, we are not doing merge
// sort here just for simplification. As per design it is assumed // sort here just for simplification. As per design it is assumed
// that all buckets are present on all sets. // that all buckets are present on all sets.
func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { func (s *erasureSets) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
var listBuckets []BucketInfo var listBuckets []BucketInfo
healBuckets := map[string]VolInfo{} healBuckets := map[string]VolInfo{}
for _, set := range s.sets { for _, set := range s.sets {
@ -847,8 +848,34 @@ func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, er
} }
} }
// include deleted buckets in listBuckets output
deletedBuckets := map[string]VolInfo{}
if opts.Deleted {
for _, set := range s.sets {
// lists all deleted buckets across drives.
if err := listDeletedBuckets(ctx, set.getDisks(), deletedBuckets, s.defaultParityCount); err != nil {
return nil, err
}
}
}
for _, v := range healBuckets { for _, v := range healBuckets {
listBuckets = append(listBuckets, BucketInfo(v)) bi := BucketInfo{
Name: v.Name,
Created: v.Created,
}
if vi, ok := deletedBuckets[v.Name]; ok {
bi.Deleted = vi.Created
}
listBuckets = append(listBuckets, bi)
}
for _, v := range deletedBuckets {
if _, ok := healBuckets[v.Name]; !ok {
listBuckets = append(listBuckets, BucketInfo{
Name: v.Name,
Deleted: v.Created,
})
}
} }
sort.Slice(listBuckets, func(i, j int) bool { sort.Slice(listBuckets, func(i, j int) bool {
@ -858,6 +885,45 @@ func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, er
return listBuckets, nil return listBuckets, nil
} }
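The loop above folds the .deleted tombstones into the normal listing: a bucket present in both maps keeps its Created time and gains a Deleted time, while a tombstone-only bucket is reported with just the Deleted time. Here is a runnable sketch of that merge, using simplified stand-ins for VolInfo and BucketInfo:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// Simplified shapes; the real VolInfo and BucketInfo carry more fields.
type volInfo struct {
	Name    string
	Created time.Time
}

type bucketInfo struct {
	Name    string
	Created time.Time
	Deleted time.Time
}

// mergeBuckets mirrors the merge done in ListBuckets above: live buckets come
// first, tombstone-only buckets are appended with just the Deleted timestamp.
func mergeBuckets(live, deleted map[string]volInfo) []bucketInfo {
	out := make([]bucketInfo, 0, len(live)+len(deleted))
	for _, v := range live {
		bi := bucketInfo{Name: v.Name, Created: v.Created}
		if d, ok := deleted[v.Name]; ok {
			// Bucket exists but a delete is still being healed across sites.
			bi.Deleted = d.Created
		}
		out = append(out, bi)
	}
	for _, d := range deleted {
		if _, ok := live[d.Name]; !ok {
			out = append(out, bucketInfo{Name: d.Name, Deleted: d.Created})
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Name < out[j].Name })
	return out
}

func main() {
	now := time.Now()
	live := map[string]volInfo{"alpha": {Name: "alpha", Created: now}}
	deleted := map[string]volInfo{"beta": {Name: "beta", Created: now}}
	fmt.Println(mergeBuckets(live, deleted))
}
```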
// listDeletedBuckets lists deleted buckets from all disks.
func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets map[string]VolInfo, readQuorum int) error {
g := errgroup.WithNErrs(len(storageDisks))
var mu sync.Mutex
for index := range storageDisks {
index := index
g.Go(func() error {
if storageDisks[index] == nil {
// we ignore disk not found errors
return nil
}
volsInfo, err := storageDisks[index].ListDir(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix), -1)
if err != nil {
if err == errFileNotFound {
return nil
}
return err
}
for _, volName := range volsInfo {
mu.Lock()
if _, ok := delBuckets[volName]; !ok {
vi, err := storageDisks[index].StatVol(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, volName))
if err != nil {
mu.Unlock()
return err
}
bkt := strings.TrimPrefix(vi.Name, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix))
bkt = strings.TrimPrefix(bkt, slashSeparator)
bkt = strings.TrimSuffix(bkt, slashSeparator)
vi.Name = bkt
delBuckets[bkt] = vi
}
mu.Unlock()
}
return nil
}, index)
}
return reduceReadQuorumErrs(ctx, g.Wait(), bucketMetadataOpIgnoredErrs, readQuorum)
}
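listDeletedBuckets stats each vol under .minio.sys/buckets/.deleted and then strips that prefix back off to recover the bucket name. A small sketch of the trimming step, with the constants assumed to match those introduced in this diff:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// Assumed values mirroring the constants in the diff.
const (
	minioMetaBucket      = ".minio.sys"
	bucketMetaPrefix     = "buckets"
	deletedBucketsPrefix = ".deleted"
	slashSeparator       = "/"
)

// bucketFromTombstone reverses the path composition used by markDelete:
// ".minio.sys/buckets/.deleted/<bucket>" -> "<bucket>".
func bucketFromTombstone(volName string) string {
	bkt := strings.TrimPrefix(volName, path.Join(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix))
	bkt = strings.TrimPrefix(bkt, slashSeparator)
	return strings.TrimSuffix(bkt, slashSeparator)
}

func main() {
	vol := path.Join(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, "photos")
	fmt.Println(bucketFromTombstone(vol)) // photos
}
```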
// --- Object Operations --- // --- Object Operations ---
// GetObjectNInfo - returns object info and locked object ReadCloser // GetObjectNInfo - returns object info and locked object ReadCloser

View File

@ -108,16 +108,40 @@ func newErasureSingle(ctx context.Context, storageDisk StorageAPI, format *forma
// List all buckets from one of the set, we are not doing merge // List all buckets from one of the set, we are not doing merge
// sort here just for simplification. As per design it is assumed // sort here just for simplification. As per design it is assumed
// that all buckets are present on all sets. // that all buckets are present on all sets.
func (es *erasureSingle) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) { func (es *erasureSingle) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
var listBuckets []BucketInfo var listBuckets []BucketInfo
healBuckets := map[string]VolInfo{} healBuckets := map[string]VolInfo{}
// lists all unique buckets across drives. // lists all unique buckets across drives.
if err := listAllBuckets(ctx, []StorageAPI{es.disk}, healBuckets, 0); err != nil { if err := listAllBuckets(ctx, []StorageAPI{es.disk}, healBuckets, 0); err != nil {
return nil, err return nil, err
} }
// include deleted buckets in listBuckets output
deletedBuckets := map[string]VolInfo{}
if opts.Deleted {
// lists all deleted buckets across drives.
if err := listDeletedBuckets(ctx, []StorageAPI{es.disk}, deletedBuckets, 0); err != nil {
return nil, err
}
}
for _, v := range healBuckets { for _, v := range healBuckets {
listBuckets = append(listBuckets, BucketInfo(v)) bi := BucketInfo{
Name: v.Name,
Created: v.Created,
}
if vi, ok := deletedBuckets[v.Name]; ok {
bi.Deleted = vi.Created
}
listBuckets = append(listBuckets, bi)
}
for _, v := range deletedBuckets {
if _, ok := healBuckets[v.Name]; !ok {
listBuckets = append(listBuckets, BucketInfo{
Name: v.Name,
Deleted: v.Created,
})
}
} }
sort.Slice(listBuckets, func(i, j int) bool { sort.Slice(listBuckets, func(i, j int) bool {
@ -256,7 +280,7 @@ type renameAllStorager interface {
// Bucket operations // Bucket operations
// MakeBucket - make a bucket. // MakeBucket - make a bucket.
func (es *erasureSingle) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { func (es *erasureSingle) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
defer NSUpdated(bucket, slashSeparator) defer NSUpdated(bucket, slashSeparator)
// Lock the bucket name before creating. // Lock the bucket name before creating.
@ -289,6 +313,7 @@ func (es *erasureSingle) MakeBucketWithLocation(ctx context.Context, bucket stri
// If it doesn't exist we get a new, so ignore errors // If it doesn't exist we get a new, so ignore errors
meta := newBucketMetadata(bucket) meta := newBucketMetadata(bucket)
meta.SetCreatedAt(opts.CreatedAt)
if opts.LockEnabled { if opts.LockEnabled {
meta.VersioningConfigXML = enabledBucketVersioningConfig meta.VersioningConfigXML = enabledBucketVersioningConfig
meta.ObjectLockConfigXML = enabledBucketObjectLockConfig meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
@ -308,12 +333,17 @@ func (es *erasureSingle) MakeBucketWithLocation(ctx context.Context, bucket stri
} }
// GetBucketInfo - returns BucketInfo for a bucket. // GetBucketInfo - returns BucketInfo for a bucket.
func (es *erasureSingle) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) { func (es *erasureSingle) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bi BucketInfo, e error) {
volInfo, err := es.disk.StatVol(ctx, bucket) volInfo, err := es.disk.StatVol(ctx, bucket)
if err != nil { if err != nil {
if opts.Deleted {
if dvi, derr := es.disk.StatVol(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket)); derr == nil {
return BucketInfo{Name: bucket, Deleted: dvi.Created}, nil
}
}
return bi, toObjectErr(err, bucket) return bi, toObjectErr(err, bucket)
} }
return BucketInfo(volInfo), nil return BucketInfo{Name: volInfo.Name, Created: volInfo.Created}, nil
} }
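With opts.Deleted set, a bucket that only survives as a tombstone is reported with its Deleted time instead of an error. The sketch below mimics that fallback with a map-backed fake disk; fakeDisk and bucketState are illustrative stand-ins, not MinIO APIs:

```go
package main

import (
	"errors"
	"fmt"
	"path"
	"time"
)

// Assumed constants matching those introduced in this diff.
const (
	minioMetaBucket      = ".minio.sys"
	bucketMetaPrefix     = "buckets"
	deletedBucketsPrefix = ".deleted"
)

var errVolumeNotFound = errors.New("volume not found")

// fakeDisk stands in for StorageAPI.StatVol: vol path -> creation time.
type fakeDisk map[string]time.Time

func (d fakeDisk) StatVol(vol string) (time.Time, error) {
	if t, ok := d[vol]; ok {
		return t, nil
	}
	return time.Time{}, errVolumeNotFound
}

// bucketState mirrors the fallback in GetBucketInfo above: try the live vol
// first, then the tombstone under .minio.sys/buckets/.deleted when requested.
func bucketState(d fakeDisk, bucket string, includeDeleted bool) string {
	if _, err := d.StatVol(bucket); err == nil {
		return "live"
	}
	if includeDeleted {
		if _, err := d.StatVol(path.Join(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, bucket)); err == nil {
			return "deleted, pending site-replication sync"
		}
	}
	return "not found"
}

func main() {
	d := fakeDisk{path.Join(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, "photos"): time.Now()}
	fmt.Println(bucketState(d, "photos", true))
}
```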
// DeleteBucket - deletes a bucket. // DeleteBucket - deletes a bucket.
@ -326,6 +356,28 @@ func (es *erasureSingle) DeleteBucket(ctx context.Context, bucket string, opts D
deleteBucketMetadata(ctx, es, bucket) deleteBucketMetadata(ctx, es, bucket)
globalBucketMetadataSys.Remove(bucket) globalBucketMetadataSys.Remove(bucket)
if err == nil || errors.Is(err, errVolumeNotFound) {
if opts.SRDeleteOp == MarkDelete {
es.markDelete(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
}
}
return toObjectErr(err, bucket)
}
// markDelete creates a vol entry in .minio.sys/buckets/.deleted until site replication
// syncs the delete to peers
func (es *erasureSingle) markDelete(ctx context.Context, bucket, prefix string) error {
err := es.disk.MakeVol(ctx, pathJoin(bucket, prefix))
if err != nil && errors.Is(err, errVolumeExists) {
return nil
}
return toObjectErr(err, bucket)
}
// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
// syncs the delete to peers OR on a new MakeBucket call.
func (es *erasureSingle) purgeDelete(ctx context.Context, bucket, prefix string) error {
err := es.disk.DeleteVol(ctx, pathJoin(bucket, prefix), true)
return toObjectErr(err, bucket) return toObjectErr(err, bucket)
} }
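markDelete treats errVolumeExists as success and purgeDelete force-deletes the vol, so repeated heal attempts are harmless. The following is a directory-based analogy, with plain os calls standing in for MakeVol and DeleteVol, that shows the same idempotent behaviour:

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// markDelete analogue: creating an already-present tombstone is not an error,
// mirroring the errVolumeExists handling above.
func markDelete(root, tombstone string) error {
	err := os.Mkdir(filepath.Join(root, tombstone), 0o755)
	if err != nil && errors.Is(err, os.ErrExist) {
		return nil // already marked: repeated heal attempts succeed
	}
	return err
}

// purgeDelete analogue: remove the tombstone and anything under it,
// mirroring DeleteVol with forceDelete set to true.
func purgeDelete(root, tombstone string) error {
	return os.RemoveAll(filepath.Join(root, tombstone))
}

func main() {
	root, _ := os.MkdirTemp("", "deleted-buckets")
	defer os.RemoveAll(root)

	fmt.Println(markDelete(root, "photos"))  // <nil>
	fmt.Println(markDelete(root, "photos"))  // <nil> again, tombstone already present
	fmt.Println(purgeDelete(root, "photos")) // <nil>
}
```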
@ -3235,7 +3287,7 @@ func (es *erasureSingle) NSScanner(ctx context.Context, bf *bloomFilter, updates
results := make([]dataUsageCache, 1) results := make([]dataUsageCache, 1)
var firstErr error var firstErr error
allBuckets, err := es.ListBuckets(ctx) allBuckets, err := es.ListBuckets(ctx, BucketOptions{})
if err != nil { if err != nil {
return err return err
} }

View File

@ -56,7 +56,7 @@ func TestReadFSMetadata(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {
@ -92,7 +92,7 @@ func TestWriteFSMetadata(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {

View File

@ -45,7 +45,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
// Create a context we can cancel. // Create a context we can cancel.
ctx, cancel := context.WithCancel(GlobalContext) ctx, cancel := context.WithCancel(GlobalContext)
obj.MakeBucketWithLocation(ctx, bucketName, BucketOptions{}) obj.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{})
uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{}) uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{})
if err != nil { if err != nil {
@ -99,7 +99,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -124,7 +124,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
data := []byte("12345") data := []byte("12345")
dataLen := int64(len(data)) dataLen := int64(len(data))
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -157,7 +157,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
objectName := "object" objectName := "object"
data := []byte("12345") data := []byte("12345")
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -190,7 +190,7 @@ func TestCompleteMultipartUpload(t *testing.T) {
objectName := "object" objectName := "object"
data := []byte("12345") data := []byte("12345")
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -227,7 +227,7 @@ func TestAbortMultipartUpload(t *testing.T) {
objectName := "object" objectName := "object"
data := []byte("12345") data := []byte("12345")
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -258,7 +258,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }

View File

@ -254,7 +254,7 @@ func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates cha
return err return err
} }
totalCache.Info.Name = dataUsageRoot totalCache.Info.Name = dataUsageRoot
buckets, err := fs.ListBuckets(ctx) buckets, err := fs.ListBuckets(ctx, BucketOptions{})
if err != nil { if err != nil {
return err return err
} }
@ -453,7 +453,7 @@ func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileI
} }
// MakeBucketWithLocation - create a new bucket, returns if it already exists. // MakeBucketWithLocation - create a new bucket, returns if it already exists.
func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
if opts.LockEnabled || opts.VersioningEnabled { if opts.LockEnabled || opts.VersioningEnabled {
return NotImplemented{} return NotImplemented{}
} }
@ -524,7 +524,7 @@ func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) erro
} }
// GetBucketInfo - fetch bucket metadata info. // GetBucketInfo - fetch bucket metadata info.
func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) { func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bi BucketInfo, e error) {
st, err := fs.statBucketDir(ctx, bucket) st, err := fs.statBucketDir(ctx, bucket)
if err != nil { if err != nil {
return bi, toObjectErr(err, bucket) return bi, toObjectErr(err, bucket)
@ -543,7 +543,7 @@ func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucke
} }
// ListBuckets - list all s3 compatible buckets (directories) at fsPath. // ListBuckets - list all s3 compatible buckets (directories) at fsPath.
func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { func (fs *FSObjects) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) {
if err := checkPathLength(fs.fsPath); err != nil { if err := checkPathLength(fs.fsPath); err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return nil, err return nil, err

View File

@ -62,7 +62,7 @@ func TestFSShutdown(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
objectContent := "12345" objectContent := "12345"
obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{})
return fs, disk return fs, disk
} }
@ -95,18 +95,18 @@ func TestFSGetBucketInfo(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
bucketName := "bucket" bucketName := "bucket"
err := obj.MakeBucketWithLocation(GlobalContext, "a", BucketOptions{}) err := obj.MakeBucketWithLocation(GlobalContext, "a", MakeBucketOptions{})
if !isSameType(err, BucketNameInvalid{}) { if !isSameType(err, BucketNameInvalid{}) {
t.Fatal("BucketNameInvalid error not returned") t.Fatal("BucketNameInvalid error not returned")
} }
err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) err = obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Test with valid parameters // Test with valid parameters
info, err := fs.GetBucketInfo(GlobalContext, bucketName) info, err := fs.GetBucketInfo(GlobalContext, bucketName, BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -115,7 +115,7 @@ func TestFSGetBucketInfo(t *testing.T) {
} }
// Test with non-existent bucket // Test with non-existent bucket
_, err = fs.GetBucketInfo(GlobalContext, "a") _, err = fs.GetBucketInfo(GlobalContext, "a", BucketOptions{})
if !isSameType(err, BucketNotFound{}) { if !isSameType(err, BucketNotFound{}) {
t.Fatal("BucketNotFound error not returned") t.Fatal("BucketNotFound error not returned")
} }
@ -123,7 +123,7 @@ func TestFSGetBucketInfo(t *testing.T) {
// Check for buckets and should get disk not found. // Check for buckets and should get disk not found.
os.RemoveAll(disk) os.RemoveAll(disk)
if _, err = fs.GetBucketInfo(GlobalContext, bucketName); err != nil { if _, err = fs.GetBucketInfo(GlobalContext, bucketName, BucketOptions{}); err != nil {
if !isSameType(err, BucketNotFound{}) { if !isSameType(err, BucketNotFound{}) {
t.Fatal("BucketNotFound error not returned") t.Fatal("BucketNotFound error not returned")
} }
@ -139,7 +139,7 @@ func TestFSPutObject(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "1/2/3/4/object" objectName := "1/2/3/4/object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -179,7 +179,7 @@ func TestFSDeleteObject(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
// Test with invalid bucket name // Test with invalid bucket name
@ -223,7 +223,7 @@ func TestFSDeleteBucket(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
bucketName := "bucket" bucketName := "bucket"
err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -242,7 +242,7 @@ func TestFSDeleteBucket(t *testing.T) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}) obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
// Delete bucket should get error disk not found. // Delete bucket should get error disk not found.
os.RemoveAll(disk) os.RemoveAll(disk)
@ -264,7 +264,7 @@ func TestFSListBuckets(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
bucketName := "bucket" bucketName := "bucket"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -279,7 +279,7 @@ func TestFSListBuckets(t *testing.T) {
f.Close() f.Close()
// Test list buckets to have only one entry. // Test list buckets to have only one entry.
buckets, err := fs.ListBuckets(GlobalContext) buckets, err := fs.ListBuckets(GlobalContext, BucketOptions{})
if err != nil { if err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -289,7 +289,7 @@ func TestFSListBuckets(t *testing.T) {
// Test ListBuckets with disk not found. // Test ListBuckets with disk not found.
os.RemoveAll(disk) os.RemoveAll(disk)
if _, err := fs.ListBuckets(GlobalContext); err != nil { if _, err := fs.ListBuckets(GlobalContext, BucketOptions{}); err != nil {
if err != errDiskNotFound { if err != errDiskNotFound {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }

View File

@ -310,7 +310,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient, globalRefreshIAMInterval) go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient, globalRefreshIAMInterval)
if gatewayName == NASBackendGateway { if gatewayName == NASBackendGateway {
buckets, err := newObject.ListBuckets(GlobalContext) buckets, err := newObject.ListBuckets(GlobalContext, BucketOptions{})
if err != nil { if err != nil {
logger.Fatal(err, "Unable to list buckets") logger.Fatal(err, "Unable to list buckets")
} }
@ -332,7 +332,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Populate existing buckets to the etcd backend // Populate existing buckets to the etcd backend
if globalDNSConfig != nil { if globalDNSConfig != nil {
buckets, err := newObject.ListBuckets(GlobalContext) buckets, err := newObject.ListBuckets(GlobalContext, BucketOptions{})
if err != nil { if err != nil {
logger.Fatal(err, "Unable to list buckets") logger.Fatal(err, "Unable to list buckets")
} }

View File

@ -734,7 +734,7 @@ func (l *s3EncObjects) cleanupStaleEncMultipartUploads(ctx context.Context, clea
// cleanupStaleUploads removes old custom encryption multipart uploads on backend // cleanupStaleUploads removes old custom encryption multipart uploads on backend
func (l *s3EncObjects) cleanupStaleUploads(ctx context.Context, expiry time.Duration) { func (l *s3EncObjects) cleanupStaleUploads(ctx context.Context, expiry time.Duration) {
buckets, err := l.s3Objects.ListBuckets(ctx) buckets, err := l.s3Objects.ListBuckets(ctx, minio.BucketOptions{})
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return return

View File

@ -325,7 +325,7 @@ func (l *s3Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []
} }
// MakeBucket creates a new container on S3 backend. // MakeBucket creates a new container on S3 backend.
func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error { func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.MakeBucketOptions) error {
if opts.LockEnabled || opts.VersioningEnabled { if opts.LockEnabled || opts.VersioningEnabled {
return minio.NotImplemented{} return minio.NotImplemented{}
} }
@ -348,7 +348,7 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket string, o
} }
// GetBucketInfo gets bucket metadata.. // GetBucketInfo gets bucket metadata..
func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) { func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string, opts minio.BucketOptions) (bi minio.BucketInfo, e error) {
buckets, err := l.Client.ListBuckets(ctx) buckets, err := l.Client.ListBuckets(ctx)
if err != nil { if err != nil {
// Listbuckets may be disallowed, proceed to check if // Listbuckets may be disallowed, proceed to check if
@ -381,7 +381,7 @@ func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.
} }
// ListBuckets lists all S3 buckets // ListBuckets lists all S3 buckets
func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) { func (l *s3Objects) ListBuckets(ctx context.Context, opts minio.BucketOptions) ([]minio.BucketInfo, error) {
buckets, err := l.Client.ListBuckets(ctx) buckets, err := l.Client.ListBuckets(ctx)
if err != nil { if err != nil {
return nil, minio.ErrorRespToObjectError(err) return nil, minio.ErrorRespToObjectError(err)

View File

@ -113,7 +113,7 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r
} }
if bucketName != "" { if bucketName != "" {
if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil { if _, err := objAPI.GetBucketInfo(ctx, bucketName, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }

View File

@ -42,6 +42,9 @@ const (
// Buckets meta prefix. // Buckets meta prefix.
bucketMetaPrefix = "buckets" bucketMetaPrefix = "buckets"
// Deleted Buckets prefix.
deletedBucketsPrefix = ".deleted"
// ETag (hex encoded md5sum) of empty string. // ETag (hex encoded md5sum) of empty string.
emptyETag = "d41d8cd98f00b204e9800998ecf8427e" emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
) )

View File

@ -78,6 +78,7 @@ type BucketInfo struct {
// Date and time when the bucket was created. // Date and time when the bucket was created.
Created time.Time Created time.Time
Deleted time.Time // non-zero when the bucket has been deleted but is still tracked for site-replication healing
} }
// ObjectInfo - represents object metadata. // ObjectInfo - represents object metadata.

View File

@ -84,7 +84,7 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
} }
for i, testCase := range testCases { for i, testCase := range testCases {
err := obj.MakeBucketWithLocation(context.Background(), testCase.bucketName, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), testCase.bucketName, MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }

View File

@ -31,7 +31,7 @@ func TestGetObjectInfo(t *testing.T) {
// Testing GetObjectInfo(). // Testing GetObjectInfo().
func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) { func testGetObjectInfo(obj ObjectLayer, instanceType string, t TestErrHandler) {
// This bucket is used for testing getObjectInfo operations. // This bucket is used for testing getObjectInfo operations.
err := obj.MakeBucketWithLocation(context.Background(), "test-getobjectinfo", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "test-getobjectinfo", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }

View File

@ -198,12 +198,12 @@ func checkPutObjectArgs(ctx context.Context, bucket, object string, obj getBucke
} }
type getBucketInfoI interface { type getBucketInfoI interface {
GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bucketInfo BucketInfo, err error)
} }
// Checks whether bucket exists and returns appropriate error if not. // Checks whether bucket exists and returns appropriate error if not.
func checkBucketExist(ctx context.Context, bucket string, obj getBucketInfoI) error { func checkBucketExist(ctx context.Context, bucket string, obj getBucketInfoI) error {
_, err := obj.GetBucketInfo(ctx, bucket) _, err := obj.GetBucketInfo(ctx, bucket, BucketOptions{})
if err != nil { if err != nil {
return err return err
} }

View File

@ -102,18 +102,25 @@ type TransitionOptions struct {
ExpireRestored bool ExpireRestored bool
} }
// BucketOptions represents bucket options for ObjectLayer bucket operations // MakeBucketOptions represents bucket options for ObjectLayer bucket operations
type BucketOptions struct { type MakeBucketOptions struct {
Location string Location string
LockEnabled bool LockEnabled bool
VersioningEnabled bool VersioningEnabled bool
ForceCreate bool // Create buckets even if they are already created. ForceCreate bool // Create buckets even if they are already created.
CreatedAt time.Time // only for site replication
} }
// DeleteBucketOptions provides options for DeleteBucket calls. // DeleteBucketOptions provides options for DeleteBucket calls.
type DeleteBucketOptions struct { type DeleteBucketOptions struct {
Force bool // Force deletion Force bool // Force deletion
NoRecreate bool // Do not recreate on delete failures NoRecreate bool // Do not recreate on delete failures
SRDeleteOp SRBucketDeleteOp // only when site replication is enabled
}
// BucketOptions provides options for ListBuckets and GetBucketInfo call.
type BucketOptions struct {
Deleted bool // true only when site replication is enabled
} }
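The old BucketOptions is split three ways: MakeBucketOptions for creation (now carrying the source cluster's CreatedAt), DeleteBucketOptions with SRDeleteOp to request a tombstone, and a new BucketOptions whose Deleted flag lets listings include delete-pending buckets. The sketch below wires the three together against an in-memory stand-in; the fakeLayer type, the simplified struct fields, and the concrete SRBucketDeleteOp value are assumptions for illustration only:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Local, simplified copies of the option shapes introduced above.
type MakeBucketOptions struct {
	CreatedAt time.Time // carried over from the source cluster during site replication
}

type SRBucketDeleteOp int

const MarkDelete SRBucketDeleteOp = 1 // value chosen arbitrarily for this sketch

type DeleteBucketOptions struct {
	SRDeleteOp SRBucketDeleteOp // ask the layer to keep a tombstone
}

type BucketOptions struct {
	Deleted bool // include delete-pending buckets in listings
}

// fakeLayer is an in-memory stand-in for the ObjectLayer bucket calls.
type fakeLayer struct {
	live      map[string]time.Time
	tombstone map[string]time.Time
}

func (f *fakeLayer) MakeBucketWithLocation(_ context.Context, bucket string, opts MakeBucketOptions) error {
	f.live[bucket] = opts.CreatedAt
	delete(f.tombstone, bucket) // re-creation clears any pending tombstone
	return nil
}

func (f *fakeLayer) DeleteBucket(_ context.Context, bucket string, opts DeleteBucketOptions) error {
	delete(f.live, bucket)
	if opts.SRDeleteOp == MarkDelete {
		f.tombstone[bucket] = time.Now()
	}
	return nil
}

func (f *fakeLayer) ListBuckets(_ context.Context, opts BucketOptions) []string {
	var out []string
	for b := range f.live {
		out = append(out, b)
	}
	if opts.Deleted {
		for b := range f.tombstone {
			out = append(out, b+" (delete pending)")
		}
	}
	return out
}

func main() {
	ctx := context.Background()
	f := &fakeLayer{live: map[string]time.Time{}, tombstone: map[string]time.Time{}}
	f.MakeBucketWithLocation(ctx, "photos", MakeBucketOptions{CreatedAt: time.Now()})
	f.DeleteBucket(ctx, "photos", DeleteBucketOptions{SRDeleteOp: MarkDelete})
	fmt.Println(f.ListBuckets(ctx, BucketOptions{Deleted: true}))
}
```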
// SetReplicaStatus sets replica status and timestamp for delete operations in ObjectOptions // SetReplicaStatus sets replica status and timestamp for delete operations in ObjectOptions
@ -187,9 +194,9 @@ type ObjectLayer interface {
LocalStorageInfo(ctx context.Context) (StorageInfo, []error) LocalStorageInfo(ctx context.Context) (StorageInfo, []error)
// Bucket operations. // Bucket operations.
MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error
GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bucketInfo BucketInfo, err error)
ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error)
DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error
ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result ListObjectsInfo, err error)
ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error)

View File

@ -41,7 +41,7 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te
"test-bucket-files", "test-bucket-files",
} }
for _, bucket := range testBuckets { for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{ err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{
VersioningEnabled: true, VersioningEnabled: true,
}) })
if err != nil { if err != nil {
@ -329,7 +329,7 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v
"test-bucket-max-keys-prefixes", "test-bucket-max-keys-prefixes",
} }
for _, bucket := range testBuckets { for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{ err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{
VersioningEnabled: versioned, VersioningEnabled: versioned,
}) })
if err != nil { if err != nil {
@ -1019,7 +1019,7 @@ func testDeleteObjectVersion(obj ObjectLayer, instanceType string, t1 TestErrHan
"bucket-suspended-version-id", "bucket-suspended-version-id",
} }
for _, bucket := range testBuckets { for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{ err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{
VersioningEnabled: true, VersioningEnabled: true,
}) })
if err != nil { if err != nil {
@ -1106,7 +1106,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
"test-bucket-max-keys-prefixes", "test-bucket-max-keys-prefixes",
} }
for _, bucket := range testBuckets { for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{VersioningEnabled: true}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{VersioningEnabled: true})
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -1736,7 +1736,7 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr
"test-bucket-list-object-continuation-2", "test-bucket-list-object-continuation-2",
} }
for _, bucket := range testBuckets { for _, bucket := range testBuckets {
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
@ -1891,7 +1891,7 @@ func BenchmarkListObjects(b *testing.B) {
bucket := "ls-benchmark-bucket" bucket := "ls-benchmark-bucket"
// Create a bucket. // Create a bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }

View File

@ -56,7 +56,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr
} }
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -89,7 +89,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
object := "minio-object" object := "minio-object"
opts := ObjectOptions{} opts := ObjectOptions{}
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// failed to create newbucket, abort. // failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -135,7 +135,7 @@ func testObjectAPIIsUploadIDExists(obj ObjectLayer, instanceType string, t TestE
object := "minio-object" object := "minio-object"
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -167,7 +167,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
object := "minio-object" object := "minio-object"
opts := ObjectOptions{} opts := ObjectOptions{}
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -179,7 +179,7 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -314,7 +314,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before initiating NewMultipartUpload. // Create bucket before initiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -332,7 +332,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// objectNames[0]. // objectNames[0].
// uploadIds [1-3]. // uploadIds [1-3].
// Bucket to test for mutiple upload Id's for a given object. // Bucket to test for mutiple upload Id's for a given object.
err = obj.MakeBucketWithLocation(context.Background(), bucketNames[1], BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), bucketNames[1], MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -353,7 +353,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// bucketnames[2]. // bucketnames[2].
// objectNames[0-2]. // objectNames[0-2].
// uploadIds [4-9]. // uploadIds [4-9].
err = obj.MakeBucketWithLocation(context.Background(), bucketNames[2], BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), bucketNames[2], MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -1195,7 +1195,7 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -1439,7 +1439,7 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucketNames[0], MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -1681,7 +1681,7 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
// objectNames[0]. // objectNames[0].
// uploadIds [0]. // uploadIds [0].
// Create bucket before intiating NewMultipartUpload. // Create bucket before intiating NewMultipartUpload.
err = obj.MakeBucketWithLocation(context.Background(), bucketNames[0], BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), bucketNames[0], MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err) t.Fatalf("%s : %s", instanceType, err)

View File

@ -48,14 +48,14 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -239,14 +239,14 @@ func testObjectAPIPutObjectDiskNotFound(obj ObjectLayer, instanceType string, di
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
} }
// Creating a dummy bucket for tests. // Creating a dummy bucket for tests.
err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), "unused-bucket", MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -351,7 +351,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())
@ -396,7 +396,7 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
object := "minio-object" object := "minio-object"
// Create bucket. // Create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), bucket, MakeBucketOptions{})
if err != nil { if err != nil {
// Failed to create newbucket, abort. // Failed to create newbucket, abort.
t.Fatalf("%s : %s", instanceType, err.Error()) t.Fatalf("%s : %s", instanceType, err.Error())

View File

@ -922,7 +922,7 @@ func isRemoteCallRequired(ctx context.Context, bucket string, objAPI ObjectLayer
return false return false
} }
if globalBucketFederation { if globalBucketFederation {
_, err := objAPI.GetBucketInfo(ctx, bucket) _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{})
return err == toObjectErr(errVolumeNotFound, bucket) return err == toObjectErr(errVolumeNotFound, bucket)
} }
return false return false
@ -2758,7 +2758,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r
return return
} }
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil { if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }
@ -2918,7 +2918,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r
return return
} }
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil { if _, err := objectAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return return
} }

View File

@ -77,7 +77,7 @@ func TestMakeBucket(t *testing.T) {
// Tests validate bucket creation. // Tests validate bucket creation.
func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMakeBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "bucket-unknown", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -91,7 +91,7 @@ func TestMultipartObjectCreation(t *testing.T) {
// Tests validate creation of part files during Multipart operation. // Tests validate creation of part files during Multipart operation.
func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMultipartObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
var opts ObjectOptions var opts ObjectOptions
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -135,7 +135,7 @@ func TestMultipartObjectAbort(t *testing.T) {
// Tests validate abortion of Multipart operation. // Tests validate abortion of Multipart operation.
func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHandler) {
var opts ObjectOptions var opts ObjectOptions
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -181,7 +181,7 @@ func TestMultipleObjectCreation(t *testing.T) {
func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) { func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrHandler) {
objects := make(map[string][]byte) objects := make(map[string][]byte)
var opts ObjectOptions var opts ObjectOptions
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -236,7 +236,7 @@ func TestPaging(t *testing.T) {
// Tests validate creation of objects and the order of listing using various filters for ListObjects operation. // Tests validate creation of objects and the order of listing using various filters for ListObjects operation.
func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 0) result, err := obj.ListObjects(context.Background(), "bucket", "", "", "", 0)
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
@ -440,7 +440,7 @@ func TestObjectOverwriteWorks(t *testing.T) {
// Tests validate overwriting of an existing object. // Tests validate overwriting of an existing object.
func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) { func testObjectOverwriteWorks(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -494,11 +494,11 @@ func TestBucketRecreateFails(t *testing.T) {
// Tests validate that recreation of the bucket fails. // Tests validate that recreation of the bucket fails.
func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) { func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHandler) {
err := obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "string", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
err = obj.MakeBucketWithLocation(context.Background(), "string", BucketOptions{}) err = obj.MakeBucketWithLocation(context.Background(), "string", MakeBucketOptions{})
if err == nil { if err == nil {
t.Fatalf("%s: Expected error but found nil.", instanceType) t.Fatalf("%s: Expected error but found nil.", instanceType)
} }
@ -599,7 +599,7 @@ func testPutObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
length := int64(len(content)) length := int64(len(content))
readerEOF := newTestReaderEOF(content) readerEOF := newTestReaderEOF(content)
readerNoEOF := newTestReaderNoEOF(content) readerNoEOF := newTestReaderNoEOF(content)
err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{}) err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
if err != nil { if err != nil {
t.Fatalf("%s: <ERROR> %s", instanceType, err) t.Fatalf("%s: <ERROR> %s", instanceType, err)
} }
@ -639,7 +639,7 @@ func TestPutObjectInSubdir(t *testing.T) {
 // Tests validate PutObject with subdirectory prefix.
 func testPutObjectInSubdir(obj ObjectLayer, instanceType string, t TestErrHandler) {
-    err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -672,7 +672,7 @@ func TestListBuckets(t *testing.T) {
 // Tests validate ListBuckets.
 func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
     // test empty list.
-    buckets, err := obj.ListBuckets(context.Background())
+    buckets, err := obj.ListBuckets(context.Background(), BucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -681,12 +681,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
     }
     // add one and test exists.
-    err = obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{})
+    err = obj.MakeBucketWithLocation(context.Background(), "bucket1", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
-    buckets, err = obj.ListBuckets(context.Background())
+    buckets, err = obj.ListBuckets(context.Background(), BucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -695,12 +695,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
     }
     // add two and test exists.
-    err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{})
+    err = obj.MakeBucketWithLocation(context.Background(), "bucket2", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
-    buckets, err = obj.ListBuckets(context.Background())
+    buckets, err = obj.ListBuckets(context.Background(), BucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -709,12 +709,12 @@ func testListBuckets(obj ObjectLayer, instanceType string, t TestErrHandler) {
     }
     // add three and test exists + prefix.
-    err = obj.MakeBucketWithLocation(context.Background(), "bucket22", BucketOptions{})
+    err = obj.MakeBucketWithLocation(context.Background(), "bucket22", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
-    buckets, err = obj.ListBuckets(context.Background())
+    buckets, err = obj.ListBuckets(context.Background(), BucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -733,15 +733,15 @@ func testListBucketsOrder(obj ObjectLayer, instanceType string, t TestErrHandler
     // if implementation contains a map, order of map keys will vary.
     // this ensures they return in the same order each time.
     // add one and test exists.
-    err := obj.MakeBucketWithLocation(context.Background(), "bucket1", BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), "bucket1", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
-    err = obj.MakeBucketWithLocation(context.Background(), "bucket2", BucketOptions{})
+    err = obj.MakeBucketWithLocation(context.Background(), "bucket2", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
-    buckets, err := obj.ListBuckets(context.Background())
+    buckets, err := obj.ListBuckets(context.Background(), BucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -786,7 +786,7 @@ func TestNonExistantObjectInBucket(t *testing.T) {
 // Tests validate that GetObject fails on a non-existent bucket as expected.
 func testNonExistantObjectInBucket(obj ObjectLayer, instanceType string, t TestErrHandler) {
-    err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -814,7 +814,7 @@ func TestGetDirectoryReturnsObjectNotFound(t *testing.T) {
 // Tests validate that GetObject on an existing directory fails as expected.
 func testGetDirectoryReturnsObjectNotFound(obj ObjectLayer, instanceType string, t TestErrHandler) {
     bucketName := "bucket"
-    err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), bucketName, MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
@@ -856,7 +856,7 @@ func TestContentType(t *testing.T) {
 // Test content-type.
 func testContentType(obj ObjectLayer, instanceType string, t TestErrHandler) {
-    err := obj.MakeBucketWithLocation(context.Background(), "bucket", BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), "bucket", MakeBucketOptions{})
     if err != nil {
         t.Fatalf("%s: <ERROR> %s", instanceType, err)
     }
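The test updates above all follow the same interface split: bucket creation now takes MakeBucketOptions, while read paths such as ListBuckets and GetBucketInfo take a separate BucketOptions. A minimal sketch of how a caller inside the cmd package might exercise both, assuming only the fields visible in this diff (Deleted on BucketOptions; CreatedAt/LockEnabled on MakeBucketOptions):

    // Sketch only; assumed to live in the cmd package next to ObjectLayer.
    func demoBucketOptionsSplit(ctx context.Context, obj ObjectLayer) error {
        // Write path: create a bucket with the renamed options struct.
        if err := obj.MakeBucketWithLocation(ctx, "demo-bucket", MakeBucketOptions{}); err != nil {
            return err
        }
        // Read path: plain listing; BucketOptions{Deleted: true} would also
        // surface buckets still held under .minio.sys/buckets/.deleted/.
        buckets, err := obj.ListBuckets(ctx, BucketOptions{})
        if err != nil {
            return err
        }
        for _, b := range buckets {
            _ = b.Name
        }
        return nil
    }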

View File

@@ -141,7 +141,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
     // objectNames[0].
     // uploadIds [0].
     // Create bucket before initiating NewMultipartUpload.
-    err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), bucketName, MakeBucketOptions{})
     if err != nil {
         // Failed to create newbucket, abort.
         t.Fatalf("%s : %s", instanceType, err.Error())
@@ -460,7 +460,7 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t
     curTime := UTCNow()
     curTimePlus5Min := curTime.Add(time.Minute * 5)
-    err = obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
+    err = obj.MakeBucketWithLocation(context.Background(), bucketName, MakeBucketOptions{})
     if err != nil {
         // Failed to create newbucket, abort.
         t.Fatalf("%s : %s", instanceType, err.Error())

View File

@@ -607,7 +607,7 @@ func serverMain(ctx *cli.Context) {
     initDataScanner(GlobalContext, newObject)
     // List buckets to heal, and be re-used for loading configs.
-    buckets, err := newObject.ListBuckets(GlobalContext)
+    buckets, err := newObject.ListBuckets(GlobalContext, BucketOptions{})
     if err != nil {
         logger.LogIf(GlobalContext, fmt.Errorf("Unable to list buckets to heal: %w", err))
     }

View File

@@ -627,7 +627,7 @@ const (
 // replication is enabled. It is responsible for the creation of the same bucket
 // on remote clusters, and creating replication rules on local and peer
 // clusters.
-func (c *SiteReplicationSys) MakeBucketHook(ctx context.Context, bucket string, opts BucketOptions) error {
+func (c *SiteReplicationSys) MakeBucketHook(ctx context.Context, bucket string, opts MakeBucketOptions) error {
     // At this point, the local bucket is created.
     c.RLock()
@@ -650,6 +650,9 @@ func (c *SiteReplicationSys) MakeBucketHook(ctx context.Context, bucket string,
     if opts.ForceCreate {
         optsMap["forceCreate"] = "true"
     }
+    createdAt, _ := globalBucketMetadataSys.CreatedAt(bucket)
+    optsMap["createdAt"] = createdAt.Format(time.RFC3339Nano)
+    opts.CreatedAt = createdAt
     // Create bucket and enable versioning on all peers.
     makeBucketConcErr := c.concDo(
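The three added lines ship the bucket's local creation time to peers twice: once through the typed MakeBucketOptions and once through the string-only optsMap used by the admin peer API. A standalone sketch of that round-trip; only the "createdAt" key and the RFC3339Nano format come from the diff, everything else is illustrative:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        createdAt := time.Date(2022, 7, 25, 17, 51, 32, 123456789, time.UTC)

        // Sending side: nanosecond precision keeps "which event is newer" comparisons exact.
        optsMap := map[string]string{"createdAt": createdAt.Format(time.RFC3339Nano)}

        // Receiving side: a missing or malformed value degrades to the zero time.
        parsed, err := time.Parse(time.RFC3339Nano, optsMap["createdAt"])
        if err != nil {
            parsed = time.Time{}
        }
        fmt.Println(parsed.Equal(createdAt)) // true
    }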
@@ -726,7 +729,7 @@ func (c *SiteReplicationSys) DeleteBucketHook(ctx context.Context, bucket string
 }
 // PeerBucketMakeWithVersioningHandler - creates bucket and enables versioning.
-func (c *SiteReplicationSys) PeerBucketMakeWithVersioningHandler(ctx context.Context, bucket string, opts BucketOptions) error {
+func (c *SiteReplicationSys) PeerBucketMakeWithVersioningHandler(ctx context.Context, bucket string, opts MakeBucketOptions) error {
     objAPI := newObjectLayerFn()
     if objAPI == nil {
         return errServerNotInitialized
@@ -751,6 +754,8 @@ func (c *SiteReplicationSys) PeerBucketMakeWithVersioningHandler(ctx context.Con
         return wrapSRErr(c.annotateErr(makeBucketWithVersion, err))
     }
+    meta.SetCreatedAt(opts.CreatedAt)
     meta.VersioningConfigXML = enabledBucketVersioningConfig
     if opts.LockEnabled {
         meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
@@ -950,7 +955,7 @@ func (c *SiteReplicationSys) PeerBucketConfigureReplHandler(ctx context.Context,
 // PeerBucketDeleteHandler - deletes bucket on local in response to a delete
 // bucket request from a peer.
-func (c *SiteReplicationSys) PeerBucketDeleteHandler(ctx context.Context, bucket string, forceDelete bool) error {
+func (c *SiteReplicationSys) PeerBucketDeleteHandler(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
     c.RLock()
     defer c.RUnlock()
     if !c.enabled {
@@ -967,8 +972,7 @@ func (c *SiteReplicationSys) PeerBucketDeleteHandler(ctx context.Context, bucket
             return err
         }
     }
-    err := objAPI.DeleteBucket(ctx, bucket, DeleteBucketOptions{Force: forceDelete})
+    err := objAPI.DeleteBucket(ctx, bucket, opts)
     if err != nil {
         if globalDNSConfig != nil {
             if err2 := globalDNSConfig.Put(bucket); err2 != nil {
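PeerBucketDeleteHandler now receives the caller's full DeleteBucketOptions instead of a bare force flag, so a peer delete carries the same semantics end to end. An illustrative call site; Force appears in this diff, while the commented SRDeleteOp field is an assumption based on getSRBucketDeleteOp further down:

    opts := DeleteBucketOptions{
        Force: true, // delete even if the bucket still holds objects/metadata
        // SRDeleteOp: getSRBucketDeleteOp(true), // assumed field; see SRBucketDeleteOp below
    }
    if err := globalSiteReplicationSys.PeerBucketDeleteHandler(ctx, bucket, opts); err != nil {
        return err // surfaced back to the peer that issued the delete
    }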
@@ -1498,7 +1502,7 @@ func (c *SiteReplicationSys) listBuckets(ctx context.Context) ([]BucketInfo, err
     if objAPI == nil {
         return nil, errSRObjectLayerNotReady
     }
-    return objAPI.ListBuckets(ctx)
+    return objAPI.ListBuckets(ctx, BucketOptions{Deleted: true})
 }
 // syncToAllPeers is used for syncing local data to all remote peers, it is
@@ -1521,11 +1525,12 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error {
         }
     }
-    var opts BucketOptions
+    var opts MakeBucketOptions
     if lockConfig != nil {
         opts.LockEnabled = lockConfig.ObjectLockEnabled == "Enabled"
     }
+    opts.CreatedAt, _ = globalBucketMetadataSys.CreatedAt(bucket)
     // Now call the MakeBucketHook on existing bucket - this will
     // create buckets and replication rules on peer clusters.
     err = c.MakeBucketHook(ctx, bucket, opts)
@@ -2161,7 +2166,7 @@ func (c *SiteReplicationSys) RemoveRemoteTargetsForEndpoint(ctx context.Context,
             m[t.Arn] = t
         }
     }
-    buckets, err := objectAPI.ListBuckets(ctx)
+    buckets, err := objectAPI.ListBuckets(ctx, BucketOptions{})
     for _, b := range buckets {
         config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, b.Name)
         if err != nil {
@@ -2361,10 +2366,8 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O
     sris := make([]madmin.SRInfo, len(c.state.Peers))
     depIdx := make(map[string]int, len(c.state.Peers))
-    depIDs := make([]string, 0, len(c.state.Peers))
     i := 0
     for d := range c.state.Peers {
-        depIDs = append(depIDs, d)
         depIdx[d] = i
         i++
     }
@@ -2810,14 +2813,18 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O
     info.BucketStats[b] = make(map[string]srBucketStatsSummary, numSites)
     for i, s := range slc {
         dIdx := depIdx[s.DeploymentID]
-        var hasBucket bool
-        if bi, ok := sris[dIdx].Buckets[s.Bucket]; ok {
-            hasBucket = !bi.CreatedAt.Equal(timeSentinel)
+        var hasBucket, isBucketMarkedDeleted bool
+        bi, ok := sris[dIdx].Buckets[s.Bucket]
+        if ok {
+            isBucketMarkedDeleted = !bi.DeletedAt.IsZero() && (bi.CreatedAt.IsZero() || bi.DeletedAt.After(bi.CreatedAt))
+            hasBucket = !bi.CreatedAt.IsZero()
         }
         quotaCfgSet := hasBucket && quotaCfgs[i] != nil && *quotaCfgs[i] != madmin.BucketQuota{}
         ss := madmin.SRBucketStatsSummary{
             DeploymentID:        s.DeploymentID,
             HasBucket:           hasBucket,
+            BucketMarkedDeleted: isBucketMarkedDeleted,
             TagMismatch:         tagMismatch,
             OLockConfigMismatch: olockCfgMismatch,
             SSEConfigMismatch:   sseCfgMismatch,
@@ -3083,13 +3090,16 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI
         err error
     )
     if opts.Entity == madmin.SRBucketEntity {
-        bi, err := objAPI.GetBucketInfo(ctx, opts.EntityValue)
+        bi, err := objAPI.GetBucketInfo(ctx, opts.EntityValue, BucketOptions{Deleted: opts.ShowDeleted})
         if err != nil {
+            if isErrBucketNotFound(err) {
+                return info, nil
+            }
             return info, errSRBackendIssue(err)
         }
         buckets = append(buckets, bi)
     } else {
-        buckets, err = objAPI.ListBuckets(ctx)
+        buckets, err = objAPI.ListBuckets(ctx, BucketOptions{Deleted: opts.ShowDeleted})
         if err != nil {
             return info, errSRBackendIssue(err)
         }
@@ -3097,16 +3107,17 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI
     info.Buckets = make(map[string]madmin.SRBucketInfo, len(buckets))
     for _, bucketInfo := range buckets {
         bucket := bucketInfo.Name
-        createdAt, err := globalBucketMetadataSys.CreatedAt(bucket)
-        if err != nil {
-            return info, errSRBackendIssue(err)
-        }
+        bucketExists := bucketInfo.Deleted.IsZero() || (!bucketInfo.Created.IsZero() && bucketInfo.Created.After(bucketInfo.Deleted))
         bms := madmin.SRBucketInfo{
             Bucket:    bucket,
-            CreatedAt: createdAt,
+            CreatedAt: bucketInfo.Created.UTC(),
+            DeletedAt: bucketInfo.Deleted.UTC(),
             Location:  globalSite.Region,
         }
+        if !bucketExists {
+            info.Buckets[bucket] = bms
+            continue
+        }
         // Get bucket policy if present.
         policy, updatedAt, err := globalBucketMetadataSys.GetPolicyConfig(bucket)
         found := true
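The new bucketExists check above is the core predicate this PR leans on: a bucket counts as live only if it was never deleted, or if it was re-created after its last delete. A self-contained restatement using the same comparisons:

    package main

    import (
        "fmt"
        "time"
    )

    func bucketExists(created, deleted time.Time) bool {
        return deleted.IsZero() || (!created.IsZero() && created.After(deleted))
    }

    func main() {
        now := time.Now()
        fmt.Println(bucketExists(now, time.Time{}))         // true: never deleted
        fmt.Println(bucketExists(now.Add(-time.Hour), now)) // false: the delete is the latest event
        fmt.Println(bucketExists(now, now.Add(-time.Hour))) // true: re-created after the delete
    }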
@@ -3555,22 +3566,60 @@ type srStatusInfo struct {
     GroupStats map[string]map[string]srGroupStatsSummary
 }
+// SRBucketDeleteOp - type of delete op
+type SRBucketDeleteOp string
+
+const (
+    // MarkDelete creates .minio.sys/buckets/.deleted/<bucket> vol entry to hold onto deleted bucket's state
+    // until peers are synced in site replication setup.
+    MarkDelete SRBucketDeleteOp = "MarkDelete"
+    // Purge deletes the .minio.sys/buckets/.deleted/<bucket> vol entry
+    Purge SRBucketDeleteOp = "Purge"
+    // NoOp no action needed
+    NoOp SRBucketDeleteOp = "NoOp"
+)
+
+// Empty returns true if this Op is not set
+func (s SRBucketDeleteOp) Empty() bool {
+    return string(s) == "" || string(s) == string(NoOp)
+}
+
+func getSRBucketDeleteOp(isSiteReplicated bool) SRBucketDeleteOp {
+    if !isSiteReplicated {
+        return NoOp
+    }
+    return MarkDelete
+}
+
 func (c *SiteReplicationSys) healBuckets(ctx context.Context, objAPI ObjectLayer) error {
-    info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
-        Buckets: true,
-    })
+    buckets, err := c.listBuckets(ctx)
     if err != nil {
         return err
     }
-    for bucket := range info.BucketStats {
-        c.healCreateMissingBucket(ctx, objAPI, bucket, info)
-        c.healVersioningMetadata(ctx, objAPI, bucket, info)
-        c.healOLockConfigMetadata(ctx, objAPI, bucket, info)
-        c.healSSEMetadata(ctx, objAPI, bucket, info)
-        c.healBucketReplicationConfig(ctx, objAPI, bucket, info)
-        c.healBucketPolicies(ctx, objAPI, bucket, info)
-        c.healTagMetadata(ctx, objAPI, bucket, info)
-        c.healBucketQuotaConfig(ctx, objAPI, bucket, info)
+    for _, bi := range buckets {
+        bucket := bi.Name
+        info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{
+            Entity:      madmin.SRBucketEntity,
+            EntityValue: bucket,
+            ShowDeleted: true,
+        })
+        if err != nil {
+            logger.LogIf(ctx, err)
+            continue
+        }
+        c.healBucket(ctx, objAPI, bucket, info)
+        if bi.Deleted.IsZero() || (!bi.Created.IsZero() && bi.Deleted.Before(bi.Created)) {
+            c.healVersioningMetadata(ctx, objAPI, bucket, info)
+            c.healOLockConfigMetadata(ctx, objAPI, bucket, info)
+            c.healSSEMetadata(ctx, objAPI, bucket, info)
+            c.healBucketReplicationConfig(ctx, objAPI, bucket, info)
+            c.healBucketPolicies(ctx, objAPI, bucket, info)
+            c.healTagMetadata(ctx, objAPI, bucket, info)
+            c.healBucketQuotaConfig(ctx, objAPI, bucket, info)
+        }
         // Notification and ILM are site specific settings.
     }
     return nil
@@ -4014,84 +4063,176 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI
     return nil
 }
-func (c *SiteReplicationSys) healCreateMissingBucket(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
-    bs := info.BucketStats[bucket]
+func (c *SiteReplicationSys) purgeDeletedBucket(ctx context.Context, objAPI ObjectLayer, bucket string) {
+    z, ok := objAPI.(*erasureServerPools)
+    if !ok {
+        if z, ok := objAPI.(*erasureSingle); ok {
+            z.purgeDelete(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
+        }
+        return
+    }
+    z.purgeDelete(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix, bucket))
+}
+
+// healBucket creates/deletes the bucket according to latest state across clusters participating in site replication.
+func (c *SiteReplicationSys) healBucket(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error {
+    bs := info.BucketStats[bucket]
     c.RLock()
     defer c.RUnlock()
     if !c.enabled {
         return nil
     }
+    numSites := len(c.state.Peers)
+    mostRecent := func(d1, d2 time.Time) time.Time {
+        if d1.IsZero() {
+            return d2
+        }
+        if d2.IsZero() {
+            return d1
+        }
+        if d1.After(d2) {
+            return d1
+        }
+        return d2
+    }
-    bucketCnt := 0
     var (
         latestID   string
         lastUpdate time.Time
+        withB      []string
+        missingB   []string
+        deletedCnt int
     )
-    var dIDs []string
     for dID, ss := range bs {
-        if ss.HasBucket {
-            bucketCnt++
-        } else {
-            dIDs = append(dIDs, dID)
-        }
         if lastUpdate.IsZero() {
-            lastUpdate = ss.meta.CreatedAt
+            lastUpdate = mostRecent(ss.meta.CreatedAt, ss.meta.DeletedAt)
             latestID = dID
         }
-        if ss.meta.CreatedAt.After(lastUpdate) {
-            lastUpdate = ss.meta.CreatedAt
+        recentUpdt := mostRecent(ss.meta.CreatedAt, ss.meta.DeletedAt)
+        if recentUpdt.After(lastUpdate) {
+            lastUpdate = recentUpdt
             latestID = dID
         }
+        if ss.BucketMarkedDeleted {
+            deletedCnt++
+        }
+        if ss.HasBucket {
+            withB = append(withB, dID)
+        } else {
+            missingB = append(missingB, dID)
+        }
     }
     latestPeerName := info.Sites[latestID].Name
     bStatus := info.BucketStats[bucket][latestID].meta
-    var opts BucketOptions
-    optsMap := make(map[string]string)
-    if bStatus.Location != "" {
-        optsMap["location"] = bStatus.Location
-        opts.Location = bStatus.Location
-    }
-    optsMap["versioningEnabled"] = "true"
-    opts.VersioningEnabled = true
-    if bStatus.ObjectLockConfig != nil {
-        config, err := base64.StdEncoding.DecodeString(*bStatus.ObjectLockConfig)
-        if err != nil {
-            return err
-        }
-        if bytes.Equal([]byte(string(config)), enabledBucketObjectLockConfig) {
-            optsMap["lockEnabled"] = "true"
-            opts.LockEnabled = true
-        }
-    }
-    for _, dID := range dIDs {
-        peerName := info.Sites[dID].Name
-        if dID == globalDeploymentID {
-            err := c.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
-            if err != nil {
-                return c.annotateErr(makeBucketWithVersion, fmt.Errorf("error healing bucket for site replication %w from %s -> %s",
-                    err, latestPeerName, peerName))
-            }
-        } else {
-            admClient, err := c.getAdminClient(ctx, dID)
-            if err != nil {
-                return c.annotateErr(configureReplication, fmt.Errorf("unable to use admin client for %s: %w", dID, err))
-            }
-            if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.MakeWithVersioningBktOp, optsMap); err != nil {
-                return c.annotatePeerErr(peerName, makeBucketWithVersion, err)
-            }
-            if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.ConfigureReplBktOp, nil); err != nil {
-                return c.annotatePeerErr(peerName, configureReplication, err)
-            }
-        }
-    }
-    if len(dIDs) > 0 {
-        // configure replication from current cluster to other clusters
-        err := c.PeerBucketConfigureReplHandler(ctx, bucket)
-        if err != nil {
-            return c.annotateErr(configureReplication, err)
-        }
-    }
+    isMakeBucket := len(missingB) > 0
+    deleteOp := NoOp
+    if latestID != globalDeploymentID {
+        return nil
+    }
+    if lastUpdate.Equal(bStatus.DeletedAt) {
+        isMakeBucket = false
+        switch {
+        case len(withB) == numSites && deletedCnt == numSites:
+            deleteOp = NoOp
+        case len(withB) == 0 && len(missingB) == numSites:
+            deleteOp = Purge
+        default:
+            deleteOp = MarkDelete
+        }
+    }
+    if isMakeBucket {
+        var opts MakeBucketOptions
+        optsMap := make(map[string]string)
+        if bStatus.Location != "" {
+            optsMap["location"] = bStatus.Location
+            opts.Location = bStatus.Location
+        }
+        optsMap["versioningEnabled"] = "true"
+        opts.VersioningEnabled = true
+        opts.CreatedAt = bStatus.CreatedAt
+        optsMap["createdAt"] = bStatus.CreatedAt.Format(time.RFC3339Nano)
+        if bStatus.ObjectLockConfig != nil {
+            config, err := base64.StdEncoding.DecodeString(*bStatus.ObjectLockConfig)
+            if err != nil {
+                return err
+            }
+            if bytes.Equal([]byte(string(config)), enabledBucketObjectLockConfig) {
+                optsMap["lockEnabled"] = "true"
+                opts.LockEnabled = true
+            }
+        }
+        for _, dID := range missingB {
+            peerName := info.Sites[dID].Name
+            if dID == globalDeploymentID {
+                err := c.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts)
+                if err != nil {
+                    return c.annotateErr(makeBucketWithVersion, fmt.Errorf("error healing bucket for site replication %w from %s -> %s",
+                        err, latestPeerName, peerName))
+                }
+            } else {
+                admClient, err := c.getAdminClient(ctx, dID)
+                if err != nil {
+                    return c.annotateErr(configureReplication, fmt.Errorf("unable to use admin client for %s: %w", dID, err))
+                }
+                if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.MakeWithVersioningBktOp, optsMap); err != nil {
+                    return c.annotatePeerErr(peerName, makeBucketWithVersion, err)
+                }
+                if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.ConfigureReplBktOp, nil); err != nil {
+                    return c.annotatePeerErr(peerName, configureReplication, err)
+                }
+            }
+        }
+        if len(missingB) > 0 {
+            // configure replication from current cluster to other clusters
+            err := c.PeerBucketConfigureReplHandler(ctx, bucket)
+            if err != nil {
+                return c.annotateErr(configureReplication, err)
+            }
+        }
+        return nil
+    }
+    // all buckets are marked deleted across sites at this point. It should be safe to purge the .minio.sys/buckets/.deleted/<bucket> entry
+    // from disk
+    if deleteOp == Purge {
+        for _, dID := range missingB {
+            peerName := info.Sites[dID].Name
+            if dID == globalDeploymentID {
+                c.purgeDeletedBucket(ctx, objAPI, bucket)
+            } else {
+                admClient, err := c.getAdminClient(ctx, dID)
+                if err != nil {
+                    return c.annotateErr(configureReplication, fmt.Errorf("unable to use admin client for %s: %w", dID, err))
+                }
+                if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.PurgeDeletedBucketOp, nil); err != nil {
+                    return c.annotatePeerErr(peerName, deleteBucket, err)
+                }
+            }
+        }
+    }
+    // Mark buckets deleted on remaining peers
+    if deleteOp == MarkDelete {
+        for _, dID := range withB {
+            peerName := info.Sites[dID].Name
+            if dID == globalDeploymentID {
+                err := c.PeerBucketDeleteHandler(ctx, bucket, DeleteBucketOptions{
+                    Force: true,
+                })
+                if err != nil {
+                    return c.annotateErr(deleteBucket, fmt.Errorf("error healing bucket for site replication %w from %s -> %s",
+                        err, latestPeerName, peerName))
+                }
+            } else {
+                admClient, err := c.getAdminClient(ctx, dID)
+                if err != nil {
+                    return c.annotateErr(configureReplication, fmt.Errorf("unable to use admin client for %s: %w", dID, err))
+                }
+                if err = admClient.SRPeerBucketOps(ctx, bucket, madmin.ForceDeleteBucketBktOp, nil); err != nil {
+                    return c.annotatePeerErr(peerName, deleteBucket, err)
+                }
+            }
+        }
+    }
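To summarize the branchy logic in healBucket: only the site holding the most recent update drives healing, and the outcome depends on whether that latest event was a create or a delete. A hedged restatement of that decision, with illustrative names; counts are per participating site:

    func decideHeal(lastEventIsDelete bool, numSites, withBucket, missingBucket, markedDeleted int) (recreate bool, op SRBucketDeleteOp) {
        if !lastEventIsDelete {
            // Latest state is "created": recreate the bucket wherever it is missing.
            return missingBucket > 0, NoOp
        }
        switch {
        case withBucket == numSites && markedDeleted == numSites:
            // Every site still has the bucket and every site carries the delete marker: wait.
            return false, NoOp
        case withBucket == 0 && missingBucket == numSites:
            // No site has the bucket any more: the held .deleted/<bucket> state can be purged.
            return false, Purge
        default:
            // Some sites still have the bucket: propagate the delete marker to them first.
            return false, MarkDelete
        }
    }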

View File

@@ -1523,7 +1523,7 @@ func initAPIHandlerTest(ctx context.Context, obj ObjectLayer, endpoints []string
     bucketName := getRandomBucketName()
     // Create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucketName, BucketOptions{})
+    err := obj.MakeBucketWithLocation(context.Background(), bucketName, MakeBucketOptions{})
     if err != nil {
         // failed to create newbucket, return err.
         return "", nil, err

View File

@@ -142,6 +142,9 @@ fi
 ./mc mb minio1/newbucket
+# create a bucket bucket2 on minio1.
+./mc mb minio1/bucket2
 sleep 5
 ./mc stat minio2/newbucket
 if [ $? -ne 0 ]; then
@@ -246,7 +249,8 @@ if [ $? -ne 0 ]; then
     echo "expecting tag set to be successful. exiting.."
     exit_1;
 fi
-sleep 5
+sleep 10
 val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key)
 if [ "${val}" != "val1" ]; then
@@ -259,10 +263,12 @@ kill -9 ${site1_pid}
 ./mc tag set minio2/newbucket "key=val2"
 # create a new bucket on minio2. This should replicate to minio1 after it comes online.
 ./mc mb minio2/newbucket2
+# delete bucket2 on minio2. This should replicate to minio1 after it comes online.
+./mc rb minio2/bucket2
 # Restart minio1 instance
 minio server --config-dir /tmp/minio-ldap --address ":9001" /tmp/minio-ldap-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
-sleep 10
+sleep 15
 # Test whether most recent tag update on minio2 is replicated to minio1
 val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key )
 if [ "${val}" != "val2" ]; then
@@ -270,10 +276,11 @@ if [ "${val}" != "val2" ]; then
     exit_1;
 fi
-# Test if bucket created when minio1 is down healed
-diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+# Test if bucket created/deleted when minio1 is down healed
+diff -q <(./mc ls minio1) <(./mc ls minio2) 1>/dev/null
 if [ $? -ne 0 ]; then
-    echo "expected bucket to have replicated, exiting..."
+    echo "expected \`bucket2\` delete and \`newbucket2\` creation to have replicated, exiting..."
     exit_1;
 fi
 cleanup

View File

@@ -305,6 +305,9 @@ if [ $? -ne 0 ]; then
     exit_1;
 fi
+# create a bucket bucket2 on minio1.
+./mc mb minio1/bucket2
 sleep 10
 # Test whether policy unset replicated to minio1
@@ -319,6 +322,9 @@ kill -9 ${site1_pid}
 ./mc tag set minio2/newbucket "key=val2"
 # create a new bucket on minio2. This should replicate to minio1 after it comes online.
 ./mc mb minio2/newbucket2
+# delete bucket2 on minio2. This should replicate to minio1 after it comes online.
+./mc rb minio2/bucket2
 # Restart minio1 instance
 minio server --config-dir /tmp/minio-internal --address ":9001" /tmp/minio-internal-idp1/{1...4} >/tmp/minio1_1.log 2>&1 &
 sleep 15
@@ -329,9 +335,9 @@ if [ "${val}" != "val2" ]; then
     echo "expected bucket tag to have replicated, exiting..."
     exit_1;
 fi
-# Test if bucket created when minio1 is down healed
-diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+# Test if bucket created/deleted when minio1 is down healed
+diff -q <(./mc ls minio1) <(./mc ls minio2) 1>/dev/null
 if [ $? -ne 0 ]; then
-    echo "expected bucket to have replicated, exiting..."
+    echo "expected \`bucket2\` delete and \`newbucket2\` creation to have replicated, exiting..."
     exit_1;
 fi

View File

@@ -155,6 +155,9 @@ if [ $? -eq 0 ]; then
     exit_1;
 fi
+# create a bucket bucket2 on minio1.
+./mc mb minio1/bucket2
 ./mc mb minio1/newbucket
 sleep 5
@@ -231,8 +234,8 @@ if [ $? -ne 0 ]; then
     echo "expecting tag set to be successful. exiting.."
     exit_1;
 fi
-sleep 5
+sleep 10
 val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key)
 if [ "${val}" != "val1" ]; then
     echo "expected bucket tag to have replicated, exiting..."
@@ -244,10 +247,12 @@ kill -9 ${site1_pid}
 ./mc tag set minio2/newbucket "key=val2"
 # create a new bucket on minio2. This should replicate to minio1 after it comes online.
 ./mc mb minio2/newbucket2
+# delete bucket2 on minio2. This should replicate to minio1 after it comes online.
+./mc rb minio2/bucket2
 # Restart minio1 instance
 minio server --address ":9001" --console-address ":10000" /tmp/minio1/{1...4} >/tmp/minio1_1.log 2>&1 &
-sleep 10
+sleep 15
 # Test whether most recent tag update on minio2 is replicated to minio1
 val=$(./mc tag list minio1/newbucket --json | jq -r .tagset | jq -r .key )
 if [ "${val}" != "val2" ]; then
@@ -255,9 +260,9 @@ if [ "${val}" != "val2" ]; then
     exit_1;
 fi
-# Test if bucket created when minio1 is down healed
-diff -q <(./mc ls minio1 | awk '{print $3}') <(./mc ls minio2 | awk '{print $3}') 1>/dev/null
+# Test if bucket created/deleted when minio1 is down healed
+diff -q <(./mc ls minio1) <(./mc ls minio2) 1>/dev/null
 if [ $? -ne 0 ]; then
-    echo "expected bucket to have replicated, exiting..."
+    echo "expected \`bucket2\` delete and \`newbucket2\` creation to have replicated, exiting..."
     exit_1;
 fi

go.mod
View File

@@ -48,7 +48,7 @@ require (
     github.com/minio/dperf v0.4.2
     github.com/minio/highwayhash v1.0.2
     github.com/minio/kes v0.20.0
-    github.com/minio/madmin-go v1.4.3
+    github.com/minio/madmin-go v1.4.6
     github.com/minio/minio-go/v7 v7.0.32
     github.com/minio/pkg v1.1.26
     github.com/minio/selfupdate v0.5.0

go.sum

File diff suppressed because it is too large