/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"

	"github.com/minio/minio-go/v7/pkg/s3utils"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/sync/errgroup"
)

// list all errors that can be ignored in a bucket operation.
var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

// list all errors that can be ignored in a bucket metadata operation.
var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

/// Bucket operations

// MakeBucketWithLocation - make a bucket.
func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
	// Verify if bucket is valid.
	if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
		return BucketNameInvalid{Bucket: bucket}
	}

	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))

	// Make a volume entry on all underlying storage disks.
	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
				if err := storageDisks[index].MakeVol(ctx, bucket); err != nil {
					if err != errVolumeExists {
						logger.LogIf(ctx, err)
					}
					return err
				}
				return nil
			}
			return errDiskNotFound
		}, index)
	}

	// Reduce the per-disk errors to a single error, honoring write quorum.
	writeQuorum := getWriteQuorum(len(storageDisks))
	err := reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, writeQuorum)
	return toObjectErr(err, bucket)
}
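
// undoDeleteBucket - re-creates the bucket on all available disks; used to
// roll back a failed or partially completed DeleteBucket.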
func undoDeleteBucket(storageDisks []StorageAPI, bucket string) {
	g := errgroup.WithNErrs(len(storageDisks))
	// Undo previous make bucket entry on all underlying storage disks.
	for index := range storageDisks {
		if storageDisks[index] == nil {
			continue
		}
		index := index
		g.Go(func() error {
			_ = storageDisks[index].MakeVol(context.Background(), bucket)
			return nil
		}, index)
	}

	// Wait for all make vol to finish.
	g.Wait()
}

// getBucketInfo - returns the BucketInfo from one of the load balanced disks.
func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) {
	var bucketErrs []error
	for _, disk := range er.getLoadBalancedDisks() {
		if disk == nil {
			bucketErrs = append(bucketErrs, errDiskNotFound)
			continue
		}
		volInfo, serr := disk.StatVol(ctx, bucketName)
		if serr == nil {
			return BucketInfo(volInfo), nil
		}
		err = serr
		// If the disk went offline for any reason, continue and pick the next one.
		if IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
			bucketErrs = append(bucketErrs, err)
			continue
		}
		// Return quickly on any error that cannot be ignored.
		return BucketInfo{}, err
	}
	// If all our errors were ignored, then we try to
	// reduce to one error based on read quorum.
	// `nil` is deliberately passed for ignoredErrs
	// because these errors were already ignored.
	readQuorum := getReadQuorum(len(er.getDisks()))
	return BucketInfo{}, reduceReadQuorumErrs(ctx, bucketErrs, nil, readQuorum)
}

// GetBucketInfo - returns BucketInfo for a bucket.
func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
	bucketInfo, err := er.getBucketInfo(ctx, bucket)
	if err != nil {
		return bi, toObjectErr(err, bucket)
	}
	return bucketInfo, nil
}

// deleteDanglingBucket handles dangling buckets: the quorum-reduced error may be
// `nil` even though some disks returned `errVolumeNotEmpty` for the bucket delete.
// That can only happen when those disks hold dangling objects for the bucket. In
// such a situation we simply attempt a full delete of the bucket, including the
// dangling objects. All of this happens under a lock, so a user cannot re-create
// the bucket and sneak objects into the namespace in the meantime, which makes
// this cleanup safe.
func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs []error, bucket string) {
	for index, err := range dErrs {
		if err == errVolumeNotEmpty {
			// Attempt to delete bucket again.
			if derr := storageDisks[index].DeleteVol(ctx, bucket, false); derr == errVolumeNotEmpty {
				_ = cleanupDir(ctx, storageDisks[index], bucket, "")

				_ = storageDisks[index].DeleteVol(ctx, bucket, false)

				// Clean up all previously incomplete multipart uploads.
				_ = cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
			}
		}
	}
}

// DeleteBucket - deletes a bucket.
func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
	// Collect if all disks report volume not found.
	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))

	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if storageDisks[index] != nil {
				if err := storageDisks[index].DeleteVol(ctx, bucket, forceDelete); err != nil {
					return err
				}
				err := cleanupDir(ctx, storageDisks[index], minioMetaMultipartBucket, bucket)
				if err != nil && err != errVolumeNotFound {
					return err
				}
				return nil
			}
			return errDiskNotFound
		}, index)
	}

	// Wait for all the delete vols to finish.
	dErrs := g.Wait()
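
	// When forceDelete is requested, any disk error aborts the operation:
	// re-create the bucket on the disks and return the error.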
	if forceDelete {
		for _, err := range dErrs {
			if err != nil {
				undoDeleteBucket(storageDisks, bucket)
				return toObjectErr(err, bucket)
			}
		}

		return nil
	}

	writeQuorum := getWriteQuorum(len(storageDisks))
	err := reduceWriteQuorumErrs(ctx, dErrs, bucketOpIgnoredErrs, writeQuorum)
	if err == errErasureWriteQuorum {
		undoDeleteBucket(storageDisks, bucket)
	}
	if err != nil {
		return toObjectErr(err, bucket)
	}

	// If the quorum-reduced error is nil, the bucket was deleted properly on
	// enough servers; look for any remaining `errVolumeNotEmpty` errors and
	// delete those dangling buckets as well.
	//
	// Use a background context so this call succeeds even if the client
	// cancels the request; this ensures we don't leave any stale content.
	deleteDanglingBucket(context.Background(), storageDisks, dErrs, bucket)

	return nil
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (er erasureObjects) IsNotificationSupported() bool {
	return true
}

// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (er erasureObjects) IsListenSupported() bool {
	return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (er erasureObjects) IsEncryptionSupported() bool {
	return true
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (er erasureObjects) IsCompressionSupported() bool {
	return true
}

// IsTaggingSupported indicates whether erasureObjects implements tagging support.
func (er erasureObjects) IsTaggingSupported() bool {
	return true
}