diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go
index c07891b55..334075ac0 100644
--- a/cmd/erasure-healing.go
+++ b/cmd/erasure-healing.go
@@ -961,24 +961,18 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid
 		return validMeta, notFoundMetaErrs > dataBlocks
 	}
 
-	quorum := validMeta.Erasure.DataBlocks
-	if validMeta.Erasure.DataBlocks == validMeta.Erasure.ParityBlocks {
-		quorum++
-	}
-
 	// TODO: It is possible to replay the object via just single
 	// xl.meta file, considering quorum number of data-dirs are still
 	// present on other drives.
 	//
 	// However this requires a bit of a rewrite, leave this up for
 	// future work.
-
-	if notFoundMetaErrs > 0 && notFoundMetaErrs >= quorum {
+	if notFoundMetaErrs > 0 && notFoundMetaErrs > validMeta.Erasure.ParityBlocks {
 		// All xl.meta is beyond data blocks missing, this is dangling
 		return validMeta, true
 	}
 
-	if !validMeta.IsRemote() && notFoundPartsErrs > 0 && notFoundPartsErrs >= quorum {
+	if !validMeta.IsRemote() && notFoundPartsErrs > 0 && notFoundPartsErrs > validMeta.Erasure.ParityBlocks {
 		// All data-dir is beyond data blocks missing, this is dangling
 		return validMeta, true
 	}
diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go
index 88842d024..165b8a61b 100644
--- a/cmd/erasure-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -301,10 +301,10 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 	// to read the metadata entry.
 	var uploads []MultipartInfo
 
-	populatedUploadIds := set.NewStringSet()
+	populatedUploadIDs := set.NewStringSet()
 
 	for _, uploadID := range uploadIDs {
-		if populatedUploadIds.Contains(uploadID) {
+		if populatedUploadIDs.Contains(uploadID) {
 			continue
 		}
 		// If present, use time stored in ID.
@@ -321,7 +321,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 			UploadID:  base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))),
 			Initiated: startTime,
 		})
-		populatedUploadIds.Add(uploadID)
+		populatedUploadIDs.Add(uploadID)
 	}
 
 	sort.Slice(uploads, func(i int, j int) bool {
diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go
index 779b20bda..4dc64ac5c 100644
--- a/cmd/erasure-server-pool.go
+++ b/cmd/erasure-server-pool.go
@@ -1840,12 +1840,6 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op
 		}
 	}
 
-	if err != nil && !isErrBucketNotFound(err) {
-		if !opts.NoRecreate {
-			z.s3Peer.MakeBucket(ctx, bucket, MakeBucketOptions{})
-		}
-	}
-
 	if err == nil {
 		// Purge the entire bucket metadata entirely.
 		z.deleteAll(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, bucket))
diff --git a/cmd/peer-s3-client.go b/cmd/peer-s3-client.go
index 0ffeba7f9..26df72ae1 100644
--- a/cmd/peer-s3-client.go
+++ b/cmd/peer-s3-client.go
@@ -474,9 +474,12 @@ func (sys *S3PeerSys) DeleteBucket(ctx context.Context, bucket string, opts Dele
 				perPoolErrs = append(perPoolErrs, errs[i])
 			}
 		}
-		if poolErr := reduceWriteQuorumErrs(ctx, perPoolErrs, bucketOpIgnoredErrs, len(perPoolErrs)/2+1); poolErr != nil && poolErr != errVolumeNotFound {
-			// re-create successful deletes, since we are return an error.
-			sys.MakeBucket(ctx, bucket, MakeBucketOptions{})
+		poolErr := reduceWriteQuorumErrs(ctx, perPoolErrs, bucketOpIgnoredErrs, len(perPoolErrs)/2+1)
+		if poolErr != nil && !errors.Is(poolErr, errVolumeNotFound) {
+			if !opts.NoRecreate {
+				// re-create successful deletes, since we are return an error.
+				sys.MakeBucket(ctx, bucket, MakeBucketOptions{})
+			}
 			return toObjectErr(poolErr, bucket)
 		}
 	}
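
For context on the erasure-healing hunk above: the old code derived a quorum from `DataBlocks` (bumped by one when data and parity blocks were equal), whereas the new checks flag an object as dangling as soon as the number of missing `xl.meta` files (or missing data-dirs for non-remote objects) exceeds `ParityBlocks`, i.e. once fewer than `DataBlocks` copies remain. The following is a minimal standalone sketch of that predicate, using simplified stand-in types rather than the actual `FileInfo`/`ErasureInfo` structs:

```go
package main

import "fmt"

// erasureInfo is a simplified stand-in for the erasure metadata in xl.meta.
type erasureInfo struct {
	DataBlocks   int
	ParityBlocks int
}

// isDangling mirrors the two new checks in isObjectDangling: losing more than
// ParityBlocks copies of xl.meta (or of the data-dir) means the object can no
// longer be read back, so it is treated as dangling.
func isDangling(e erasureInfo, notFoundMetaErrs, notFoundPartsErrs int, isRemote bool) bool {
	if notFoundMetaErrs > 0 && notFoundMetaErrs > e.ParityBlocks {
		return true
	}
	if !isRemote && notFoundPartsErrs > 0 && notFoundPartsErrs > e.ParityBlocks {
		return true
	}
	return false
}

func main() {
	e := erasureInfo{DataBlocks: 8, ParityBlocks: 4} // e.g. EC:4 on a 12-drive erasure set
	fmt.Println(isDangling(e, 4, 0, false))          // false: 4 losses are within parity tolerance
	fmt.Println(isDangling(e, 5, 0, false))          // true: 5 > ParityBlocks, fewer than DataBlocks copies remain
}
```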