copy bucket slice to avoid skipping .minio.sys/buckets (#13912)

healing was skipping the `.minio.sys/buckets` path, so it was
essentially not healing `.usage.json` - fix this by making a
copy of the `buckets` slice before it is modified in place.
Harshavardhana 2021-12-15 09:18:09 -08:00 committed by GitHub
parent 88ad742da0
commit 5f7e6d03ff
3 changed files with 20 additions and 10 deletions
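
For context, below is a minimal standalone sketch of the slice-aliasing pitfall the commit message describes; `heal` and `markDone` are hypothetical stand-ins, not MinIO's actual heal tracker. Ranging over a slice whose backing array is compacted in place can silently skip entries, while ranging over a private copy cannot.

package main

import "fmt"

// heal iterates over the buckets it was handed. Copying first means later
// in-place edits to the caller's slice cannot change what this loop sees.
func heal(buckets []string, markDone func(string)) {
	healBuckets := make([]string, len(buckets))
	copy(healBuckets, buckets)

	for _, b := range healBuckets {
		fmt.Println("healing", b)
		markDone(b) // may compact the caller's slice in place
	}
}

func main() {
	buckets := []string{".minio.sys/config", ".minio.sys/buckets", "mybucket"}

	// markDone simulates a tracker that removes the finished bucket from the
	// shared slice in place, shifting later entries into earlier slots.
	markDone := func(done string) {
		for i, b := range buckets {
			if b == done {
				buckets = append(buckets[:i], buckets[i+1:]...)
				return
			}
		}
	}

	heal(buckets, markDone)
	// With the copy, all three buckets are healed exactly once. If heal ranged
	// directly over the shared slice instead, removing ".minio.sys/config" in
	// the first iteration would shift ".minio.sys/buckets" into slot 0, and the
	// loop (already past slot 0) would skip it - the kind of skip the commit
	// message describes.
}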


@@ -375,15 +375,13 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 	buckets, _ := z.ListBuckets(ctx)
-	buckets = append(buckets, BucketInfo{
-		Name: pathJoin(minioMetaBucket, minioConfigPrefix),
-	})

 	// Buckets data are dispersed in multiple zones/sets, make
 	// sure to heal all bucket metadata configuration.
-	buckets = append(buckets, []BucketInfo{
-		{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
-	}...)
+	buckets = append(buckets, BucketInfo{
+		Name: pathJoin(minioMetaBucket, minioConfigPrefix),
+	}, BucketInfo{
+		Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
+	})

 	// Heal latest buckets first.
 	sort.Slice(buckets, func(i, j int) bool {
@@ -441,10 +439,12 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 				continue
 			}
-			logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
+			logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
+				humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
 			logger.Info("Summary:\n")
 			tracker.printTo(os.Stdout)
 			logger.LogIf(ctx, tracker.delete(ctx))
+			logger.Info("\n")

 			// Only upon success pop the healed disk.
 			globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())


@@ -168,9 +168,14 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 	bgSeq := mustGetHealSequence(ctx)
 	scanMode := globalHealConfig.ScanMode()

+	// Make sure to copy since `buckets slice`
+	// is modified in place by tracker.
+	healBuckets := make([]string, len(buckets))
+	copy(healBuckets, buckets)
+
 	var retErr error
 	// Heal all buckets with all objects
-	for _, bucket := range buckets {
+	for _, bucket := range healBuckets {
 		if tracker.isHealed(bucket) {
 			continue
 		}
@@ -318,6 +323,8 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 		default:
 			tracker.bucketDone(bucket)
 			logger.LogIf(ctx, tracker.update(ctx))
+			logger.Info("Healing bucket %s content on %s erasure set complete",
+				bucket, humanize.Ordinal(tracker.SetIndex+1))
 		}
 	}
 	tracker.Object = ""


@@ -1962,7 +1962,10 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recu
 func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (err error) {
 	defer func() {
 		if err != nil {
-			logger.LogIf(ctx, err)
+			logger.LogIf(ctx, fmt.Errorf("srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v",
+				srcVolume, srcPath,
+				dstVolume, dstPath,
+				err))
 		}
 		if err == nil {
 			if s.globalSync {