Mirror of https://github.com/minio/minio.git (synced 2024-12-23 21:55:53 -05:00)
copy bucket slice to avoid skipping .minio.sys/buckets (#13912)
healing was skipping the `.minio.sys/buckets` path, so `.usage.json` was essentially never healed; fix this by making a copy of the `buckets` slice before the tracker modifies it in place.
This commit is contained in:
parent 88ad742da0
commit 5f7e6d03ff
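For context on the failure mode the commit message describes, here is a minimal, hypothetical sketch (the `reorder` helper and the bucket names are illustrative, not MinIO code): when the slice being ranged over is rewritten in place partway through, an entry appended at the end can be moved behind the iteration cursor and never visited, which is how appended `.minio.sys/...` entries could be skipped.

```go
package main

import (
	"fmt"
	"sort"
)

// reorder stands in for any component that rewrites the caller's slice
// in place (a hypothetical stand-in; the real healing tracker is more involved).
func reorder(items []string) {
	sort.Strings(items)
}

func main() {
	// ".minio.sys/buckets" is appended last, like the metadata entries in the diff.
	buckets := []string{"zulu", "alpha", ".minio.sys/buckets"}

	// Without a copy: an in-place reorder mid-iteration moves the metadata
	// entry in front of the cursor, so it is never visited.
	visited := []string{}
	for i := range buckets {
		if i == 1 {
			reorder(buckets) // mutates the slice we are ranging over
		}
		visited = append(visited, buckets[i])
	}
	fmt.Println("without copy:", visited) // [zulu alpha zulu] -- .minio.sys/buckets skipped

	// With a copy, the iteration is insulated from the mutation.
	buckets = []string{"zulu", "alpha", ".minio.sys/buckets"}
	healBuckets := make([]string, len(buckets))
	copy(healBuckets, buckets)
	visited = visited[:0]
	for i := range healBuckets {
		if i == 1 {
			reorder(buckets) // the original may still be rewritten elsewhere
		}
		visited = append(visited, healBuckets[i])
	}
	fmt.Println("with copy:   ", visited) // [zulu alpha .minio.sys/buckets]
}
```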
@@ -375,15 +375,13 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 	buckets, _ := z.ListBuckets(ctx)
 
-	buckets = append(buckets, BucketInfo{
-		Name: pathJoin(minioMetaBucket, minioConfigPrefix),
-	})
-
 	// Buckets data are dispersed in multiple zones/sets, make
 	// sure to heal all bucket metadata configuration.
-	buckets = append(buckets, []BucketInfo{
-		{Name: pathJoin(minioMetaBucket, bucketMetaPrefix)},
-	}...)
+	buckets = append(buckets, BucketInfo{
+		Name: pathJoin(minioMetaBucket, minioConfigPrefix),
+	}, BucketInfo{
+		Name: pathJoin(minioMetaBucket, bucketMetaPrefix),
+	})
 
 	// Heal latest buckets first.
 	sort.Slice(buckets, func(i, j int) bool {
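For orientation, the two entries appended above are the internal metadata prefixes. A small sketch of what they resolve to, assuming the usual MinIO constant values (".minio.sys", "config", "buckets" are assumptions here, not shown in the diff) and using the standard library's path.Join as a stand-in for MinIO's pathJoin helper:

```go
package main

import (
	"fmt"
	"path"
)

// Assumed values for illustration; the real constants live in the MinIO codebase.
const (
	minioMetaBucket   = ".minio.sys"
	minioConfigPrefix = "config"
	bucketMetaPrefix  = "buckets"
)

func main() {
	fmt.Println(path.Join(minioMetaBucket, minioConfigPrefix)) // .minio.sys/config
	fmt.Println(path.Join(minioMetaBucket, bucketMetaPrefix))  // .minio.sys/buckets
}
```

The second path is the `.minio.sys/buckets` prefix from the commit title, which is presumably where the `.usage.json` object mentioned in the commit message lives.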
@@ -441,10 +439,12 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools, bgSeq
 				continue
 			}
 
-			logger.Info("Healing disk '%s' on %s pool complete", disk, humanize.Ordinal(i+1))
+			logger.Info("Healing disk '%s' on %s pool, %s set complete", disk,
+				humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
 			logger.Info("Summary:\n")
 			tracker.printTo(os.Stdout)
 			logger.LogIf(ctx, tracker.delete(ctx))
+			logger.Info("\n")
 
 			// Only upon success pop the healed disk.
 			globalBackgroundHealState.popHealLocalDisks(disk.Endpoint())
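The log lines use humanize.Ordinal from github.com/dustin/go-humanize to turn zero-based pool and set indexes into readable ordinals. A short usage sketch (the disk path is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// Zero-based indexes become human-friendly ordinals in the log message.
	i, setIndex := 0, 2
	fmt.Printf("Healing disk '%s' on %s pool, %s set complete\n",
		"/mnt/disk1", humanize.Ordinal(i+1), humanize.Ordinal(setIndex+1))
	// Output: Healing disk '/mnt/disk1' on 1st pool, 3rd set complete
}
```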
@@ -168,9 +168,14 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 	bgSeq := mustGetHealSequence(ctx)
 	scanMode := globalHealConfig.ScanMode()
 
+	// Make sure to copy since `buckets slice`
+	// is modified in place by tracker.
+	healBuckets := make([]string, len(buckets))
+	copy(healBuckets, buckets)
+
 	var retErr error
 	// Heal all buckets with all objects
-	for _, bucket := range buckets {
+	for _, bucket := range healBuckets {
 		if tracker.isHealed(bucket) {
 			continue
 		}
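The fix above uses the make-plus-copy idiom. A small illustrative snippet (not from the diff) of why this insulates the loop, with an equivalent append-based spelling:

```go
package main

import "fmt"

func main() {
	buckets := []string{"alpha", "bravo", ".minio.sys/buckets"}

	// make + copy, as in the diff above: a fresh backing array.
	healBuckets := make([]string, len(buckets))
	copy(healBuckets, buckets)

	// Equivalent spelling: appending onto a nil slice also allocates
	// a new backing array of exactly the right length.
	alt := append([]string(nil), buckets...)

	// In-place edits to the original no longer affect either copy.
	buckets[2] = "changed"
	fmt.Println(healBuckets[2], alt[2]) // .minio.sys/buckets .minio.sys/buckets
}
```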
@@ -318,6 +323,8 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 		default:
 			tracker.bucketDone(bucket)
 			logger.LogIf(ctx, tracker.update(ctx))
+			logger.Info("Healing bucket %s content on %s erasure set complete",
+				bucket, humanize.Ordinal(tracker.SetIndex+1))
 		}
 	}
 	tracker.Object = ""
@@ -1962,7 +1962,10 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recu
 func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (err error) {
 	defer func() {
 		if err != nil {
-			logger.LogIf(ctx, err)
+			logger.LogIf(ctx, fmt.Errorf("srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v",
+				srcVolume, srcPath,
+				dstVolume, dstPath,
+				err))
 		}
 		if err == nil {
 			if s.globalSync {
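The RenameData change enriches the logged error with the rename's source and destination. A hedged, generic illustration of that pattern using only the standard library (the function and path names below are placeholders, not MinIO's types):

```go
package main

import (
	"errors"
	"fmt"
)

// renameData is a placeholder for the storage-layer call whose failure
// we want to report with full context.
func renameData(srcVolume, srcPath, dstVolume, dstPath string) error {
	return errors.New("file not found")
}

func main() {
	srcVolume, srcPath := "bucket", "object/part.1"
	dstVolume, dstPath := "bucket", "object/xl.meta"

	if err := renameData(srcVolume, srcPath, dstVolume, dstPath); err != nil {
		// fmt.Errorf attaches the operands to the failure; %w (instead of the
		// %v used in the diff) additionally keeps the original error
		// unwrappable via errors.Is / errors.As.
		wrapped := fmt.Errorf("srcVolume: %s, srcPath: %s, dstVolume: %s, dstPath: %s - error %w",
			srcVolume, srcPath, dstVolume, dstPath, err)
		fmt.Println(wrapped)
	}
}
```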