// Copyright (c) 2015-2022 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"fmt"
	"runtime"
	"sort"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/color"
	"github.com/minio/minio/internal/config/storageclass"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/v2/console"
	"github.com/minio/pkg/v2/wildcard"
	"github.com/minio/pkg/v2/workers"
)

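// bgHealingUUID is the reserved client token that identifies the server's own
// background healing sequence, as opposed to heal sequences started through
// the admin heal API.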
const (
	bgHealingUUID = "0000-0000-0000-0000"
)

// newBgHealSequence creates a background healing sequence
// operation which scans all objects and heals them.
func newBgHealSequence() *healSequence {
	reqInfo := &logger.ReqInfo{API: "BackgroundHeal"}
	ctx, cancelCtx := context.WithCancel(logger.SetReqInfo(GlobalContext, reqInfo))

	hs := madmin.HealOpts{
		// Remove objects that do not have read-quorum
		Remove: healDeleteDangling,
	}

	return &healSequence{
		startTime:   UTCNow(),
		clientToken: bgHealingUUID,
		// run background heal on the reserved bucket
		bucket:   minioReservedBucket,
		settings: hs,
		currentStatus: healSequenceStatus{
			Summary:      healNotStartedStatus,
			HealSettings: hs,
		},
		cancelCtx:          cancelCtx,
		ctx:                ctx,
		reportProgress:     false,
		scannedItemsMap:    make(map[madmin.HealItemType]int64),
		healedItemsMap:     make(map[madmin.HealItemType]int64),
		healFailedItemsMap: make(map[string]int64),
	}
}
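
// A minimal launch sketch (hedged): during server startup the sequence built
// above is registered with the global heal state, roughly as
//
//	// assumption: initBackgroundHealing (not shown in this file) does this wiring
//	globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)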

// getLocalBackgroundHealStatus will return the heal status of the local node
func getLocalBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.BgHealState, bool) {
	if globalBackgroundHealState == nil {
		return madmin.BgHealState{}, false
	}

	bgSeq, ok := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
	if !ok {
		return madmin.BgHealState{}, false
	}

	status := madmin.BgHealState{
		ScannedItemsCount: bgSeq.getScannedItemsCount(),
	}

	healDisksMap := map[string]struct{}{}
	for _, ep := range getLocalDisksToHeal() {
		healDisksMap[ep.String()] = struct{}{}
	}

	if o == nil {
		healing := globalBackgroundHealState.getLocalHealingDisks()
		for _, disk := range healing {
			status.HealDisks = append(status.HealDisks, disk.Endpoint)
		}

		return status, true
	}

	si := o.LocalStorageInfo(ctx, true)
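
	// Group the local drives by "<poolIndex>-<setIndex>" so that per-set heal
	// status (madmin.SetStatus) can be reported below.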
	indexed := make(map[string][]madmin.Disk)
	for _, disk := range si.Disks {
		setIdx := fmt.Sprintf("%d-%d", disk.PoolIndex, disk.SetIndex)
		indexed[setIdx] = append(indexed[setIdx], disk)
	}

	for id, disks := range indexed {
		ss := madmin.SetStatus{
			ID:        id,
			SetIndex:  disks[0].SetIndex,
			PoolIndex: disks[0].PoolIndex,
		}
		for _, disk := range disks {
			ss.Disks = append(ss.Disks, disk)
			if disk.Healing {
				ss.HealStatus = "Healing"
				ss.HealPriority = "high"
				status.HealDisks = append(status.HealDisks, disk.Endpoint)
			}
		}
		sortDisks(ss.Disks)
		status.Sets = append(status.Sets, ss)
	}
	sort.Slice(status.Sets, func(i, j int) bool {
		return status.Sets[i].ID < status.Sets[j].ID
	})

	backendInfo := o.BackendInfo()
	status.SCParity = make(map[string]int)
	status.SCParity[storageclass.STANDARD] = backendInfo.StandardSCParity
	status.SCParity[storageclass.RRS] = backendInfo.RRSCParity

	return status, true
}

// healErasureSet lists and heals all objects in a specific erasure set
func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, tracker *healingTracker) error {
	scanMode := madmin.HealNormalScan

	// Make sure to copy, since the `buckets` slice is
	// modified in place by the tracker.
	healBuckets := make([]string, len(buckets))
	copy(healBuckets, buckets)

	objAPI := newObjectLayerFn()
	if objAPI == nil {
		return errServerNotInitialized
	}

	for _, bucket := range healBuckets {
		_, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ScanMode: scanMode})
		if err != nil {
			// Log bucket healing errors if any; we shall retry again.
			logger.LogIf(ctx, err)
		}
	}

	info, err := tracker.disk.DiskInfo(ctx, DiskInfoOptions{})
	if err != nil {
		return fmt.Errorf("unable to get disk information before healing it: %w", err)
	}
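
	// Pick how many objects to heal in parallel on this drive: roughly
	// min(NRRequests, GOMAXPROCS)/4, never fewer than 4, and overridable
	// through the heal configuration. For example, with GOMAXPROCS=32 and a
	// drive queue depth (NRRequests) of 128, this yields 32/4 = 8 workers.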
	var numHealers uint64

	if numCores := uint64(runtime.GOMAXPROCS(0)); info.NRRequests > numCores {
		numHealers = numCores / 4
	} else {
		numHealers = info.NRRequests / 4
	}
	if numHealers < 4 {
		numHealers = 4
	}
	// allow overriding this value as well..
	if v := globalHealConfig.GetWorkers(); v > 0 {
		numHealers = uint64(v)
	}

	logger.Event(ctx, fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers))

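	// jt is a bounded pool of heal workers: each queued entry Take()s a slot
	// before its goroutine is spawned and Give()s the slot back when done.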
	jt, _ := workers.New(int(numHealers))

	var retErr error
	// Heal all buckets with all objects
	for _, bucket := range healBuckets {
		if tracker.isHealed(bucket) {
			continue
		}
		var forwardTo string
		// If we resume to the same bucket, forward to last known item.
		if b := tracker.getBucket(); b != "" {
			if b == bucket {
				forwardTo = tracker.getObject()
			} else {
				// Reset to where last bucket ended if resuming.
				tracker.resume()
			}
		}
		tracker.setObject("")
		tracker.setBucket(bucket)
		// Heal the current bucket again in case it failed
		// at the beginning of erasure set healing.
		if _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{
			ScanMode: scanMode,
		}); err != nil {
			logger.LogIf(ctx, err)
			continue
		}

		vc, _ := globalBucketVersioningSys.Get(bucket)

		// Check if the current bucket has a configured lifecycle policy
		lc, _ := globalLifecycleSys.Get(bucket)

		// Check if bucket is object locked.
		lr, _ := globalBucketObjectLockSys.Get(bucket)
		rcfg, _ := getReplicationConfig(ctx, bucket)

		if serverDebugLog {
			console.Debugf(color.Green("healDrive:")+" healing bucket %s content on %s erasure set\n",
				bucket, humanize.Ordinal(er.setIndex+1))
		}

		disks, _ := er.getOnlineDisksWithHealing(false)
		if len(disks) == 0 {
			// No object healing necessary
			tracker.bucketDone(bucket)
			logger.LogIf(ctx, tracker.update(ctx))
			continue
		}

		// Limit listing to 3 drives.
		if len(disks) > 3 {
			disks = disks[:3]
		}

		type healEntryResult struct {
			bytes     uint64
			success   bool
			skipped   bool
			entryDone bool
			name      string
		}
		healEntryDone := func(name string) healEntryResult {
			return healEntryResult{
				entryDone: true,
				name:      name,
			}
		}
		healEntrySuccess := func(sz uint64) healEntryResult {
			return healEntryResult{
				bytes:   sz,
				success: true,
			}
		}
		healEntryFailure := func(sz uint64) healEntryResult {
			return healEntryResult{
				bytes: sz,
			}
		}
		healEntrySkipped := func(sz uint64) healEntryResult {
			return healEntryResult{
				bytes:   sz,
				skipped: true,
			}
		}

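		// filterLifecycle reports whether healing should skip this version
		// because the bucket's lifecycle configuration already expires it;
		// such versions are queued for expiry instead of being healed.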
		filterLifecycle := func(bucket, object string, fi FileInfo) bool {
			if lc == nil {
				return false
			}
			versioned := vc != nil && vc.Versioned(object)
			objInfo := fi.ToObjectInfo(bucket, object, versioned)

			evt := evalActionFromLifecycle(ctx, *lc, lr, rcfg, objInfo)
			switch {
			case evt.Action.DeleteRestored(): // if restored copy has expired, delete it synchronously
				applyExpiryOnTransitionedObject(ctx, newObjectLayerFn(), objInfo, evt, lcEventSrc_Heal)
				return false
			case evt.Action.Delete():
				globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Heal)
				return true
			default:
				return false
			}
		}

		// Collect updates to tracker from concurrent healEntry calls
		results := make(chan healEntryResult, 1000)
		go func() {
			for res := range results {
				if res.entryDone {
					tracker.setObject(res.name)
					if time.Since(tracker.getLastUpdate()) > time.Minute {
						logger.LogIf(ctx, tracker.update(ctx))
					}
					continue
				}

				tracker.updateProgress(res.success, res.skipped, res.bytes)
			}
		}()

		send := func(result healEntryResult) bool {
			select {
			case <-ctx.Done():
				if !contextCanceled(ctx) {
					logger.LogIf(ctx, ctx.Err())
				}
				return false
			case results <- result:
				return true
			}
		}

		// Note: updates from healEntry to tracker must be sent on results channel.
		healEntry := func(bucket string, entry metaCacheEntry) {
			defer jt.Give()

			if entry.name == "" && len(entry.metadata) == 0 {
				// ignore entries that don't have metadata.
				return
			}
			if entry.isDir() {
				// ignore healing entry.name's with `/` suffix.
				return
			}

			// We might land at .metacache, .trash, .multipart entries;
			// there is no need to heal them, so skip them, but only when
			// the bucket is '.minio.sys'.
			if bucket == minioMetaBucket {
				if wildcard.Match("buckets/*/.metacache/*", entry.name) {
					return
				}
				if wildcard.Match("tmp/.trash/*", entry.name) {
					return
				}
				if wildcard.Match("multipart/*", entry.name) {
					return
				}
			}

			// erasureObjects layer needs object names to be encoded
			encodedEntryName := encodeDirObject(entry.name)

			var result healEntryResult
			fivs, err := entry.fileInfoVersions(bucket)
			if err != nil {
				_, err := er.HealObject(ctx, bucket, encodedEntryName, "",
					madmin.HealOpts{
						ScanMode: scanMode,
						Remove:   healDeleteDangling,
					})
				if err != nil {
					if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
						// queueing happens across namespace, ignore
						// objects that are not found.
						return
					}
					result = healEntryFailure(0)
					logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err))
				} else {
					result = healEntrySuccess(0)
				}

				send(result)
				return
			}

			var versionNotFound int
			for _, version := range fivs.Versions {
				// Ignore a version with a modtime newer than healing start time.
				if version.ModTime.After(tracker.Started) {
					continue
				}

				// Apply lifecycle rules on the objects that are expired.
				if filterLifecycle(bucket, version.Name, version) {
					versionNotFound++
					if !send(healEntrySkipped(uint64(version.Size))) {
						return
					}
					continue
				}

				if _, err := er.HealObject(ctx, bucket, encodedEntryName,
					version.VersionID, madmin.HealOpts{
						ScanMode: scanMode,
						Remove:   healDeleteDangling,
					}); err != nil {
					if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
						// queueing happens across namespace, ignore
						// objects that are not found.
						versionNotFound++
						continue
					}
					// If not deleted, assume they failed.
					result = healEntryFailure(uint64(version.Size))
					if version.VersionID != "" {
						logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s-v(%s): %w", bucket, version.Name, version.VersionID, err))
					} else {
						logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err))
					}
				} else {
					result = healEntrySuccess(uint64(version.Size))
				}

				if !send(result) {
					return
				}
			}
			// All versions resulted in 'ObjectNotFound/VersionNotFound'
			if versionNotFound == len(fivs.Versions) {
				return
			}
			select {
			case <-ctx.Done():
				return
			case results <- healEntryDone(entry.name):
			}

			// Wait and proceed if there are active requests
			waitForLowHTTPReq()
		}

		actualBucket, prefix := path2BucketObject(bucket)

		// How to resolve partial results.
		resolver := metadataResolutionParams{
			dirQuorum: 1,
			objQuorum: 1,
			bucket:    actualBucket,
		}
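
		// Walk the set's namespace across the selected drives: entries all
		// drives agree on are handed to `agreed`, mismatched listings are
		// reconciled through `partial` using the resolver above, and every
		// entry is dispatched to a bounded heal worker.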
		err := listPathRaw(ctx, listPathRawOptions{
			disks:          disks,
			bucket:         actualBucket,
			path:           prefix,
			recursive:      true,
			forwardTo:      forwardTo,
			minDisks:       1,
			reportNotFound: false,
			agreed: func(entry metaCacheEntry) {
				jt.Take()
				go healEntry(actualBucket, entry)
			},
			partial: func(entries metaCacheEntries, _ []error) {
				entry, ok := entries.resolve(&resolver)
				if !ok {
					// check if we can get one entry at least
					// proceed to heal nonetheless.
					entry, _ = entries.firstFound()
				}
				jt.Take()
				go healEntry(actualBucket, *entry)
			},
			finished: nil,
		})
		jt.Wait() // synchronize all the concurrent heal jobs
		xioutil.SafeClose(results)
		if err != nil {
			// Set this so that when we return from this function
			// the caller retries this disk again for the
			// buckets it failed to list.
			retErr = err
			logger.LogIf(ctx, err)
			continue
		}

		select {
		// If context is canceled don't mark as done...
		case <-ctx.Done():
			return ctx.Err()
		default:
			tracker.bucketDone(bucket)
			logger.LogIf(ctx, tracker.update(ctx))
		}
	}

	tracker.setObject("")
	tracker.setBucket("")

	return retErr
}

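// healBucket sends the given bucket to the background healing workers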
func healBucket(bucket string, scan madmin.HealScanMode) error {
	// Get background heal sequence to send elements to heal
	bgSeq, ok := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
	if ok {
		return bgSeq.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket)
	}
	return nil
}

// healObject sends the given object/version to the background healing workers
func healObject(bucket, object, versionID string, scan madmin.HealScanMode) error {
	// Get background heal sequence to send elements to heal
	bgSeq, ok := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
	if ok {
		return bgSeq.queueHealTask(healSource{
			bucket:    bucket,
			object:    object,
			versionID: versionID,
			noWait:    true, // do not block callers.
			opts: &madmin.HealOpts{
				Remove:   healDeleteDangling, // if found dangling purge it.
				ScanMode: scan,
			},
		}, madmin.HealItemObject)
	}
	return nil
}
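
// A hedged usage sketch (the call below is illustrative, not a call site taken
// from this file): a caller that detects a damaged object version can queue a
// non-blocking background repair with
//
//	_ = healObject(bucket, object, versionID, madmin.HealNormalScan)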