2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2016-06-17 14:57:51 -04:00
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-05-25 19:42:31 -04:00
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
import (
|
2021-03-29 20:00:55 -04:00
|
|
|
"bytes"
|
2018-03-14 15:01:47 -04:00
|
|
|
"context"
|
2020-08-03 21:17:48 -04:00
|
|
|
"errors"
|
2016-11-16 19:42:23 -05:00
|
|
|
"fmt"
|
2019-01-17 07:58:18 -05:00
|
|
|
"io"
|
2020-06-12 23:04:01 -04:00
|
|
|
"sync"
|
2017-11-25 14:58:29 -05:00
|
|
|
|
2021-05-06 11:52:02 -04:00
|
|
|
"github.com/minio/madmin-go"
|
2021-06-01 17:59:40 -04:00
|
|
|
"github.com/minio/minio/internal/logger"
|
|
|
|
"github.com/minio/minio/internal/sync/errgroup"
|
2016-11-16 19:42:23 -05:00
|
|
|
)
|
2016-10-17 05:10:23 -04:00
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
// Heals a bucket if it doesn't exist on one of the disks, additionally
// also heals the missing entries for bucket metadata files
// `policy.json, notification.xml, listeners.json`.
func (er erasureObjects) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (
	result madmin.HealResultItem, err error) {
	if !opts.DryRun {
		// Only notify a namespace update when healing may actually
		// mutate on-disk state; a dry-run changes nothing.
		defer NSUpdated(bucket, slashSeparator)
	}

	storageDisks := er.getDisks()
	storageEndpoints := er.getEndpoints()

	// get write quorum for an object
	writeQuorum := len(storageDisks) - er.defaultParityCount
	if writeQuorum == er.defaultParityCount {
		// Data and parity counts are equal: bump quorum by one so a
		// strict majority of disks is required for the write to count.
		writeQuorum++
	}

	// Heal bucket.
	return healBucket(ctx, storageDisks, storageEndpoints, bucket, writeQuorum, opts)
}
|
2016-10-17 05:10:23 -04:00
|
|
|
|
2016-11-19 20:37:57 -05:00
|
|
|
// Heal bucket - create buckets on disks where it does not exist.
//
// Phase 1 stats the bucket volume on every disk and records a per-disk
// before/after drive state; phase 2 creates the volume on disks where it
// was found missing. Both phases run one goroutine per disk via errgroup
// and are reduced against the supplied write quorum.
func healBucket(ctx context.Context, storageDisks []StorageAPI, storageEndpoints []Endpoint, bucket string, writeQuorum int,
	opts madmin.HealOpts) (res madmin.HealResultItem, err error) {

	// Initialize sync waitgroup.
	g := errgroup.WithNErrs(len(storageDisks))

	// Disk states slices
	beforeState := make([]string, len(storageDisks))
	afterState := make([]string, len(storageDisks))

	// Make a volume entry on all underlying storage disks.
	for index := range storageDisks {
		index := index // capture loop variable for the goroutine
		g.Go(func() error {
			if storageDisks[index] == nil {
				beforeState[index] = madmin.DriveStateOffline
				afterState[index] = madmin.DriveStateOffline
				return errDiskNotFound
			}
			if _, serr := storageDisks[index].StatVol(ctx, bucket); serr != nil {
				if serr == errDiskNotFound {
					beforeState[index] = madmin.DriveStateOffline
					afterState[index] = madmin.DriveStateOffline
					return serr
				}
				if serr != errVolumeNotFound {
					// Any error other than "volume missing" is treated
					// as corruption on this drive.
					beforeState[index] = madmin.DriveStateCorrupt
					afterState[index] = madmin.DriveStateCorrupt
					return serr
				}

				// Volume is simply missing on this drive; phase 2 will
				// recreate it (unless this is a dry-run).
				beforeState[index] = madmin.DriveStateMissing
				afterState[index] = madmin.DriveStateMissing

				// mutate only if not a dry-run
				if opts.DryRun {
					return nil
				}

				return serr
			}
			beforeState[index] = madmin.DriveStateOk
			afterState[index] = madmin.DriveStateOk
			return nil
		}, index)
	}

	errs := g.Wait()

	// Initialize heal result info
	res = madmin.HealResultItem{
		Type:         madmin.HealItemBucket,
		Bucket:       bucket,
		DiskCount:    len(storageDisks),
		ParityBlocks: len(storageDisks) / 2,
		DataBlocks:   len(storageDisks) / 2,
	}

	for i := range beforeState {
		res.Before.Drives = append(res.Before.Drives, madmin.HealDriveInfo{
			UUID:     "",
			Endpoint: storageEndpoints[i].String(),
			State:    beforeState[i],
		})
	}

	// If the bucket is missing on quorum disks and the caller did not ask
	// for recreation, report success without mutating anything.
	reducedErr := reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, writeQuorum-1)
	if errors.Is(reducedErr, errVolumeNotFound) && !opts.Recreate {
		for i := range beforeState {
			res.After.Drives = append(res.After.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: storageEndpoints[i].String(),
				State:    madmin.DriveStateOk,
			})
		}
		return res, nil
	}

	// Initialize sync waitgroup.
	g = errgroup.WithNErrs(len(storageDisks))

	// Make a volume entry on all underlying storage disks.
	for index := range storageDisks {
		index := index // capture loop variable for the goroutine
		g.Go(func() error {
			if beforeState[index] == madmin.DriveStateMissing {
				makeErr := storageDisks[index].MakeVol(ctx, bucket)
				if makeErr == nil {
					afterState[index] = madmin.DriveStateOk
				}
				return makeErr
			}
			// Propagate the phase-1 error for drives that were not
			// in the "missing" state.
			return errs[index]
		}, index)
	}

	errs = g.Wait()

	reducedErr = reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, writeQuorum)
	if reducedErr != nil {
		return res, reducedErr
	}

	for i := range afterState {
		res.After.Drives = append(res.After.Drives, madmin.HealDriveInfo{
			UUID:     "",
			Endpoint: storageEndpoints[i].String(),
			State:    afterState[i],
		})
	}
	return res, nil
}
|
|
|
|
|
2017-01-19 12:34:18 -05:00
|
|
|
// listAllBuckets lists all buckets from all disks. It also
|
|
|
|
// returns the occurrence of each buckets in all disks
|
2020-09-24 12:53:38 -04:00
|
|
|
func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets map[string]VolInfo) error {
|
|
|
|
g := errgroup.WithNErrs(len(storageDisks))
|
|
|
|
var mu sync.Mutex
|
|
|
|
for index := range storageDisks {
|
|
|
|
index := index
|
|
|
|
g.Go(func() error {
|
|
|
|
if storageDisks[index] == nil {
|
|
|
|
// we ignore disk not found errors
|
|
|
|
return nil
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
2020-09-24 12:53:38 -04:00
|
|
|
volsInfo, err := storageDisks[index].ListVols(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
2020-09-24 12:53:38 -04:00
|
|
|
for _, volInfo := range volsInfo {
|
|
|
|
// StorageAPI can send volume names which are
|
|
|
|
// incompatible with buckets - these are
|
|
|
|
// skipped, like the meta-bucket.
|
|
|
|
if isReservedOrInvalidBucket(volInfo.Name, false) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
mu.Lock()
|
|
|
|
if _, ok := healBuckets[volInfo.Name]; !ok {
|
|
|
|
healBuckets[volInfo.Name] = volInfo
|
|
|
|
}
|
|
|
|
mu.Unlock()
|
2020-05-06 17:25:05 -04:00
|
|
|
}
|
2020-09-24 12:53:38 -04:00
|
|
|
return nil
|
|
|
|
}, index)
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
2020-09-24 12:53:38 -04:00
|
|
|
return reduceReadQuorumErrs(ctx, g.Wait(), bucketMetadataOpIgnoredErrs, len(storageDisks)/2)
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
|
|
|
|
2019-01-30 13:53:57 -05:00
|
|
|
// Only heal on disks where we are sure that healing is needed. We can expand
|
|
|
|
// this list as and when we figure out more errors can be added to this list safely.
|
2021-08-23 16:14:55 -04:00
|
|
|
func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, latestMeta FileInfo) bool {
|
2020-08-03 21:17:48 -04:00
|
|
|
switch {
|
|
|
|
case errors.Is(erErr, errFileNotFound) || errors.Is(erErr, errFileVersionNotFound):
|
2019-01-30 13:53:57 -05:00
|
|
|
return true
|
2020-08-03 21:17:48 -04:00
|
|
|
case errors.Is(erErr, errCorruptedFormat):
|
2019-01-30 13:53:57 -05:00
|
|
|
return true
|
|
|
|
}
|
2020-06-12 23:04:01 -04:00
|
|
|
if erErr == nil {
|
2021-07-26 14:48:09 -04:00
|
|
|
if !meta.Deleted && !meta.IsRemote() {
|
2021-04-19 13:30:42 -04:00
|
|
|
// If xl.meta was read fine but there may be problem with the part.N files.
|
|
|
|
if IsErr(dataErr, []error{
|
|
|
|
errFileNotFound,
|
|
|
|
errFileVersionNotFound,
|
|
|
|
errFileCorrupt,
|
|
|
|
}...) {
|
|
|
|
return true
|
|
|
|
}
|
2019-01-30 13:53:57 -05:00
|
|
|
}
|
2021-08-23 16:14:55 -04:00
|
|
|
if !latestMeta.MetadataEquals(meta) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if !latestMeta.TransitionInfoEquals(meta) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if !latestMeta.ReplicationInfoEquals(meta) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if !latestMeta.ModTime.Equal(meta.ModTime) {
|
2020-07-17 20:41:29 -04:00
|
|
|
return true
|
|
|
|
}
|
2020-07-21 16:54:06 -04:00
|
|
|
if meta.XLV1 {
|
2019-01-30 13:53:57 -05:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// Heals an object by re-writing corrupt/missing erasure blocks.
//
// High-level flow: optionally lock the object, read per-disk metadata,
// classify each drive's state, then re-encode missing/corrupt parts from
// the healthy disks into a temporary location and rename into place.
// Dangling objects (no readable metadata, or below data-block quorum) are
// purged instead of healed.
func (er erasureObjects) healObject(ctx context.Context, bucket string, object string, versionID string, opts madmin.HealOpts) (result madmin.HealResultItem, err error) {
	if !opts.DryRun {
		// Only notify namespace listeners when state may actually change.
		defer NSUpdated(bucket, object)
	}

	dryRun := opts.DryRun
	scanMode := opts.ScanMode

	storageDisks := er.getDisks()
	storageEndpoints := er.getEndpoints()

	// Initialize heal result object
	result = madmin.HealResultItem{
		Type:         madmin.HealItemObject,
		Bucket:       bucket,
		Object:       object,
		DiskCount:    len(storageDisks),
		ParityBlocks: er.defaultParityCount,
		DataBlocks:   len(storageDisks) - er.defaultParityCount,
	}

	if !opts.NoLock {
		lk := er.NewNSLock(bucket, object)
		lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return result, err
		}
		// Use the lock's context from here on so cancellation of the
		// lock cancels the heal.
		ctx = lkctx.Context()
		defer lk.Unlock(lkctx.Cancel)
	}

	// Re-read when we have lock...
	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, object, versionID, true)

	// No valid latest metadata across disks: the object is dangling,
	// remove whatever remains of it instead of healing.
	if _, err = getLatestFileInfo(ctx, partsMetadata, errs, er.defaultParityCount); err != nil {
		return er.purgeObjectDangling(ctx, bucket, object, versionID, partsMetadata, errs, []error{}, opts)
	}

	// List of disks having latest version of the object er.meta
	// (by modtime).
	_, modTime, dataDir := listOnlineDisks(storageDisks, partsMetadata, errs)

	// make sure all parts metadata dataDir is same as returned by listOnlineDisks()
	// the reason is its possible that some of the disks might have stale data, for those
	// we simply override them with maximally occurring 'dataDir' - this ensures that
	// disksWithAllParts() verifies same dataDir across all drives.
	for i := range partsMetadata {
		partsMetadata[i].DataDir = dataDir
	}

	// List of disks having all parts as per latest metadata.
	// NOTE: do not pass in latestDisks to diskWithAllParts since
	// the diskWithAllParts needs to reach the drive to ensure
	// validity of the metadata content, we should make sure that
	// we pass in disks as is for it to be verified. Once verified
	// the disksWithAllParts() returns the actual disks that can be
	// used here for reconstruction. This is done to ensure that
	// we do not skip drives that have inconsistent metadata to be
	// skipped from purging when they are stale.
	availableDisks, dataErrs := disksWithAllParts(ctx, storageDisks, partsMetadata,
		errs, bucket, object, scanMode)

	// Latest FileInfo for reference. If a valid metadata is not
	// present, it is as good as object not found.
	latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, dataDir, result.DataBlocks)
	if err != nil {
		return result, toObjectErr(err, bucket, object, versionID)
	}

	// Loop to find number of disks with valid data, per-drive
	// data state and a list of outdated disks on which data needs
	// to be healed.
	outDatedDisks := make([]StorageAPI, len(storageDisks))
	numAvailableDisks := 0
	disksToHealCount := 0
	for i, v := range availableDisks {
		driveState := ""
		switch {
		case v != nil:
			driveState = madmin.DriveStateOk
			numAvailableDisks++
			// If data is sane on any one disk, we can
			// extract the correct object size.
			result.ObjectSize = partsMetadata[i].Size
			if partsMetadata[i].Erasure.ParityBlocks > 0 && partsMetadata[i].Erasure.DataBlocks > 0 {
				result.ParityBlocks = partsMetadata[i].Erasure.ParityBlocks
				result.DataBlocks = partsMetadata[i].Erasure.DataBlocks
			}
		case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound:
			driveState = madmin.DriveStateOffline
		case errs[i] == errFileNotFound, errs[i] == errFileVersionNotFound, errs[i] == errVolumeNotFound:
			fallthrough
		case dataErrs[i] == errFileNotFound, dataErrs[i] == errFileVersionNotFound, dataErrs[i] == errVolumeNotFound:
			driveState = madmin.DriveStateMissing
		default:
			// all remaining cases imply corrupt data/metadata
			driveState = madmin.DriveStateCorrupt
		}

		if shouldHealObjectOnDisk(errs[i], dataErrs[i], partsMetadata[i], latestMeta) {
			outDatedDisks[i] = storageDisks[i]
			disksToHealCount++
			result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: storageEndpoints[i].String(),
				State:    driveState,
			})
			result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: storageEndpoints[i].String(),
				State:    driveState,
			})
			continue
		}
		result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
			UUID:     "",
			Endpoint: storageEndpoints[i].String(),
			State:    driveState,
		})
		result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
			UUID:     "",
			Endpoint: storageEndpoints[i].String(),
			State:    driveState,
		})
	}

	if isAllNotFound(errs) {
		err = toObjectErr(errFileNotFound, bucket, object)
		if versionID != "" {
			err = toObjectErr(errFileVersionNotFound, bucket, object, versionID)
		}
		// File is fully gone, fileInfo is empty.
		return er.defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs,
			bucket, object, versionID), err
	}

	// If less than read quorum number of disks have all the parts
	// of the data, we can't reconstruct the erasure-coded data.
	if numAvailableDisks < result.DataBlocks {
		return er.purgeObjectDangling(ctx, bucket, object, versionID, partsMetadata, errs, dataErrs, opts)
	}

	if disksToHealCount == 0 {
		// Nothing to heal!
		return result, nil
	}

	// After this point, only have to repair data on disk - so
	// return if it is a dry-run
	if dryRun {
		return result, nil
	}

	cleanFileInfo := func(fi FileInfo) FileInfo {
		// Returns a copy of the 'fi' with checksums and parts nil'ed.
		nfi := fi
		if !fi.IsRemote() {
			nfi.Erasure.Index = 0
			nfi.Erasure.Checksums = nil
		}
		return nfi
	}

	// We write at temporary location and then rename to final location.
	tmpID := mustGetUUID()
	migrateDataDir := mustGetUUID()

	// Keep an untouched copy of each outdated disk's metadata; the
	// checksum info in the copies is needed by the bitrot readers below.
	copyPartsMetadata := make([]FileInfo, len(partsMetadata))
	for i := range outDatedDisks {
		if outDatedDisks[i] == nil {
			continue
		}
		copyPartsMetadata[i] = partsMetadata[i]
		partsMetadata[i] = cleanFileInfo(latestMeta)
	}

	// source data dir shall be empty in case of XLV1
	// differentiate it with dstDataDir for readability
	// srcDataDir is the one used with newBitrotReader()
	// to read existing content.
	srcDataDir := latestMeta.DataDir
	dstDataDir := latestMeta.DataDir
	if latestMeta.XLV1 {
		dstDataDir = migrateDataDir
	}

	var inlineBuffers []*bytes.Buffer
	if latestMeta.InlineData() {
		inlineBuffers = make([]*bytes.Buffer, len(outDatedDisks))
	}

	// Reorder so that we have data disks first and parity disks next.
	latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
	outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
	partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
	copyPartsMetadata = shufflePartsMetadata(copyPartsMetadata, latestMeta.Erasure.Distribution)

	// Delete markers and transitioned (remote) objects have no local part
	// data to reconstruct - only metadata is rewritten for them below.
	if !latestMeta.Deleted && !latestMeta.IsRemote() {
		result.DataBlocks = latestMeta.Erasure.DataBlocks
		result.ParityBlocks = latestMeta.Erasure.ParityBlocks

		// Heal each part. erasureHealFile() will write the healed
		// part to .minio/tmp/uuid/ which needs to be renamed later to
		// the final location.
		erasure, err := NewErasure(ctx, latestMeta.Erasure.DataBlocks,
			latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize)
		if err != nil {
			return result, toObjectErr(err, bucket, object)
		}

		erasureInfo := latestMeta.Erasure
		// Pick the byte-pool matching the object's block size
		// (legacy block size uses the older pool).
		bp := er.bp
		if erasureInfo.BlockSize == blockSizeV1 {
			bp = er.bpOld
		}
		for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ {
			partSize := latestMeta.Parts[partIndex].Size
			partActualSize := latestMeta.Parts[partIndex].ActualSize
			partNumber := latestMeta.Parts[partIndex].Number
			tillOffset := erasure.ShardFileOffset(0, partSize, partSize)
			readers := make([]io.ReaderAt, len(latestDisks))
			checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm
			for i, disk := range latestDisks {
				if disk == OfflineDisk {
					continue
				}
				checksumInfo := copyPartsMetadata[i].Erasure.GetChecksumInfo(partNumber)
				partPath := pathJoin(object, srcDataDir, fmt.Sprintf("part.%d", partNumber))
				readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo,
					checksumInfo.Hash, erasure.ShardSize())
			}
			writers := make([]io.Writer, len(outDatedDisks))
			for i, disk := range outDatedDisks {
				if disk == OfflineDisk {
					continue
				}
				partPath := pathJoin(tmpID, dstDataDir, fmt.Sprintf("part.%d", partNumber))
				if len(inlineBuffers) > 0 {
					// Small (inline) objects are healed into memory and
					// stored back inside xl.meta rather than as part files.
					inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, erasure.ShardFileSize(latestMeta.Size)+32))
					writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
				} else {
					writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath,
						tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
				}
			}
			err = erasure.Heal(ctx, readers, writers, partSize, bp)
			closeBitrotReaders(readers)
			closeBitrotWriters(writers)
			if err != nil {
				return result, toObjectErr(err, bucket, object)
			}

			// outDatedDisks that had write errors should not be
			// written to for remaining parts, so we nil it out.
			for i, disk := range outDatedDisks {
				if disk == OfflineDisk {
					continue
				}

				// A non-nil stale disk which did not receive
				// a healed part checksum had a write error.
				if writers[i] == nil {
					outDatedDisks[i] = nil
					disksToHealCount--
					continue
				}

				partsMetadata[i].DataDir = dstDataDir
				partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize)
				partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
					PartNumber: partNumber,
					Algorithm:  checksumAlgo,
					Hash:       bitrotWriterSum(writers[i]),
				})
				if len(inlineBuffers) > 0 && inlineBuffers[i] != nil {
					partsMetadata[i].Data = inlineBuffers[i].Bytes()
				} else {
					partsMetadata[i].Data = nil
				}
			}

			// If all disks are having errors, we give up.
			if disksToHealCount == 0 {
				return result, fmt.Errorf("all disks had write errors, unable to heal")
			}

		}

	}

	// Best-effort cleanup of the temporary heal location; quorum here is
	// a simple majority of the disks.
	defer er.deleteObject(context.Background(), minioMetaTmpBucket, tmpID, len(storageDisks)/2+1)

	// Rename from tmp location to the actual location.
	for i, disk := range outDatedDisks {
		if disk == OfflineDisk {
			continue
		}
		// record the index of the updated disks
		partsMetadata[i].Erasure.Index = i + 1

		// Attempt a rename now from healed data to final location.
		if err = disk.RenameData(ctx, minioMetaTmpBucket, tmpID, partsMetadata[i], bucket, object); err != nil {
			logger.LogIf(ctx, err)
			return result, toObjectErr(err, bucket, object)
		}

		// Remove any remaining parts from outdated disks from before transition.
		if partsMetadata[i].IsRemote() {
			rmDataDir := partsMetadata[i].DataDir
			disk.DeleteVol(ctx, pathJoin(bucket, encodeDirObject(object), rmDataDir), true)
		}

		// Flip this drive's reported after-state to OK in the heal result
		// (matched by endpoint string). NOTE(review): the inner `i` shadows
		// the outer loop's `i` - intentional here, the drive lists are
		// matched by endpoint, not by index.
		for i, v := range result.Before.Drives {
			if v.Endpoint == disk.String() {
				result.After.Drives[i].State = madmin.DriveStateOk
			}
		}
	}

	// Set the size of the object in the heal result
	result.ObjectSize = latestMeta.Size

	return result, nil
}
|
2016-11-16 19:42:23 -05:00
|
|
|
|
2018-05-10 19:53:42 -04:00
|
|
|
// healObjectDir - heals object directory specifically, this special call
// is needed since we do not have a special backend format for directories.
//
// A dangling directory (per isObjectDirDangling) is removed from all disks
// (when remove is set and not a dry-run); otherwise the directory volume is
// recreated on disks where it was missing.
func (er erasureObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool, remove bool) (hr madmin.HealResultItem, err error) {
	storageDisks := er.getDisks()
	storageEndpoints := er.getEndpoints()

	// Initialize heal result object
	hr = madmin.HealResultItem{
		Type:         madmin.HealItemObject,
		Bucket:       bucket,
		Object:       object,
		DiskCount:    len(storageDisks),
		ParityBlocks: er.defaultParityCount,
		DataBlocks:   len(storageDisks) - er.defaultParityCount,
		ObjectSize:   0,
	}

	hr.Before.Drives = make([]madmin.HealDriveInfo, len(storageDisks))
	hr.After.Drives = make([]madmin.HealDriveInfo, len(storageDisks))

	errs := statAllDirs(ctx, storageDisks, bucket, object)
	danglingObject := isObjectDirDangling(errs)
	if danglingObject {
		if !dryRun && remove {
			var wg sync.WaitGroup
			// Remove versions in bulk for each disk
			for index, disk := range storageDisks {
				if disk == nil {
					continue
				}
				wg.Add(1)
				go func(index int, disk StorageAPI) {
					defer wg.Done()
					// Best-effort delete; per-disk failures are ignored.
					_ = disk.Delete(ctx, bucket, object, false)
				}(index, disk)
			}
			wg.Wait()
			NSUpdated(bucket, object)
		}
	}

	// Prepare object creation in all disks
	for i, err := range errs {
		drive := storageEndpoints[i].String()
		switch err {
		case nil:
			hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateOk}
			hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateOk}
		case errDiskNotFound:
			hr.Before.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOffline}
			hr.After.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOffline}
		case errVolumeNotFound, errFileNotFound:
			// Bucket or prefix/directory not found
			hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing}
			hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing}
		default:
			hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateCorrupt}
			hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateCorrupt}
		}
	}
	if dryRun || danglingObject || isAllNotFound(errs) {
		// Nothing to do, file is already gone.
		return hr, toObjectErr(errFileNotFound, bucket, object)
	}
	for i, err := range errs {
		if err == errVolumeNotFound || err == errFileNotFound {
			// Bucket or prefix/directory not found
			merr := storageDisks[i].MakeVol(ctx, pathJoin(bucket, object))
			switch merr {
			case nil, errVolumeExists:
				hr.After.Drives[i].State = madmin.DriveStateOk
			case errDiskNotFound:
				hr.After.Drives[i].State = madmin.DriveStateOffline
			default:
				logger.LogIf(ctx, merr)
				hr.After.Drives[i].State = madmin.DriveStateCorrupt
			}
		}
	}
	return hr, nil
}
|
|
|
|
|
2018-10-02 20:13:51 -04:00
|
|
|
// Populates default heal result item entries with possible values when we are returning prematurely.
|
|
|
|
// This is to ensure that in any circumstance we are not returning empty arrays with wrong values.
|
2021-09-29 14:36:19 -04:00
|
|
|
func (er erasureObjects) defaultHealResult(lfi FileInfo, storageDisks []StorageAPI, storageEndpoints []Endpoint, errs []error, bucket, object, versionID string) madmin.HealResultItem {
|
2018-10-02 20:13:51 -04:00
|
|
|
// Initialize heal result object
|
|
|
|
result := madmin.HealResultItem{
|
2021-07-26 11:01:41 -04:00
|
|
|
Type: madmin.HealItemObject,
|
|
|
|
Bucket: bucket,
|
|
|
|
Object: object,
|
|
|
|
ObjectSize: lfi.Size,
|
|
|
|
VersionID: versionID,
|
|
|
|
DiskCount: len(storageDisks),
|
2018-10-02 20:13:51 -04:00
|
|
|
}
|
2021-05-25 12:34:27 -04:00
|
|
|
|
2020-11-23 12:12:17 -05:00
|
|
|
if lfi.IsValid() {
|
2021-05-24 16:39:38 -04:00
|
|
|
result.ParityBlocks = lfi.Erasure.ParityBlocks
|
|
|
|
} else {
|
|
|
|
// Default to most common configuration for erasure blocks.
|
2021-07-26 11:01:41 -04:00
|
|
|
result.ParityBlocks = er.defaultParityCount
|
2021-05-24 16:39:38 -04:00
|
|
|
}
|
2021-05-25 12:34:27 -04:00
|
|
|
result.DataBlocks = len(storageDisks) - result.ParityBlocks
|
2021-05-24 16:39:38 -04:00
|
|
|
|
2018-10-02 20:13:51 -04:00
|
|
|
for index, disk := range storageDisks {
|
|
|
|
if disk == nil {
|
|
|
|
result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
|
2020-06-10 20:10:31 -04:00
|
|
|
UUID: "",
|
2021-09-29 14:36:19 -04:00
|
|
|
Endpoint: storageEndpoints[index].String(),
|
2020-06-10 20:10:31 -04:00
|
|
|
State: madmin.DriveStateOffline,
|
2018-10-02 20:13:51 -04:00
|
|
|
})
|
|
|
|
result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
|
2020-06-10 20:10:31 -04:00
|
|
|
UUID: "",
|
2021-09-29 14:36:19 -04:00
|
|
|
Endpoint: storageEndpoints[index].String(),
|
2020-06-10 20:10:31 -04:00
|
|
|
State: madmin.DriveStateOffline,
|
2018-10-02 20:13:51 -04:00
|
|
|
})
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
driveState := madmin.DriveStateCorrupt
|
|
|
|
switch errs[index] {
|
|
|
|
case errFileNotFound, errVolumeNotFound:
|
|
|
|
driveState = madmin.DriveStateMissing
|
2021-07-26 11:01:41 -04:00
|
|
|
case nil:
|
|
|
|
driveState = madmin.DriveStateOk
|
2018-10-02 20:13:51 -04:00
|
|
|
}
|
|
|
|
result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
|
|
|
|
UUID: "",
|
2021-09-29 14:36:19 -04:00
|
|
|
Endpoint: storageEndpoints[index].String(),
|
2018-10-02 20:13:51 -04:00
|
|
|
State: driveState,
|
|
|
|
})
|
|
|
|
result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
|
|
|
|
UUID: "",
|
2021-09-29 14:36:19 -04:00
|
|
|
Endpoint: storageEndpoints[index].String(),
|
2018-10-02 20:13:51 -04:00
|
|
|
State: driveState,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2019-04-23 17:54:28 -04:00
|
|
|
// Stat all directories.
|
|
|
|
func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix string) []error {
|
2019-10-14 12:44:51 -04:00
|
|
|
g := errgroup.WithNErrs(len(storageDisks))
|
2019-04-23 17:54:28 -04:00
|
|
|
for index, disk := range storageDisks {
|
|
|
|
if disk == nil {
|
|
|
|
continue
|
|
|
|
}
|
2019-10-14 12:44:51 -04:00
|
|
|
index := index
|
|
|
|
g.Go(func() error {
|
2020-09-04 12:45:06 -04:00
|
|
|
entries, err := storageDisks[index].ListDir(ctx, bucket, prefix, 1)
|
2019-04-23 17:54:28 -04:00
|
|
|
if err != nil {
|
2019-10-14 12:44:51 -04:00
|
|
|
return err
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
|
|
|
if len(entries) > 0 {
|
2019-10-14 12:44:51 -04:00
|
|
|
return errVolumeNotEmpty
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
2019-10-14 12:44:51 -04:00
|
|
|
return nil
|
|
|
|
}, index)
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
|
|
|
|
2019-10-14 12:44:51 -04:00
|
|
|
return g.Wait()
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
|
|
|
|
2020-10-28 12:18:35 -04:00
|
|
|
// isAllNotFound will return if any element of the error slice is not
|
|
|
|
// errFileNotFound, errFileVersionNotFound or errVolumeNotFound.
|
|
|
|
// A 0 length slice will always return false.
|
|
|
|
func isAllNotFound(errs []error) bool {
|
|
|
|
for _, err := range errs {
|
|
|
|
if errors.Is(err, errFileNotFound) || errors.Is(err, errVolumeNotFound) || errors.Is(err, errFileVersionNotFound) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return len(errs) > 0
|
|
|
|
}
|
|
|
|
|
2019-04-23 17:54:28 -04:00
|
|
|
// ObjectDir is considered dangling/corrupted if any only
|
|
|
|
// if total disks - a combination of corrupted and missing
|
|
|
|
// files is lesser than N/2+1 number of disks.
|
2020-10-28 12:18:35 -04:00
|
|
|
// If no files were found false will be returned.
|
2019-04-23 17:54:28 -04:00
|
|
|
func isObjectDirDangling(errs []error) (ok bool) {
|
2020-03-30 12:48:24 -04:00
|
|
|
var found int
|
|
|
|
var notFound int
|
|
|
|
var foundNotEmpty int
|
|
|
|
var otherFound int
|
2019-04-23 17:54:28 -04:00
|
|
|
for _, readErr := range errs {
|
2020-03-30 12:48:24 -04:00
|
|
|
if readErr == nil {
|
|
|
|
found++
|
|
|
|
} else if readErr == errFileNotFound || readErr == errVolumeNotFound {
|
|
|
|
notFound++
|
|
|
|
} else if readErr == errVolumeNotEmpty {
|
|
|
|
foundNotEmpty++
|
|
|
|
} else {
|
|
|
|
otherFound++
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
|
|
|
}
|
2020-10-28 12:18:35 -04:00
|
|
|
found = found + foundNotEmpty + otherFound
|
|
|
|
return found < notFound && found > 0
|
2019-04-23 17:54:28 -04:00
|
|
|
}
|
|
|
|
|
// purgeObjectDangling checks whether (bucket, object, versionID) is a
// dangling object — one whose surviving metadata/parts can never be
// reconstructed — and, when opts.Remove is set (and not a dry run),
// deletes it from the namespace. It always returns a fully-populated
// heal result describing per-drive state.
func (er erasureObjects) purgeObjectDangling(ctx context.Context, bucket, object, versionID string,
	metaArr []FileInfo, errs []error, dataErrs []error, opts madmin.HealOpts) (madmin.HealResultItem, error) {

	storageDisks := er.getDisks()
	storageEndpoints := er.getEndpoints()
	// Check if the object is dangling, if yes and user requested
	// remove we simply delete it from namespace.
	m, ok := isObjectDangling(metaArr, errs, dataErrs)
	if ok {
		// Derive erasure parameters from the surviving metadata when
		// available, otherwise fall back to the pool defaults.
		parityBlocks := m.Erasure.ParityBlocks
		if m.Erasure.ParityBlocks == 0 {
			parityBlocks = er.defaultParityCount
		}
		dataBlocks := m.Erasure.DataBlocks
		if m.Erasure.DataBlocks == 0 {
			dataBlocks = len(storageDisks) - parityBlocks
		}
		// With an even data/parity split, bump write quorum by one so a
		// bare N/2 success cannot be treated as a committed delete.
		writeQuorum := dataBlocks
		if dataBlocks == parityBlocks {
			writeQuorum++
		}
		var err error
		var returnNotFound bool
		if !opts.DryRun && opts.Remove {
			err = er.deleteObjectVersion(ctx, bucket, object, writeQuorum, FileInfo{
				VersionID: versionID,
			}, false)

			// If Delete was successful, make sure to return the appropriate error
			// and heal result appropriate with delete's error messages.
			// All per-drive entries are rewritten with the delete outcome.
			errs = make([]error, len(errs))
			for i := range errs {
				errs[i] = err
			}
			if err == nil {
				// Dangling object successfully purged, size is '0'
				m.Size = 0
			}

			// Delete successfully purged dangling content, return ObjectNotFound/VersionNotFound instead.
			if countErrs(errs, nil) == len(errs) {
				returnNotFound = true
			}
		}
		if returnNotFound {
			err = toObjectErr(errFileNotFound, bucket, object)
			if versionID != "" {
				err = toObjectErr(errFileVersionNotFound, bucket, object, versionID)
			}
			return er.defaultHealResult(m, storageDisks, storageEndpoints,
				errs, bucket, object, versionID), err
		}
		return er.defaultHealResult(m, storageDisks, storageEndpoints,
			errs, bucket, object, versionID), toObjectErr(err, bucket, object, versionID)
	}

	// Object is not dangling: surface a read-quorum error (if any) computed
	// against the pool's default parity count.
	readQuorum := len(storageDisks) - er.defaultParityCount

	err := toObjectErr(reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum),
		bucket, object, versionID)
	return er.defaultHealResult(m, storageDisks, storageEndpoints, errs, bucket, object, versionID), err
}
|
|
|
|
|
2019-02-05 20:58:48 -05:00
|
|
|
// Object is considered dangling/corrupted if any only
|
|
|
|
// if total disks - a combination of corrupted and missing
|
|
|
|
// files is lesser than number of data blocks.
|
2020-06-12 23:04:01 -04:00
|
|
|
func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (validMeta FileInfo, ok bool) {
|
2019-02-05 20:58:48 -05:00
|
|
|
// We can consider an object data not reliable
|
2020-06-12 23:04:01 -04:00
|
|
|
// when er.meta is not found in read quorum disks.
|
|
|
|
// or when er.meta is not readable in read quorum disks.
|
2020-07-07 23:54:09 -04:00
|
|
|
var notFoundErasureMeta, corruptedErasureMeta int
|
2019-02-05 20:58:48 -05:00
|
|
|
for _, readErr := range errs {
|
2020-08-03 21:17:48 -04:00
|
|
|
if errors.Is(readErr, errFileNotFound) || errors.Is(readErr, errFileVersionNotFound) {
|
2020-07-07 23:54:09 -04:00
|
|
|
notFoundErasureMeta++
|
2020-08-03 21:17:48 -04:00
|
|
|
} else if errors.Is(readErr, errCorruptedFormat) {
|
2020-07-07 23:54:09 -04:00
|
|
|
corruptedErasureMeta++
|
2019-02-05 20:58:48 -05:00
|
|
|
}
|
|
|
|
}
|
2019-03-26 17:57:44 -04:00
|
|
|
var notFoundParts int
|
|
|
|
for i := range dataErrs {
|
|
|
|
// Only count part errors, if the error is not
|
2020-06-12 23:04:01 -04:00
|
|
|
// same as er.meta error. This is to avoid
|
|
|
|
// double counting when both parts and er.meta
|
2019-03-26 17:57:44 -04:00
|
|
|
// are not available.
|
|
|
|
if errs[i] != dataErrs[i] {
|
2020-08-03 21:17:48 -04:00
|
|
|
if IsErr(dataErrs[i], []error{
|
|
|
|
errFileNotFound,
|
|
|
|
errFileVersionNotFound,
|
|
|
|
}...) {
|
2019-03-26 17:57:44 -04:00
|
|
|
notFoundParts++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-02-05 20:58:48 -05:00
|
|
|
|
|
|
|
for _, m := range metaArr {
|
|
|
|
if !m.IsValid() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
validMeta = m
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
2021-05-14 19:50:47 -04:00
|
|
|
if validMeta.Deleted || validMeta.IsRemote() {
|
2021-01-20 16:12:12 -05:00
|
|
|
// notFoundParts is ignored since a
|
|
|
|
// - delete marker does not have any parts
|
|
|
|
// - transition status of complete has no parts
|
2020-10-15 16:06:40 -04:00
|
|
|
return validMeta, corruptedErasureMeta+notFoundErasureMeta > len(errs)/2
|
2020-07-07 23:54:09 -04:00
|
|
|
}
|
|
|
|
|
2019-02-05 20:58:48 -05:00
|
|
|
// We couldn't find any valid meta we are indeed corrupted, return true right away.
|
|
|
|
if validMeta.Erasure.DataBlocks == 0 {
|
|
|
|
return validMeta, true
|
|
|
|
}
|
|
|
|
|
2019-03-26 17:57:44 -04:00
|
|
|
// We have valid meta, now verify if we have enough files with parity blocks.
|
2020-07-07 23:54:09 -04:00
|
|
|
return validMeta, corruptedErasureMeta+notFoundErasureMeta+notFoundParts > validMeta.Erasure.ParityBlocks
|
2019-02-05 20:58:48 -05:00
|
|
|
}
|
|
|
|
|
2019-03-26 17:57:44 -04:00
|
|
|
// HealObject - heal the given object, automatically deletes the object if stale/corrupted if `remove` is true.
|
2020-06-12 23:04:01 -04:00
|
|
|
func (er erasureObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) {
|
2021-09-02 23:56:13 -04:00
|
|
|
defer func() {
|
|
|
|
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
|
|
|
|
err = nil
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2018-08-20 19:58:47 -04:00
|
|
|
// Create context that also contains information about the object and bucket.
|
|
|
|
// The top level handler might not have this information.
|
|
|
|
reqInfo := logger.GetReqInfo(ctx)
|
|
|
|
var newReqInfo *logger.ReqInfo
|
|
|
|
if reqInfo != nil {
|
2018-11-19 17:47:03 -05:00
|
|
|
newReqInfo = logger.NewReqInfo(reqInfo.RemoteHost, reqInfo.UserAgent, reqInfo.DeploymentID, reqInfo.RequestID, reqInfo.API, bucket, object)
|
2018-08-20 19:58:47 -04:00
|
|
|
} else {
|
2018-11-19 17:47:03 -05:00
|
|
|
newReqInfo = logger.NewReqInfo("", "", globalDeploymentID, "", "Heal", bucket, object)
|
2018-08-20 19:58:47 -04:00
|
|
|
}
|
2020-04-09 12:30:02 -04:00
|
|
|
healCtx := logger.SetReqInfo(GlobalContext, newReqInfo)
|
2018-08-20 19:58:47 -04:00
|
|
|
|
2018-05-10 19:53:42 -04:00
|
|
|
// Healing directories handle it separately.
|
2019-12-06 02:16:06 -05:00
|
|
|
if HasSuffix(object, SlashSeparator) {
|
2020-06-12 23:04:01 -04:00
|
|
|
return er.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove)
|
2018-05-10 19:53:42 -04:00
|
|
|
}
|
|
|
|
|
Revert heal locks (#12365)
A lot of healing is likely to be on non-existing objects and
locks are very expensive and will slow down scanning
significantly.
In cases where all are valid or, all are broken allow
rejection without locking.
Keep the existing behavior, but move the check for
dangling objects to after the lock has been acquired.
```
_, err = getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
return er.purgeObjectDangling(ctx, bucket, object, versionID, partsMetadata, errs, []error{}, opts)
}
```
Revert "heal: Hold lock when reading xl.meta from disks (#12362)"
This reverts commit abd32065aaae4080396a1b4b04a110454368b028
2021-05-25 20:02:06 -04:00
|
|
|
storageDisks := er.getDisks()
|
|
|
|
storageEndpoints := er.getEndpoints()
|
|
|
|
|
2021-03-29 20:00:55 -04:00
|
|
|
// When versionID is empty, we read directly from the `null` versionID for healing.
|
|
|
|
if versionID == "" {
|
|
|
|
versionID = nullVersionID
|
|
|
|
}
|
|
|
|
|
Revert heal locks (#12365)
A lot of healing is likely to be on non-existing objects and
locks are very expensive and will slow down scanning
significantly.
In cases where all are valid or, all are broken allow
rejection without locking.
Keep the existing behavior, but move the check for
dangling objects to after the lock has been acquired.
```
_, err = getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
return er.purgeObjectDangling(ctx, bucket, object, versionID, partsMetadata, errs, []error{}, opts)
}
```
Revert "heal: Hold lock when reading xl.meta from disks (#12362)"
This reverts commit abd32065aaae4080396a1b4b04a110454368b028
2021-05-25 20:02:06 -04:00
|
|
|
// Perform quick read without lock.
|
|
|
|
// This allows to quickly check if all is ok or all are missing.
|
2021-07-26 19:51:09 -04:00
|
|
|
_, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID, false)
|
2020-10-28 12:18:35 -04:00
|
|
|
if isAllNotFound(errs) {
|
2020-11-23 21:50:53 -05:00
|
|
|
err = toObjectErr(errFileNotFound, bucket, object)
|
|
|
|
if versionID != "" {
|
|
|
|
err = toObjectErr(errFileVersionNotFound, bucket, object, versionID)
|
|
|
|
}
|
2020-11-23 12:12:17 -05:00
|
|
|
// Nothing to do, file is already gone.
|
2021-07-26 11:01:41 -04:00
|
|
|
return er.defaultHealResult(FileInfo{}, storageDisks, storageEndpoints,
|
|
|
|
errs, bucket, object, versionID), err
|
2019-02-05 20:58:48 -05:00
|
|
|
}
|
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
// Heal the object.
|
2021-04-07 13:39:48 -04:00
|
|
|
return er.healObject(healCtx, bucket, object, versionID, opts)
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|