2016-06-17 14:57:51 -04:00
|
|
|
/*
|
2019-04-09 14:39:42 -04:00
|
|
|
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
|
2016-06-17 14:57:51 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-05-25 19:42:31 -04:00
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
import (
|
2018-03-14 15:01:47 -04:00
|
|
|
"context"
|
2016-11-16 19:42:23 -05:00
|
|
|
"fmt"
|
2019-01-17 07:58:18 -05:00
|
|
|
"io"
|
2016-11-16 19:42:23 -05:00
|
|
|
"sync"
|
2019-01-30 13:53:57 -05:00
|
|
|
"time"
|
2017-11-25 14:58:29 -05:00
|
|
|
|
2018-04-05 18:04:40 -04:00
|
|
|
"github.com/minio/minio/cmd/logger"
|
2018-01-22 17:54:55 -05:00
|
|
|
"github.com/minio/minio/pkg/madmin"
|
2016-11-16 19:42:23 -05:00
|
|
|
)
|
2016-10-17 05:10:23 -04:00
|
|
|
|
2018-04-09 13:25:41 -04:00
|
|
|
func (xl xlObjects) ReloadFormat(ctx context.Context, dryRun bool) error {
|
2018-04-10 12:36:37 -04:00
|
|
|
logger.LogIf(ctx, NotImplemented{})
|
|
|
|
return NotImplemented{}
|
2018-04-09 13:25:41 -04:00
|
|
|
}
|
|
|
|
|
2018-03-14 15:01:47 -04:00
|
|
|
func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
|
2018-04-05 18:04:40 -04:00
|
|
|
logger.LogIf(ctx, NotImplemented{})
|
|
|
|
return madmin.HealResultItem{}, NotImplemented{}
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// Heals a bucket if it doesn't exist on one of the disks, additionally
|
|
|
|
// also heals the missing entries for bucket metadata files
|
|
|
|
// `policy.json, notification.xml, listeners.json`.
|
2019-02-05 20:58:48 -05:00
|
|
|
func (xl xlObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (
|
2019-02-10 22:53:13 -05:00
|
|
|
result madmin.HealResultItem, err error) {
|
2018-01-22 17:54:55 -05:00
|
|
|
|
2018-03-16 18:09:31 -04:00
|
|
|
storageDisks := xl.getDisks()
|
|
|
|
|
2017-12-22 06:28:13 -05:00
|
|
|
// get write quorum for an object
|
2018-03-16 18:09:31 -04:00
|
|
|
writeQuorum := len(storageDisks)/2 + 1
|
2017-12-22 06:28:13 -05:00
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
// Heal bucket.
|
2019-02-10 22:53:13 -05:00
|
|
|
return healBucket(ctx, storageDisks, bucket, writeQuorum, dryRun)
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
2016-10-17 05:10:23 -04:00
|
|
|
|
2016-11-19 20:37:57 -05:00
|
|
|
// Heal bucket - create buckets on disks where it does not exist.
//
// Stats the bucket volume on every disk concurrently, records a
// before/after drive state per disk, and (unless dryRun) creates the
// volume on disks where it is missing. If the accumulated per-disk
// errors fail write quorum, any buckets created here are rolled back.
func healBucket(ctx context.Context, storageDisks []StorageAPI, bucket string, writeQuorum int,
	dryRun bool) (res madmin.HealResultItem, err error) {

	// Initialize sync waitgroup.
	var wg sync.WaitGroup

	// Initialize list of errors, one slot per disk (index-addressed so
	// the goroutines below never contend on the same element).
	var dErrs = make([]error, len(storageDisks))

	// Disk states slices - per-disk drive state before and after healing.
	beforeState := make([]string, len(storageDisks))
	afterState := make([]string, len(storageDisks))

	// Make a volume entry on all underlying storage disks.
	for index, disk := range storageDisks {
		if disk == nil {
			// Disk is offline; record it and move on.
			dErrs[index] = errDiskNotFound
			beforeState[index] = madmin.DriveStateOffline
			afterState[index] = madmin.DriveStateOffline
			continue
		}
		wg.Add(1)

		// Make a volume inside a go-routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if _, serr := disk.StatVol(bucket); serr != nil {
				// Disk went offline between the nil check and StatVol.
				if serr == errDiskNotFound {
					beforeState[index] = madmin.DriveStateOffline
					afterState[index] = madmin.DriveStateOffline
					dErrs[index] = serr
					return
				}
				// Any error other than "volume missing" means the
				// drive state cannot be trusted - mark corrupt.
				if serr != errVolumeNotFound {
					beforeState[index] = madmin.DriveStateCorrupt
					afterState[index] = madmin.DriveStateCorrupt
					dErrs[index] = serr
					return
				}

				// Volume is simply missing on this disk.
				beforeState[index] = madmin.DriveStateMissing
				afterState[index] = madmin.DriveStateMissing

				// mutate only if not a dry-run
				if dryRun {
					return
				}

				makeErr := disk.MakeVol(bucket)
				dErrs[index] = makeErr
				if makeErr == nil {
					// Healed successfully on this disk.
					afterState[index] = madmin.DriveStateOk
				}
				return
			}
			// Volume already present - nothing to heal here.
			beforeState[index] = madmin.DriveStateOk
			afterState[index] = madmin.DriveStateOk
		}(index, disk)
	}

	// Wait for all make vol to finish.
	wg.Wait()

	// Initialize heal result info
	res = madmin.HealResultItem{
		Type:      madmin.HealItemBucket,
		Bucket:    bucket,
		DiskCount: len(storageDisks),
	}
	// Only disks that are non-nil contribute drive entries; offline
	// (nil) disks have no endpoint string to report.
	for i, before := range beforeState {
		if storageDisks[i] != nil {
			drive := storageDisks[i].String()
			res.Before.Drives = append(res.Before.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    before,
			})
			res.After.Drives = append(res.After.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    afterState[i],
			})
		}
	}

	// Reduce the per-disk errors to a single quorum verdict.
	reducedErr := reduceWriteQuorumErrs(ctx, dErrs, bucketOpIgnoredErrs, writeQuorum)
	if reducedErr == errXLWriteQuorum {
		// Purge successfully created buckets if we don't have writeQuorum.
		undoMakeBucket(storageDisks, bucket)
	}
	return res, reducedErr
}
|
|
|
|
|
2017-01-19 12:34:18 -05:00
|
|
|
// listAllBuckets lists all buckets from all disks. It also
|
|
|
|
// returns the occurrence of each buckets in all disks
|
2018-01-22 17:54:55 -05:00
|
|
|
func listAllBuckets(storageDisks []StorageAPI) (buckets map[string]VolInfo,
|
|
|
|
bucketsOcc map[string]int, err error) {
|
|
|
|
|
2017-01-19 12:34:18 -05:00
|
|
|
buckets = make(map[string]VolInfo)
|
|
|
|
bucketsOcc = make(map[string]int)
|
2016-11-16 19:42:23 -05:00
|
|
|
for _, disk := range storageDisks {
|
|
|
|
if disk == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
var volsInfo []VolInfo
|
|
|
|
volsInfo, err = disk.ListVols()
|
2018-01-22 17:54:55 -05:00
|
|
|
if err != nil {
|
2018-04-10 12:36:37 -04:00
|
|
|
if IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
|
2018-01-22 17:54:55 -05:00
|
|
|
continue
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
2018-02-13 20:03:50 -05:00
|
|
|
return nil, nil, err
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
for _, volInfo := range volsInfo {
|
|
|
|
// StorageAPI can send volume names which are
|
|
|
|
// incompatible with buckets - these are
|
|
|
|
// skipped, like the meta-bucket.
|
2019-04-04 02:10:37 -04:00
|
|
|
if isReservedOrInvalidBucket(volInfo.Name, false) {
|
2018-01-22 17:54:55 -05:00
|
|
|
continue
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
// Increase counter per bucket name
|
|
|
|
bucketsOcc[volInfo.Name]++
|
|
|
|
// Save volume info under bucket name
|
|
|
|
buckets[volInfo.Name] = volInfo
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
|
|
|
}
|
2018-02-13 20:03:50 -05:00
|
|
|
return buckets, bucketsOcc, nil
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
|
|
|
|
2019-01-30 13:53:57 -05:00
|
|
|
// Only heal on disks where we are sure that healing is needed. We can expand
|
|
|
|
// this list as and when we figure out more errors can be added to this list safely.
|
|
|
|
func shouldHealObjectOnDisk(xlErr, dataErr error, meta xlMetaV1, quorumModTime time.Time) bool {
|
|
|
|
switch xlErr {
|
|
|
|
case errFileNotFound:
|
|
|
|
return true
|
|
|
|
case errCorruptedFormat:
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if xlErr == nil {
|
|
|
|
// If xl.json was read fine but there is some problem with the part.N files.
|
|
|
|
if dataErr == errFileNotFound {
|
|
|
|
return true
|
|
|
|
}
|
2019-07-12 19:29:44 -04:00
|
|
|
if dataErr == errFileUnexpectedSize {
|
|
|
|
return true
|
|
|
|
}
|
2019-07-08 16:51:18 -04:00
|
|
|
if _, ok := dataErr.(HashMismatchError); ok {
|
2019-01-30 13:53:57 -05:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
if quorumModTime != meta.Stat.ModTime {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// Heals an object by re-writing corrupt/missing erasure blocks.
//
// The flow is: classify every disk's state from the xl.json and part
// errors, bail out early for dangling objects / read-quorum failure /
// dry-run, then erasure-heal each part into a temporary location and
// finally rename the healed data into place, updating the heal result
// drive states as disks succeed.
func (xl xlObjects) healObject(ctx context.Context, bucket string, object string,
	partsMetadata []xlMetaV1, errs []error, latestXLMeta xlMetaV1,
	dryRun bool, remove bool, scanMode madmin.HealScanMode) (result madmin.HealResultItem, err error) {

	dataBlocks := latestXLMeta.Erasure.DataBlocks

	storageDisks := xl.getDisks()

	// List of disks having latest version of the object xl.json
	// (by modtime).
	latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// List of disks having all parts as per latest xl.json.
	availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode)

	// Initialize heal result object
	result = madmin.HealResultItem{
		Type:         madmin.HealItemObject,
		Bucket:       bucket,
		Object:       object,
		DiskCount:    len(storageDisks),
		ParityBlocks: latestXLMeta.Erasure.ParityBlocks,
		DataBlocks:   latestXLMeta.Erasure.DataBlocks,

		// Initialize object size to -1, so we can detect if we are
		// unable to reliably find the object size.
		ObjectSize: -1,
	}

	// Loop to find number of disks with valid data, per-drive
	// data state and a list of outdated disks on which data needs
	// to be healed.
	outDatedDisks := make([]StorageAPI, len(storageDisks))
	numAvailableDisks := 0
	disksToHealCount := 0
	for i, v := range availableDisks {
		driveState := ""
		switch {
		case v != nil:
			driveState = madmin.DriveStateOk
			numAvailableDisks++
			// If data is sane on any one disk, we can
			// extract the correct object size.
			result.ObjectSize = partsMetadata[i].Stat.Size
			result.ParityBlocks = partsMetadata[i].Erasure.ParityBlocks
			result.DataBlocks = partsMetadata[i].Erasure.DataBlocks
		case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound:
			driveState = madmin.DriveStateOffline
		case errs[i] == errFileNotFound, errs[i] == errVolumeNotFound:
			fallthrough
		case dataErrs[i] == errFileNotFound, dataErrs[i] == errVolumeNotFound:
			driveState = madmin.DriveStateMissing
		default:
			// all remaining cases imply corrupt data/metadata
			driveState = madmin.DriveStateCorrupt
		}

		var drive string
		if storageDisks[i] != nil {
			drive = storageDisks[i].String()
		}
		if shouldHealObjectOnDisk(errs[i], dataErrs[i], partsMetadata[i], modTime) {
			// This disk needs healing; remember it and record its
			// (currently unhealed) state in both before and after.
			outDatedDisks[i] = storageDisks[i]
			disksToHealCount++
			result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    driveState,
			})
			result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    driveState,
			})
			continue
		}
		// Disk does not need healing; report its state unchanged.
		result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
			UUID:     "",
			Endpoint: drive,
			State:    driveState,
		})
		result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
			UUID:     "",
			Endpoint: drive,
			State:    driveState,
		})
	}

	// If less than read quorum number of disks have all the parts
	// of the data, we can't reconstruct the erasure-coded data.
	if numAvailableDisks < dataBlocks {
		// Check if xl.json, and corresponding parts are also missing.
		if m, ok := isObjectDangling(partsMetadata, errs, dataErrs); ok {
			writeQuorum := m.Erasure.DataBlocks + 1
			if m.Erasure.DataBlocks == 0 {
				// No valid metadata; fall back to a simple majority.
				writeQuorum = len(storageDisks)/2 + 1
			}
			// Dangling objects are deleted (not healed) when the
			// caller asked for removal and this is not a dry-run.
			if !dryRun && remove {
				err = xl.deleteObject(ctx, bucket, object, writeQuorum, false)
			}
			return defaultHealResult(latestXLMeta, storageDisks, errs, bucket, object), err
		}
		return result, toObjectErr(errXLReadQuorum, bucket, object)
	}

	if disksToHealCount == 0 {
		// Nothing to heal!
		return result, nil
	}

	// After this point, only have to repair data on disk - so
	// return if it is a dry-run
	if dryRun {
		return result, nil
	}

	// Latest xlMetaV1 for reference. If a valid metadata is not
	// present, it is as good as object not found.
	latestMeta, pErr := pickValidXLMeta(ctx, partsMetadata, modTime, dataBlocks)
	if pErr != nil {
		return result, toObjectErr(pErr, bucket, object)
	}

	// Clear data files of the object on outdated disks
	for _, disk := range outDatedDisks {
		// Before healing outdated disks, we need to remove
		// xl.json and part files from "bucket/object/" so
		// that rename(minioMetaBucket, "tmp/tmpuuid/",
		// "bucket", "object/") succeeds.
		if disk == nil {
			// Not an outdated disk.
			continue
		}

		// List and delete the object directory,
		files, derr := disk.ListDir(bucket, object, -1, "")
		if derr == nil {
			for _, entry := range files {
				// Deletion errors are intentionally ignored here;
				// the later rename will surface real failures.
				_ = disk.DeleteFile(bucket,
					pathJoin(object, entry))
			}
		}
	}

	// Reorder so that we have data disks first and parity disks next.
	latestDisks = shuffleDisks(availableDisks, latestMeta.Erasure.Distribution)
	outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
	partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)
	// Seed the outdated disks' metadata from the chosen valid copy.
	for i := range outDatedDisks {
		if outDatedDisks[i] == nil {
			continue
		}
		partsMetadata[i] = newXLMetaFromXLMeta(latestMeta)
	}

	// We write at temporary location and then rename to final location.
	tmpID := mustGetUUID()

	// Heal each part. erasureHealFile() will write the healed
	// part to .minio/tmp/uuid/ which needs to be renamed later to
	// the final location.
	erasure, err := NewErasure(ctx, latestMeta.Erasure.DataBlocks,
		latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize)
	if err != nil {
		return result, toObjectErr(err, bucket, object)
	}

	erasureInfo := latestMeta.Erasure
	for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ {
		partName := latestMeta.Parts[partIndex].Name
		partSize := latestMeta.Parts[partIndex].Size
		partActualSize := latestMeta.Parts[partIndex].ActualSize
		partNumber := latestMeta.Parts[partIndex].Number
		tillOffset := erasure.ShardFileTillOffset(0, partSize, partSize)
		// Bitrot-verified readers over the up-to-date disks; offline
		// slots stay nil.
		readers := make([]io.ReaderAt, len(latestDisks))
		checksumAlgo := erasureInfo.GetChecksumInfo(partName).Algorithm
		for i, disk := range latestDisks {
			if disk == OfflineDisk {
				continue
			}
			checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partName)
			readers[i] = newBitrotReader(disk, bucket, pathJoin(object, partName), tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
		}
		// Bitrot writers into the tmp location on outdated disks.
		writers := make([]io.Writer, len(outDatedDisks))
		for i, disk := range outDatedDisks {
			if disk == OfflineDisk {
				continue
			}
			writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, pathJoin(tmpID, partName), tillOffset, checksumAlgo, erasure.ShardSize())
		}
		hErr := erasure.Heal(ctx, readers, writers, partSize)
		closeBitrotReaders(readers)
		closeBitrotWriters(writers)
		if hErr != nil {
			return result, toObjectErr(hErr, bucket, object)
		}
		// outDatedDisks that had write errors should not be
		// written to for remaining parts, so we nil it out.
		for i, disk := range outDatedDisks {
			if disk == nil {
				continue
			}
			// A non-nil stale disk which did not receive
			// a healed part checksum had a write error.
			if writers[i] == nil {
				outDatedDisks[i] = nil
				disksToHealCount--
				continue
			}
			partsMetadata[i].AddObjectPart(partNumber, partName, "", partSize, partActualSize)
			partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{partName, checksumAlgo, bitrotWriterSum(writers[i])})
		}

		// If all disks are having errors, we give up.
		if disksToHealCount == 0 {
			return result, fmt.Errorf("all disks without up-to-date data had write errors")
		}
	}

	// Cleanup in case of xl.json writing failure
	writeQuorum := latestMeta.Erasure.DataBlocks + 1
	defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum, false)

	// Generate and write `xl.json` generated from other disks.
	outDatedDisks, aErr := writeUniqueXLMetadata(ctx, outDatedDisks, minioMetaTmpBucket, tmpID,
		partsMetadata, diskCount(outDatedDisks))
	if aErr != nil {
		return result, toObjectErr(aErr, bucket, object)
	}

	// Rename from tmp location to the actual location.
	for _, disk := range outDatedDisks {
		if disk == nil {
			continue
		}

		// Attempt a rename now from healed data to final location.
		aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket,
			retainSlash(object))
		if aErr != nil {
			logger.LogIf(ctx, aErr)
			return result, toObjectErr(aErr, bucket, object)
		}

		// Flip the healed disk's after-state to OK, matched by endpoint.
		for i, v := range result.Before.Drives {
			if v.Endpoint == disk.String() {
				result.After.Drives[i].State = madmin.DriveStateOk
			}
		}
	}

	// Set the size of the object in the heal result
	result.ObjectSize = latestMeta.Stat.Size

	return result, nil
}
|
2016-11-16 19:42:23 -05:00
|
|
|
|
2018-05-10 19:53:42 -04:00
|
|
|
// healObjectDir - heals object directory specifically, this special call
// is needed since we do not have a special backend format for directories.
//
// It stats the directory on every disk, deletes dangling copies, records
// per-drive before/after states, and (unless dryRun) re-creates the
// directory on disks where it was missing.
func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool) (hr madmin.HealResultItem, err error) {
	storageDisks := xl.getDisks()

	// Initialize heal result object
	hr = madmin.HealResultItem{
		Type:      madmin.HealItemObject,
		Bucket:    bucket,
		Object:    object,
		DiskCount: len(storageDisks),
		// Directories carry no erasure-coded data; these defaults
		// mirror an even data/parity split over all disks.
		ParityBlocks: len(storageDisks) / 2,
		DataBlocks:   len(storageDisks) / 2,
		ObjectSize:   0,
	}

	hr.Before.Drives = make([]madmin.HealDriveInfo, len(storageDisks))
	hr.After.Drives = make([]madmin.HealDriveInfo, len(storageDisks))

	errs := statAllDirs(ctx, storageDisks, bucket, object)
	// A dangling directory (missing on a majority of disks) is removed
	// from the disks where it still exists.
	if isObjectDirDangling(errs) {
		for i, err := range errs {
			if err == nil {
				storageDisks[i].DeleteFile(bucket, object)
			}
		}
	}

	// Prepare object creation in all disks
	for i, err := range errs {
		var drive string
		if storageDisks[i] != nil {
			drive = storageDisks[i].String()
		}
		switch err {
		case nil:
			// NOTE(review): statAllDirs leaves a nil error for nil
			// (offline) disks as well, so a nil disk also lands here
			// and is reported Ok - confirm whether that is intended.
			hr.Before.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOk}
			hr.After.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOk}
		case errDiskNotFound:
			hr.Before.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOffline}
			hr.After.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOffline}
		case errVolumeNotFound, errFileNotFound:
			// Bucket or prefix/directory not found
			hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing}
			hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing}
		default:
			hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateCorrupt}
			hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateCorrupt}
		}
	}
	if dryRun {
		// Report states only; no mutation on a dry-run.
		return hr, nil
	}
	for i, err := range errs {
		switch err {
		case errVolumeNotFound, errFileNotFound:
			// Bucket or prefix/directory not found - recreate the
			// directory as a volume path on this disk.
			merr := storageDisks[i].MakeVol(pathJoin(bucket, object))
			switch merr {
			case nil, errVolumeExists:
				hr.After.Drives[i].State = madmin.DriveStateOk
			case errDiskNotFound:
				hr.After.Drives[i].State = madmin.DriveStateOffline
			default:
				logger.LogIf(ctx, merr)
				hr.After.Drives[i].State = madmin.DriveStateCorrupt
			}
		}
	}
	return hr, nil
}
|
|
|
|
|
2018-10-02 20:13:51 -04:00
|
|
|
// Populates default heal result item entries with possible values when we are returning prematurely.
|
|
|
|
// This is to ensure that in any circumstance we are not returning empty arrays with wrong values.
|
2019-03-26 17:57:44 -04:00
|
|
|
func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, errs []error, bucket, object string) madmin.HealResultItem {
|
2018-10-02 20:13:51 -04:00
|
|
|
// Initialize heal result object
|
|
|
|
result := madmin.HealResultItem{
|
|
|
|
Type: madmin.HealItemObject,
|
|
|
|
Bucket: bucket,
|
|
|
|
Object: object,
|
|
|
|
DiskCount: len(storageDisks),
|
|
|
|
|
|
|
|
// Initialize object size to -1, so we can detect if we are
|
|
|
|
// unable to reliably find the object size.
|
|
|
|
ObjectSize: -1,
|
|
|
|
}
|
2019-03-26 17:57:44 -04:00
|
|
|
if latestXLMeta.IsValid() {
|
|
|
|
result.ObjectSize = latestXLMeta.Stat.Size
|
|
|
|
}
|
2018-10-02 20:13:51 -04:00
|
|
|
|
|
|
|
for index, disk := range storageDisks {
|
|
|
|
if disk == nil {
|
|
|
|
result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
|
|
|
|
UUID: "",
|
|
|
|
State: madmin.DriveStateOffline,
|
|
|
|
})
|
|
|
|
result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
|
|
|
|
UUID: "",
|
|
|
|
State: madmin.DriveStateOffline,
|
|
|
|
})
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
drive := disk.String()
|
|
|
|
driveState := madmin.DriveStateCorrupt
|
|
|
|
switch errs[index] {
|
|
|
|
case errFileNotFound, errVolumeNotFound:
|
|
|
|
driveState = madmin.DriveStateMissing
|
|
|
|
}
|
|
|
|
result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
|
|
|
|
UUID: "",
|
|
|
|
Endpoint: drive,
|
|
|
|
State: driveState,
|
|
|
|
})
|
|
|
|
result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
|
|
|
|
UUID: "",
|
|
|
|
Endpoint: drive,
|
|
|
|
State: driveState,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 17:57:44 -04:00
|
|
|
if !latestXLMeta.IsValid() {
|
|
|
|
// Default to most common configuration for erasure blocks.
|
|
|
|
result.ParityBlocks = len(storageDisks) / 2
|
|
|
|
result.DataBlocks = len(storageDisks) / 2
|
|
|
|
} else {
|
|
|
|
result.ParityBlocks = latestXLMeta.Erasure.ParityBlocks
|
|
|
|
result.DataBlocks = latestXLMeta.Erasure.DataBlocks
|
|
|
|
}
|
2018-10-02 20:13:51 -04:00
|
|
|
|
|
|
|
return result
|
|
|
|
}
|
|
|
|
|
2019-04-23 17:54:28 -04:00
|
|
|
// Stat all directories.
|
|
|
|
func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix string) []error {
|
|
|
|
var errs = make([]error, len(storageDisks))
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for index, disk := range storageDisks {
|
|
|
|
if disk == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
wg.Add(1)
|
|
|
|
go func(index int, disk StorageAPI) {
|
|
|
|
defer wg.Done()
|
|
|
|
entries, err := disk.ListDir(bucket, prefix, 1, "")
|
|
|
|
if err != nil {
|
|
|
|
errs[index] = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if len(entries) > 0 {
|
|
|
|
errs[index] = errVolumeNotEmpty
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}(index, disk)
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
return errs
|
|
|
|
}
|
|
|
|
|
|
|
|
// ObjectDir is considered dangling/corrupted if any only
|
|
|
|
// if total disks - a combination of corrupted and missing
|
|
|
|
// files is lesser than N/2+1 number of disks.
|
|
|
|
func isObjectDirDangling(errs []error) (ok bool) {
|
|
|
|
var notFoundDir int
|
|
|
|
for _, readErr := range errs {
|
|
|
|
if readErr == errFileNotFound {
|
|
|
|
notFoundDir++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return notFoundDir > len(errs)/2
|
|
|
|
}
|
|
|
|
|
2019-02-05 20:58:48 -05:00
|
|
|
// Object is considered dangling/corrupted if any only
// if total disks - a combination of corrupted and missing
// files is lesser than number of data blocks.
//
// Returns the first valid xlMetaV1 found (zero value if none) and
// whether the object should be treated as dangling.
func isObjectDangling(metaArr []xlMetaV1, errs []error, dataErrs []error) (validMeta xlMetaV1, ok bool) {
	// We can consider an object data not reliable
	// when xl.json is not found in read quorum disks.
	// or when xl.json is not readable in read quorum disks.
	var notFoundXLJSON, corruptedXLJSON int
	for _, readErr := range errs {
		if readErr == errFileNotFound {
			notFoundXLJSON++
		} else if readErr == errCorruptedFormat {
			corruptedXLJSON++
		}
	}
	var notFoundParts int
	for i := range dataErrs {
		// Only count part errors, if the error is not
		// same as xl.json error. This is to avoid
		// double counting when both parts and xl.json
		// are not available.
		if errs[i] != dataErrs[i] {
			if dataErrs[i] == errFileNotFound {
				notFoundParts++
			}
		}
	}

	// Pick the first valid metadata as the reference copy.
	for _, m := range metaArr {
		if !m.IsValid() {
			continue
		}
		validMeta = m
		break
	}

	// We couldn't find any valid meta we are indeed corrupted, return true right away.
	if validMeta.Erasure.DataBlocks == 0 {
		return validMeta, true
	}

	// We have valid meta, now verify if we have enough files with parity blocks.
	return validMeta, corruptedXLJSON+notFoundXLJSON+notFoundParts > validMeta.Erasure.ParityBlocks
}
|
|
|
|
|
2019-03-26 17:57:44 -04:00
|
|
|
// HealObject - heal the given object, automatically deletes the object if stale/corrupted if `remove` is true.
func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, dryRun bool, remove bool, scanMode madmin.HealScanMode) (hr madmin.HealResultItem, err error) {
	// Create context that also contains information about the object and bucket.
	// The top level handler might not have this information.
	reqInfo := logger.GetReqInfo(ctx)
	var newReqInfo *logger.ReqInfo
	if reqInfo != nil {
		newReqInfo = logger.NewReqInfo(reqInfo.RemoteHost, reqInfo.UserAgent, reqInfo.DeploymentID, reqInfo.RequestID, reqInfo.API, bucket, object)
	} else {
		newReqInfo = logger.NewReqInfo("", "", globalDeploymentID, "", "Heal", bucket, object)
	}
	// healCtx is deliberately rooted in context.Background(), not ctx —
	// presumably so healing is not canceled with the caller's request.
	// NOTE(review): confirm this detachment from the caller's deadline is intended.
	healCtx := logger.SetReqInfo(context.Background(), newReqInfo)

	// Healing directories handle it separately.
	if hasSuffix(object, SlashSeparator) {
		return xl.healObjectDir(healCtx, bucket, object, dryRun)
	}

	storageDisks := xl.getDisks()

	// Read metadata files from all the disks
	partsMetadata, errs := readAllXLMetadata(healCtx, storageDisks, bucket, object)

	// Check if the object is dangling, if yes and user requested
	// remove we simply delete it from namespace.
	// Note: dataErrs is passed empty here, so only xl.json errors decide.
	if m, ok := isObjectDangling(partsMetadata, errs, []error{}); ok {
		// Default write quorum from the valid metadata; when no valid
		// metadata survived (DataBlocks == 0) fall back to a simple majority.
		writeQuorum := m.Erasure.DataBlocks + 1
		if m.Erasure.DataBlocks == 0 {
			writeQuorum = len(xl.getDisks())/2 + 1
		}
		if !dryRun && remove {
			err = xl.deleteObject(healCtx, bucket, object, writeQuorum, false)
		}
		return defaultHealResult(xlMetaV1{}, storageDisks, errs, bucket, object), err
	}

	latestXLMeta, err := getLatestXLMeta(healCtx, partsMetadata, errs)
	if err != nil {
		return defaultHealResult(xlMetaV1{}, storageDisks, errs, bucket, object), toObjectErr(err, bucket, object)
	}

	// Lock the object before healing.
	objectLock := xl.nsMutex.NewNSLock(ctx, bucket, object)
	if lerr := objectLock.GetRLock(globalHealingTimeout); lerr != nil {
		return defaultHealResult(latestXLMeta, storageDisks, errs, bucket, object), lerr
	}
	defer objectLock.RUnlock()

	// Count disks that returned any error while reading xl.json.
	errCount := 0
	for _, err := range errs {
		if err != nil {
			errCount++
		}
	}

	if errCount == len(errs) {
		// Only if we get errors from all the disks we return error. Else we need to
		// continue to return filled madmin.HealResultItem struct which includes info
		// on what disks the file is available etc.
		if reducedErr := reduceReadQuorumErrs(ctx, errs, nil, latestXLMeta.Erasure.DataBlocks); reducedErr != nil {
			// Second dangling check: quorum is lost, so best-effort
			// delete the remnants when the caller asked for removal.
			if m, ok := isObjectDangling(partsMetadata, errs, []error{}); ok {
				writeQuorum := m.Erasure.DataBlocks + 1
				if m.Erasure.DataBlocks == 0 {
					writeQuorum = len(storageDisks)/2 + 1
				}
				if !dryRun && remove {
					// NOTE(review): this path uses ctx while the earlier
					// dangling path uses healCtx — looks inconsistent; confirm
					// which context deleteObject should carry.
					err = xl.deleteObject(ctx, bucket, object, writeQuorum, false)
				}
			}
			return defaultHealResult(latestXLMeta, storageDisks, errs, bucket, object), toObjectErr(reducedErr, bucket, object)
		}
	}

	// Heal the object.
	return xl.healObject(healCtx, bucket, object, partsMetadata, errs, latestXLMeta, dryRun, remove, scanMode)
}
|