2016-06-17 14:57:51 -04:00
|
|
|
/*
|
|
|
|
* Minio Cloud Storage, (C) 2016 Minio, Inc.
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-05-25 19:42:31 -04:00
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"path"
|
2017-01-19 12:34:18 -05:00
|
|
|
"sort"
|
2016-11-16 19:42:23 -05:00
|
|
|
"sync"
|
2017-11-25 14:58:29 -05:00
|
|
|
|
|
|
|
"github.com/minio/minio/pkg/errors"
|
2018-01-22 17:54:55 -05:00
|
|
|
"github.com/minio/minio/pkg/madmin"
|
2016-11-16 19:42:23 -05:00
|
|
|
)
|
2016-10-17 05:10:23 -04:00
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
// healFormatXL - heals missing `format.json` on freshly or corrupted
// disks (missing format.json but does have erasure coded data in it).
//
// The per-drive before/after states are recorded in the returned
// madmin.HealResultItem. When dryRun is true no disk is mutated; only
// the "before" states (and unchanged "after" states) are reported.
func healFormatXL(storageDisks []StorageAPI, dryRun bool) (res madmin.HealResultItem,
	err error) {

	// Attempt to load all `format.json`.
	formatConfigs, sErrs := loadAllFormats(storageDisks)

	// Generic format check.
	// - if (no quorum) return error
	// - if (disks not recognized) // Always error.
	if err = genericFormatCheckXL(formatConfigs, sErrs); err != nil {
		return res, err
	}

	// Prepare heal-result
	res = madmin.HealResultItem{
		Type:      madmin.HealItemMetadata,
		Detail:    "disk-format",
		DiskCount: len(storageDisks),
	}
	res.InitDrives()

	// Existing formats are available (i.e. ok), so save it in
	// result, also populate disks to be healed. The per-disk load
	// error (sErrs[i]) decides the drive state when no format
	// config was read.
	for i, format := range formatConfigs {
		drive := globalEndpoints.GetString(i)
		switch {
		case format != nil:
			res.DriveInfo.Before[drive] = madmin.DriveStateOk
		case sErrs[i] == errCorruptedFormat:
			res.DriveInfo.Before[drive] = madmin.DriveStateCorrupt
		case sErrs[i] == errUnformattedDisk:
			res.DriveInfo.Before[drive] = madmin.DriveStateMissing
		default:
			res.DriveInfo.Before[drive] = madmin.DriveStateOffline
		}
	}
	// Copy "after" drive state too - branches below overwrite the
	// entries for drives they actually heal.
	for k, v := range res.DriveInfo.Before {
		res.DriveInfo.After[k] = v
	}

	numDisks := len(storageDisks)
	_, unformattedDiskCount, diskNotFoundCount,
		corruptedFormatCount, otherErrCount := formatErrsSummary(sErrs)

	// Exactly one of these branches runs; note the order matters -
	// corrupted disks are healed before (and in preference to) the
	// fresh-disk case when both kinds are present.
	switch {
	case unformattedDiskCount == numDisks:
		// all unformatted.
		if !dryRun {
			err = initFormatXL(storageDisks)
			if err != nil {
				return res, err
			}
			for i := 0; i < len(storageDisks); i++ {
				drive := globalEndpoints.GetString(i)
				res.DriveInfo.After[drive] = madmin.DriveStateOk
			}
		}
		return res, nil

	case diskNotFoundCount > 0:
		// Offline disks block healing entirely - we cannot know
		// their contents.
		return res, fmt.Errorf("cannot proceed with heal as %s",
			errSomeDiskOffline)

	case otherErrCount > 0:
		return res, fmt.Errorf("cannot proceed with heal as some disks had unhandled errors")

	case corruptedFormatCount > 0:
		// heal corrupted disks
		err = healFormatXLCorruptedDisks(storageDisks, formatConfigs,
			dryRun)
		if err != nil {
			return res, err
		}
		// success
		if !dryRun {
			for i := 0; i < len(storageDisks); i++ {
				drive := globalEndpoints.GetString(i)
				res.DriveInfo.After[drive] = madmin.DriveStateOk
			}
		}
		return res, nil

	case unformattedDiskCount > 0:
		// heal unformatted disks
		err = healFormatXLFreshDisks(storageDisks, formatConfigs,
			dryRun)
		if err != nil {
			return res, err
		}
		// success
		if !dryRun {
			for i := 0; i < len(storageDisks); i++ {
				drive := globalEndpoints.GetString(i)
				res.DriveInfo.After[drive] = madmin.DriveStateOk
			}
		}
		return res, nil
	}

	// No disk needed healing.
	return res, nil
}
|
|
|
|
|
|
|
|
// Heals a bucket if it doesn't exist on one of the disks, additionally
|
|
|
|
// also heals the missing entries for bucket metadata files
|
|
|
|
// `policy.json, notification.xml, listeners.json`.
|
2018-01-22 17:54:55 -05:00
|
|
|
func (xl xlObjects) HealBucket(bucket string, dryRun bool) (
|
|
|
|
results []madmin.HealResultItem, err error) {
|
|
|
|
|
|
|
|
if err = checkBucketExist(bucket, xl); err != nil {
|
|
|
|
return nil, err
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
|
|
|
|
2017-12-22 06:28:13 -05:00
|
|
|
// get write quorum for an object
|
|
|
|
writeQuorum := len(xl.storageDisks)/2 + 1
|
2018-01-12 23:34:52 -05:00
|
|
|
bucketLock := xl.nsMutex.NewNSLock(bucket, "")
|
2018-01-22 17:54:55 -05:00
|
|
|
if err = bucketLock.GetLock(globalHealingTimeout); err != nil {
|
|
|
|
return nil, err
|
2018-01-12 23:34:52 -05:00
|
|
|
}
|
|
|
|
defer bucketLock.Unlock()
|
2017-12-22 06:28:13 -05:00
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
// Heal bucket.
|
2018-01-22 17:54:55 -05:00
|
|
|
result, err := healBucket(xl.storageDisks, bucket, writeQuorum, dryRun)
|
|
|
|
if err != nil {
|
|
|
|
return results, err
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
results = append(results, result)
|
2016-11-16 19:42:23 -05:00
|
|
|
|
|
|
|
// Proceed to heal bucket metadata.
|
2018-01-22 17:54:55 -05:00
|
|
|
metaResults, err := healBucketMetadata(xl, bucket, dryRun)
|
|
|
|
results = append(results, metaResults...)
|
|
|
|
return results, err
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
2016-10-17 05:10:23 -04:00
|
|
|
|
2016-11-19 20:37:57 -05:00
|
|
|
// Heal bucket - create buckets on disks where it does not exist.
// One goroutine per disk stats the volume and creates it when absent
// (unless dryRun). Per-drive before/after states are collected into
// the returned madmin.HealResultItem. If fewer than writeQuorum disks
// succeed, any buckets created here are rolled back.
func healBucket(storageDisks []StorageAPI, bucket string, writeQuorum int,
	dryRun bool) (res madmin.HealResultItem, err error) {

	// Initialize sync waitgroup.
	var wg = &sync.WaitGroup{}

	// Initialize list of errors.
	var dErrs = make([]error, len(storageDisks))

	// Disk states slices. Each goroutine below writes only its own
	// index, so no mutex is needed.
	beforeState := make([]string, len(storageDisks))
	afterState := make([]string, len(storageDisks))

	// Make a volume entry on all underlying storage disks.
	for index, disk := range storageDisks {
		if disk == nil {
			// Offline disk - record the state and move on.
			dErrs[index] = errors.Trace(errDiskNotFound)
			beforeState[index] = madmin.DriveStateOffline
			afterState[index] = madmin.DriveStateOffline
			continue
		}
		wg.Add(1)

		// Make a volume inside a go-routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if _, err := disk.StatVol(bucket); err != nil {
				// Any stat error other than "volume not
				// found" is treated as corruption.
				if errors.Cause(err) != errVolumeNotFound {
					beforeState[index] = madmin.DriveStateCorrupt
					afterState[index] = madmin.DriveStateCorrupt
					dErrs[index] = errors.Trace(err)
					return
				}

				beforeState[index] = madmin.DriveStateMissing
				afterState[index] = madmin.DriveStateMissing

				// mutate only if not a dry-run
				if dryRun {
					return
				}

				makeErr := disk.MakeVol(bucket)
				dErrs[index] = errors.Trace(makeErr)
				if makeErr == nil {
					afterState[index] = madmin.DriveStateOk
				}
			} else {
				// Bucket already present on this disk.
				beforeState[index] = madmin.DriveStateOk
				afterState[index] = madmin.DriveStateOk
			}
		}(index, disk)
	}

	// Wait for all make vol to finish.
	wg.Wait()

	// Initialize heal result info
	res = madmin.HealResultItem{
		Type:      madmin.HealItemBucket,
		Bucket:    bucket,
		DiskCount: len(storageDisks),
	}
	res.InitDrives()
	for i, before := range beforeState {
		drive := globalEndpoints.GetString(i)
		res.DriveInfo.Before[drive] = before
		res.DriveInfo.After[drive] = afterState[i]
	}

	// Reduce per-disk errors to a single verdict under write quorum.
	reducedErr := reduceWriteQuorumErrs(dErrs, bucketOpIgnoredErrs, writeQuorum)
	if errors.Cause(reducedErr) == errXLWriteQuorum {
		// Purge successfully created buckets if we don't have writeQuorum.
		undoMakeBucket(storageDisks, bucket)
	}
	return res, reducedErr
}
|
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
// Heals all the metadata associated for a given bucket, this function
|
|
|
|
// heals `policy.json`, `notification.xml` and `listeners.json`.
|
2018-01-22 17:54:55 -05:00
|
|
|
func healBucketMetadata(xl xlObjects, bucket string, dryRun bool) (
|
|
|
|
results []madmin.HealResultItem, err error) {
|
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
healBucketMetaFn := func(metaPath string) error {
|
2018-01-22 17:54:55 -05:00
|
|
|
result, healErr := xl.HealObject(minioMetaBucket, metaPath, dryRun)
|
|
|
|
// If object is not found, no result to add.
|
|
|
|
if isErrObjectNotFound(healErr) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if healErr != nil {
|
|
|
|
return healErr
|
2017-08-31 14:29:22 -04:00
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
result.Type = madmin.HealItemBucketMetadata
|
|
|
|
results = append(results, result)
|
2016-11-16 19:42:23 -05:00
|
|
|
return nil
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
2016-06-17 14:57:51 -04:00
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// Heal `policy.json` for missing entries, ignores if
|
|
|
|
// `policy.json` is not found.
|
2017-01-16 20:05:00 -05:00
|
|
|
policyPath := pathJoin(bucketConfigPrefix, bucket, bucketPolicyConfig)
|
2018-01-22 17:54:55 -05:00
|
|
|
err = healBucketMetaFn(policyPath)
|
|
|
|
if err != nil {
|
|
|
|
return results, err
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
2016-05-25 19:42:31 -04:00
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// Heal `notification.xml` for missing entries, ignores if
|
|
|
|
// `notification.xml` is not found.
|
|
|
|
nConfigPath := path.Join(bucketConfigPrefix, bucket,
|
|
|
|
bucketNotificationConfig)
|
|
|
|
err = healBucketMetaFn(nConfigPath)
|
|
|
|
if err != nil {
|
|
|
|
return results, err
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// Heal `listeners.json` for missing entries, ignores if
|
|
|
|
// `listeners.json` is not found.
|
2016-11-16 19:42:23 -05:00
|
|
|
lConfigPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
|
2018-01-22 17:54:55 -05:00
|
|
|
err = healBucketMetaFn(lConfigPath)
|
|
|
|
return results, err
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
2016-10-17 05:10:23 -04:00
|
|
|
|
2017-01-19 12:34:18 -05:00
|
|
|
// listAllBuckets lists all buckets from all disks. It also
|
|
|
|
// returns the occurrence of each buckets in all disks
|
2018-01-22 17:54:55 -05:00
|
|
|
func listAllBuckets(storageDisks []StorageAPI) (buckets map[string]VolInfo,
|
|
|
|
bucketsOcc map[string]int, err error) {
|
|
|
|
|
2017-01-19 12:34:18 -05:00
|
|
|
buckets = make(map[string]VolInfo)
|
|
|
|
bucketsOcc = make(map[string]int)
|
2016-11-16 19:42:23 -05:00
|
|
|
for _, disk := range storageDisks {
|
|
|
|
if disk == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
var volsInfo []VolInfo
|
|
|
|
volsInfo, err = disk.ListVols()
|
2018-01-22 17:54:55 -05:00
|
|
|
if err != nil {
|
|
|
|
if errors.IsErrIgnored(err, bucketMetadataOpIgnoredErrs...) {
|
|
|
|
continue
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
break
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
for _, volInfo := range volsInfo {
|
|
|
|
// StorageAPI can send volume names which are
|
|
|
|
// incompatible with buckets - these are
|
|
|
|
// skipped, like the meta-bucket.
|
|
|
|
if !IsValidBucketName(volInfo.Name) ||
|
|
|
|
isMinioMetaBucketName(volInfo.Name) {
|
|
|
|
continue
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
// Increase counter per bucket name
|
|
|
|
bucketsOcc[volInfo.Name]++
|
|
|
|
// Save volume info under bucket name
|
|
|
|
buckets[volInfo.Name] = volInfo
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
return buckets, bucketsOcc, err
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListBucketsHeal - Find all buckets that need to be healed
|
|
|
|
func (xl xlObjects) ListBucketsHeal() ([]BucketInfo, error) {
|
|
|
|
listBuckets := []BucketInfo{}
|
|
|
|
// List all buckets that can be found in all disks
|
2018-01-22 17:54:55 -05:00
|
|
|
buckets, _, err := listAllBuckets(xl.storageDisks)
|
2017-01-19 12:34:18 -05:00
|
|
|
if err != nil {
|
|
|
|
return listBuckets, err
|
|
|
|
}
|
2017-03-06 05:00:15 -05:00
|
|
|
|
2017-01-19 12:34:18 -05:00
|
|
|
// Iterate over all buckets
|
|
|
|
for _, currBucket := range buckets {
|
2018-01-22 17:54:55 -05:00
|
|
|
listBuckets = append(listBuckets,
|
|
|
|
BucketInfo{currBucket.Name, currBucket.Created})
|
2017-01-19 12:34:18 -05:00
|
|
|
}
|
2017-03-06 05:00:15 -05:00
|
|
|
|
2017-01-19 12:34:18 -05:00
|
|
|
// Sort found buckets
|
|
|
|
sort.Sort(byBucketName(listBuckets))
|
|
|
|
return listBuckets, nil
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// This function is meant for all the healing that needs to be done
|
|
|
|
// during startup i.e healing of buckets, bucket metadata (policy.json,
|
|
|
|
// notification.xml, listeners.json) etc. Currently this function
|
|
|
|
// supports quick healing of buckets, bucket metadata.
|
2017-12-22 06:28:13 -05:00
|
|
|
func quickHeal(xlObj xlObjects, writeQuorum int, readQuorum int) error {
|
2017-03-06 05:00:15 -05:00
|
|
|
// List all bucket name occurrence from all disks.
|
2017-12-22 06:28:13 -05:00
|
|
|
_, bucketOcc, err := listAllBuckets(xlObj.storageDisks)
|
2016-11-16 19:42:23 -05:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-03-06 05:00:15 -05:00
|
|
|
|
|
|
|
// All bucket names and bucket metadata that should be healed.
|
|
|
|
for bucketName, occCount := range bucketOcc {
|
|
|
|
// Heal bucket only if healing is needed.
|
2017-12-22 06:28:13 -05:00
|
|
|
if occCount != len(xlObj.storageDisks) {
|
2018-01-12 23:34:52 -05:00
|
|
|
bucketLock := xlObj.nsMutex.NewNSLock(bucketName, "")
|
|
|
|
if perr := bucketLock.GetLock(globalHealingTimeout); perr != nil {
|
|
|
|
return perr
|
|
|
|
}
|
|
|
|
defer bucketLock.Unlock()
|
|
|
|
|
2017-03-06 05:00:15 -05:00
|
|
|
// Heal bucket and then proceed to heal bucket metadata if any.
|
2018-01-22 17:54:55 -05:00
|
|
|
if _, err = healBucket(xlObj.storageDisks, bucketName, writeQuorum, false); err == nil {
|
|
|
|
if _, err = healBucketMetadata(xlObj, bucketName, false); err == nil {
|
2017-03-06 05:00:15 -05:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
return err
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2017-03-06 05:00:15 -05:00
|
|
|
|
|
|
|
// Success.
|
2016-11-16 19:42:23 -05:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// Heals an object by re-writing corrupt/missing erasure blocks.
// Healed parts are first written to a temporary location under
// minioMetaTmpBucket and renamed into place only after xl.json has
// been written, so a partially-healed object is never left at the
// final location. When dryRun is true the function stops after
// computing the per-drive states and returns without mutating disks.
func healObject(storageDisks []StorageAPI, bucket string, object string,
	quorum int, dryRun bool) (result madmin.HealResultItem, err error) {

	partsMetadata, errs := readAllXLMetadata(storageDisks, bucket, object)

	// readQuorum suffices for xl.json since we use monotonic
	// system time to break the tie when a split-brain situation
	// arises.
	if reducedErr := reduceReadQuorumErrs(errs, nil, quorum); reducedErr != nil {
		return result, toObjectErr(reducedErr, bucket, object)
	}

	// List of disks having latest version of the object xl.json
	// (by modtime).
	latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)

	// List of disks having all parts as per latest xl.json.
	availableDisks, dataErrs, aErr := disksWithAllParts(latestDisks, partsMetadata, errs, bucket, object)
	if aErr != nil {
		return result, toObjectErr(aErr, bucket, object)
	}

	// Initialize heal result object
	result = madmin.HealResultItem{
		Type:      madmin.HealItemObject,
		Bucket:    bucket,
		Object:    object,
		DiskCount: len(storageDisks),

		// Initialize object size to -1, so we can detect if we are
		// unable to reliably find the object size.
		ObjectSize: -1,
	}
	result.InitDrives()

	// Loop to find number of disks with valid data, per-drive
	// data state and a list of outdated disks on which data needs
	// to be healed.
	outDatedDisks := make([]StorageAPI, len(storageDisks))
	numAvailableDisks := 0
	disksToHealCount := 0
	for i, v := range availableDisks {
		driveState := ""
		switch {
		case v != nil:
			driveState = madmin.DriveStateOk
			numAvailableDisks++
			// If data is sane on any one disk, we can
			// extract the correct object size.
			result.ObjectSize = partsMetadata[i].Stat.Size
			result.ParityBlocks = partsMetadata[i].Erasure.ParityBlocks
			result.DataBlocks = partsMetadata[i].Erasure.DataBlocks
		case errors.Cause(errs[i]) == errDiskNotFound:
			driveState = madmin.DriveStateOffline
		case errors.Cause(errs[i]) == errFileNotFound, errors.Cause(errs[i]) == errVolumeNotFound:
			fallthrough
		case errors.Cause(dataErrs[i]) == errFileNotFound, errors.Cause(dataErrs[i]) == errVolumeNotFound:
			driveState = madmin.DriveStateMissing
		default:
			// all remaining cases imply corrupt data/metadata
			driveState = madmin.DriveStateCorrupt
		}
		drive := globalEndpoints.GetString(i)
		result.DriveInfo.Before[drive] = driveState
		// copy for 'after' state
		result.DriveInfo.After[drive] = driveState

		// an online disk without valid data/metadata is
		// outdated and can be healed.
		if errs[i] != errDiskNotFound && v == nil {
			outDatedDisks[i] = storageDisks[i]
			disksToHealCount++
		}
	}

	// If less than read quorum number of disks have all the parts
	// of the data, we can't reconstruct the erasure-coded data.
	if numAvailableDisks < quorum {
		return result, toObjectErr(errXLReadQuorum, bucket, object)
	}

	if disksToHealCount == 0 {
		// Nothing to heal!
		return result, nil
	}

	// After this point, only have to repair data on disk - so
	// return if it is a dry-run
	if dryRun {
		return result, nil
	}

	// Latest xlMetaV1 for reference. If a valid metadata is not
	// present, it is as good as object not found.
	latestMeta, pErr := pickValidXLMeta(partsMetadata, modTime)
	if pErr != nil {
		return result, toObjectErr(pErr, bucket, object)
	}

	// Clear data files of the object on outdated disks
	for _, disk := range outDatedDisks {
		// Before healing outdated disks, we need to remove
		// xl.json and part files from "bucket/object/" so
		// that rename(minioMetaBucket, "tmp/tmpuuid/",
		// "bucket", "object/") succeeds.
		if disk == nil {
			// Not an outdated disk.
			continue
		}

		// List and delete the object directory, ignoring
		// errors.
		files, derr := disk.ListDir(bucket, object)
		if derr == nil {
			for _, entry := range files {
				_ = disk.DeleteFile(bucket,
					pathJoin(object, entry))
			}
		}
	}

	// Reorder so that we have data disks first and parity disks next.
	latestDisks = shuffleDisks(latestDisks, latestMeta.Erasure.Distribution)
	outDatedDisks = shuffleDisks(outDatedDisks, latestMeta.Erasure.Distribution)
	partsMetadata = shufflePartsMetadata(partsMetadata, latestMeta.Erasure.Distribution)

	// We write at temporary location and then rename to final location.
	tmpID := mustGetUUID()

	// Checksum of the part files. checkSumInfos[index] will
	// contain checksums of all the part files in the
	// outDatedDisks[index]
	checksumInfos := make([][]ChecksumInfo, len(outDatedDisks))

	// Heal each part. erasureHealFile() will write the healed
	// part to .minio/tmp/uuid/ which needs to be renamed later to
	// the final location.
	storage, err := NewErasureStorage(latestDisks, latestMeta.Erasure.DataBlocks,
		latestMeta.Erasure.ParityBlocks, latestMeta.Erasure.BlockSize)
	if err != nil {
		return result, toObjectErr(err, bucket, object)
	}
	checksums := make([][]byte, len(latestDisks))
	for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ {
		partName := latestMeta.Parts[partIndex].Name
		partSize := latestMeta.Parts[partIndex].Size
		erasure := latestMeta.Erasure
		var algorithm BitrotAlgorithm
		// Gather the bitrot algorithm and existing checksums for
		// this part from every online disk.
		for i, disk := range storage.disks {
			if disk != OfflineDisk {
				info := partsMetadata[i].Erasure.GetChecksumInfo(partName)
				algorithm = info.Algorithm
				checksums[i] = info.Hash
			}
		}
		// Heal the part file.
		file, hErr := storage.HealFile(outDatedDisks, bucket, pathJoin(object, partName),
			erasure.BlockSize, minioMetaTmpBucket, pathJoin(tmpID, partName), partSize,
			algorithm, checksums)
		if hErr != nil {
			return result, toObjectErr(hErr, bucket, object)
		}
		// outDatedDisks that had write errors should not be
		// written to for remaining parts, so we nil it out.
		for i, disk := range outDatedDisks {
			if disk == nil {
				continue
			}
			// A non-nil stale disk which did not receive
			// a healed part checksum had a write error.
			if file.Checksums[i] == nil {
				outDatedDisks[i] = nil
				disksToHealCount--
				continue
			}
			// append part checksums
			checksumInfos[i] = append(checksumInfos[i],
				ChecksumInfo{partName, file.Algorithm, file.Checksums[i]})
		}

		// If all disks are having errors, we give up.
		if disksToHealCount == 0 {
			return result, fmt.Errorf("all disks without up-to-date data had write errors")
		}
	}

	// xl.json should be written to all the healed disks.
	for index, disk := range outDatedDisks {
		if disk == nil {
			continue
		}
		partsMetadata[index] = latestMeta
		partsMetadata[index].Erasure.Checksums = checksumInfos[index]
	}

	// Generate and write `xl.json` generated from other disks.
	outDatedDisks, aErr = writeUniqueXLMetadata(outDatedDisks, minioMetaTmpBucket, tmpID,
		partsMetadata, diskCount(outDatedDisks))
	if aErr != nil {
		return result, toObjectErr(aErr, bucket, object)
	}

	// Rename from tmp location to the actual location.
	for diskIndex, disk := range outDatedDisks {
		if disk == nil {
			continue
		}

		// Attempt a rename now from healed data to final location.
		aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket,
			retainSlash(object))
		if aErr != nil {
			return result, toObjectErr(errors.Trace(aErr), bucket, object)
		}

		// diskIndex is in shuffled (erasure distribution) order;
		// map it back to the physical disk index to find the
		// endpoint string for the result.
		realDiskIdx := unshuffleIndex(diskIndex,
			latestMeta.Erasure.Distribution)
		drive := globalEndpoints.GetString(realDiskIdx)
		result.DriveInfo.After[drive] = madmin.DriveStateOk
	}

	// Set the size of the object in the heal result
	result.ObjectSize = latestMeta.Stat.Size

	return result, nil
}
|
2016-11-16 19:42:23 -05:00
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// HealObject - heal the given object.
|
|
|
|
//
|
2016-11-16 19:42:23 -05:00
|
|
|
// FIXME: If an object object was deleted and one disk was down,
|
|
|
|
// and later the disk comes back up again, heal on the object
|
|
|
|
// should delete it.
|
2018-01-22 17:54:55 -05:00
|
|
|
func (xl xlObjects) HealObject(bucket, object string, dryRun bool) (
|
|
|
|
hr madmin.HealResultItem, err error) {
|
|
|
|
|
|
|
|
// FIXME: Metadata is read again in the healObject() call below.
|
2017-12-22 06:28:13 -05:00
|
|
|
// Read metadata files from all the disks
|
|
|
|
partsMetadata, errs := readAllXLMetadata(xl.storageDisks, bucket, object)
|
|
|
|
|
|
|
|
// get read quorum for this object
|
2018-01-22 17:54:55 -05:00
|
|
|
var readQuorum int
|
|
|
|
readQuorum, _, err = objectQuorumFromMeta(xl, partsMetadata, errs)
|
2017-12-22 06:28:13 -05:00
|
|
|
if err != nil {
|
2018-01-22 17:54:55 -05:00
|
|
|
return hr, err
|
2017-12-22 06:28:13 -05:00
|
|
|
}
|
|
|
|
|
2016-11-16 19:42:23 -05:00
|
|
|
// Lock the object before healing.
|
2018-01-12 23:34:52 -05:00
|
|
|
objectLock := xl.nsMutex.NewNSLock(bucket, object)
|
2018-01-22 17:54:55 -05:00
|
|
|
if lerr := objectLock.GetRLock(globalHealingTimeout); lerr != nil {
|
|
|
|
return hr, lerr
|
2017-08-31 14:29:22 -04:00
|
|
|
}
|
2016-11-16 19:42:23 -05:00
|
|
|
defer objectLock.RUnlock()
|
|
|
|
|
|
|
|
// Heal the object.
|
2018-01-22 17:54:55 -05:00
|
|
|
return healObject(xl.storageDisks, bucket, object, readQuorum, dryRun)
|
2016-11-16 19:42:23 -05:00
|
|
|
}
|