2021-04-18 15:41:13 -04:00
|
|
|
// Copyright (c) 2015-2021 MinIO, Inc.
|
|
|
|
//
|
|
|
|
// This file is part of MinIO Object Storage stack
|
|
|
|
//
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Affero General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This program is distributed in the hope that it will be useful
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Affero General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Affero General Public License
|
|
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2016-10-17 05:10:23 -04:00
|
|
|
|
|
|
|
package cmd
|
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
import (
|
2021-03-29 20:00:55 -04:00
|
|
|
"bytes"
|
2018-04-05 18:04:40 -04:00
|
|
|
"context"
|
2017-03-04 17:53:28 -05:00
|
|
|
"time"
|
2017-11-25 14:58:29 -05:00
|
|
|
|
2021-05-06 11:52:02 -04:00
|
|
|
"github.com/minio/madmin-go"
|
2017-03-04 17:53:28 -05:00
|
|
|
)
|
2016-10-17 05:10:23 -04:00
|
|
|
|
|
|
|
// commonTime returns a maximally occurring time from a list of time.
|
2021-11-21 13:41:30 -05:00
|
|
|
func commonTime(modTimes []time.Time) (modTime time.Time) {
|
2021-04-21 22:06:08 -04:00
|
|
|
timeOccurenceMap := make(map[int64]int, len(modTimes))
|
2016-10-17 05:10:23 -04:00
|
|
|
// Ignore the uuid sentinel and count the rest.
|
2021-11-21 13:41:30 -05:00
|
|
|
for _, t := range modTimes {
|
2021-11-20 14:26:30 -05:00
|
|
|
if t.Equal(timeSentinel) {
|
2016-10-17 05:10:23 -04:00
|
|
|
continue
|
|
|
|
}
|
2021-11-20 14:26:30 -05:00
|
|
|
timeOccurenceMap[t.UnixNano()]++
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
2020-11-02 20:52:13 -05:00
|
|
|
|
2021-11-21 13:41:30 -05:00
|
|
|
var maxima int // Counter for remembering max occurrence of elements.
|
|
|
|
|
2016-10-17 05:10:23 -04:00
|
|
|
// Find the common cardinality from previously collected
|
|
|
|
// occurrences of elements.
|
2020-11-02 20:52:13 -05:00
|
|
|
for nano, count := range timeOccurenceMap {
|
2021-11-15 12:46:55 -05:00
|
|
|
t := time.Unix(0, nano).UTC()
|
2020-11-02 20:52:13 -05:00
|
|
|
if count > maxima || (count == maxima && t.After(modTime)) {
|
2016-10-17 05:10:23 -04:00
|
|
|
maxima = count
|
2020-11-02 20:52:13 -05:00
|
|
|
modTime = t
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
|
|
|
}
|
2020-11-02 20:52:13 -05:00
|
|
|
|
2021-11-21 13:41:30 -05:00
|
|
|
// Return the collected common modTime.
|
|
|
|
return modTime
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Beginning of unix time is treated as sentinel value here.
|
|
|
|
var timeSentinel = time.Unix(0, 0).UTC()
|
|
|
|
|
|
|
|
// Boot modTimes up to disk count, setting the value to time sentinel.
|
|
|
|
func bootModtimes(diskCount int) []time.Time {
|
|
|
|
modTimes := make([]time.Time, diskCount)
|
|
|
|
// Boots up all the modtimes.
|
|
|
|
for i := range modTimes {
|
|
|
|
modTimes[i] = timeSentinel
|
|
|
|
}
|
|
|
|
return modTimes
|
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Extracts list of times from FileInfo slice and returns, skips
|
2017-03-04 17:53:28 -05:00
|
|
|
// slice elements which have errors.
|
2020-06-12 23:04:01 -04:00
|
|
|
func listObjectModtimes(partsMetadata []FileInfo, errs []error) (modTimes []time.Time) {
|
2016-10-17 05:10:23 -04:00
|
|
|
modTimes = bootModtimes(len(partsMetadata))
|
|
|
|
for index, metadata := range partsMetadata {
|
2017-03-04 17:53:28 -05:00
|
|
|
if errs[index] != nil {
|
|
|
|
continue
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
2017-03-04 17:53:28 -05:00
|
|
|
// Once the file is found, save the uuid saved on disk.
|
2020-06-12 23:04:01 -04:00
|
|
|
modTimes[index] = metadata.ModTime
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
|
|
|
return modTimes
|
|
|
|
}
|
|
|
|
|
2021-11-21 13:41:30 -05:00
|
|
|
func filterOnlineDisksInplace(fi FileInfo, partsMetadata []FileInfo, onlineDisks []StorageAPI) {
|
|
|
|
for i, meta := range partsMetadata {
|
|
|
|
if fi.XLV1 == meta.XLV1 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
onlineDisks[i] = nil
|
|
|
|
}
|
|
|
|
}
|
2021-10-04 11:53:38 -04:00
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
// Notes:
|
|
|
|
// There are 5 possible states a disk could be in,
|
2020-06-12 23:04:01 -04:00
|
|
|
// 1. __online__ - has the latest copy of xl.meta - returned by listOnlineDisks
|
2017-03-04 17:53:28 -05:00
|
|
|
//
|
|
|
|
// 2. __offline__ - err == errDiskNotFound
|
|
|
|
//
|
2020-06-12 23:04:01 -04:00
|
|
|
// 3. __availableWithParts__ - has the latest copy of xl.meta and has all
|
2017-03-04 17:53:28 -05:00
|
|
|
// parts with checksums matching; returned by disksWithAllParts
|
|
|
|
//
|
|
|
|
// 4. __outdated__ - returned by outDatedDisk, provided []StorageAPI
// returned by disksWithAllParts is passed for latestDisks.
|
2020-06-12 23:04:01 -04:00
|
|
|
// - has an old copy of xl.meta
|
|
|
|
// - doesn't have xl.meta (errFileNotFound)
|
|
|
|
// - has the latest xl.meta but one or more parts are corrupt
|
2017-03-04 17:53:28 -05:00
|
|
|
//
|
2020-06-12 23:04:01 -04:00
|
|
|
// 5. __missingParts__ - has the latest copy of xl.meta but has some parts
|
2017-03-04 17:53:28 -05:00
|
|
|
// missing. This is identified separately since this may need manual
|
|
|
|
// inspection to understand the root cause. E.g, this could be due to
|
|
|
|
// backend filesystem corruption.
|
|
|
|
|
|
|
|
// listOnlineDisks - returns
|
2020-06-12 23:04:01 -04:00
|
|
|
// - a slice of disks where disk having 'older' xl.meta (or nothing)
|
2017-03-04 17:53:28 -05:00
|
|
|
// are set to nil.
|
|
|
|
// - latest (in time) of the maximally occurring modTime(s).
|
2021-11-21 13:41:30 -05:00
|
|
|
func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
|
2016-10-17 05:10:23 -04:00
|
|
|
onlineDisks = make([]StorageAPI, len(disks))
|
|
|
|
|
|
|
|
// List all the file commit ids from parts metadata.
|
|
|
|
modTimes := listObjectModtimes(partsMetadata, errs)
|
|
|
|
|
|
|
|
// Reduce list of UUIDs to a single common value.
|
2021-11-21 13:41:30 -05:00
|
|
|
modTime = commonTime(modTimes)
|
2016-10-17 05:10:23 -04:00
|
|
|
|
|
|
|
// Create a new online disks slice, which have common uuid.
|
|
|
|
for index, t := range modTimes {
|
2021-11-21 13:41:30 -05:00
|
|
|
if partsMetadata[index].IsValid() && t.Equal(modTime) {
|
2016-10-17 05:10:23 -04:00
|
|
|
onlineDisks[index] = disks[index]
|
|
|
|
} else {
|
|
|
|
onlineDisks[index] = nil
|
|
|
|
}
|
|
|
|
}
|
2021-04-21 22:06:08 -04:00
|
|
|
|
2021-11-21 13:41:30 -05:00
|
|
|
return onlineDisks, modTime
|
2021-11-15 12:46:55 -05:00
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Returns the latest updated FileInfo files and error in case of failure.
|
2021-11-22 12:36:29 -05:00
|
|
|
func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []error) (FileInfo, error) {
|
2018-07-31 03:23:29 -04:00
|
|
|
// There should be atleast half correct entries, if not return failure
|
2021-11-22 12:36:29 -05:00
|
|
|
reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, len(partsMetadata)/2)
|
2021-11-15 12:46:55 -05:00
|
|
|
if reducedErr != nil {
|
2020-06-12 23:04:01 -04:00
|
|
|
return FileInfo{}, reducedErr
|
2018-07-31 03:23:29 -04:00
|
|
|
}
|
|
|
|
|
2017-12-22 06:28:13 -05:00
|
|
|
// List all the file commit ids from parts metadata.
|
|
|
|
modTimes := listObjectModtimes(partsMetadata, errs)
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Count all latest updated FileInfo values
|
2017-12-22 06:28:13 -05:00
|
|
|
var count int
|
2020-06-12 23:04:01 -04:00
|
|
|
var latestFileInfo FileInfo
|
2017-12-22 06:28:13 -05:00
|
|
|
|
|
|
|
// Reduce list of UUIDs to a single common value - i.e. the last updated Time
|
2021-11-21 13:41:30 -05:00
|
|
|
modTime := commonTime(modTimes)
|
2017-12-22 06:28:13 -05:00
|
|
|
|
2021-11-22 12:36:29 -05:00
|
|
|
if modTime.IsZero() || modTime.Equal(timeSentinel) {
|
|
|
|
return FileInfo{}, errErasureReadQuorum
|
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Interate through all the modTimes and count the FileInfo(s) with latest time.
|
2017-12-22 06:28:13 -05:00
|
|
|
for index, t := range modTimes {
|
2021-11-21 13:41:30 -05:00
|
|
|
if partsMetadata[index].IsValid() && t.Equal(modTime) {
|
2020-06-12 23:04:01 -04:00
|
|
|
latestFileInfo = partsMetadata[index]
|
2017-12-22 06:28:13 -05:00
|
|
|
count++
|
|
|
|
}
|
|
|
|
}
|
2021-10-04 11:53:38 -04:00
|
|
|
|
2021-11-22 12:36:29 -05:00
|
|
|
if !latestFileInfo.IsValid() {
|
|
|
|
return FileInfo{}, errErasureReadQuorum
|
|
|
|
}
|
|
|
|
|
|
|
|
if count < latestFileInfo.Erasure.DataBlocks {
|
2020-06-12 23:04:01 -04:00
|
|
|
return FileInfo{}, errErasureReadQuorum
|
2018-07-31 03:23:29 -04:00
|
|
|
}
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
return latestFileInfo, nil
|
Revert heal locks (#12365)
A lot of healing is likely to be on non-existing objects and
locks are very expensive and will slow down scanning
significantly.
In cases where all are valid or, all are broken allow
rejection without locking.
Keep the existing behavior, but move the check for
dangling objects to after the lock has been acquired.
```
_, err = getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
return er.purgeObjectDangling(ctx, bucket, object, versionID, partsMetadata, errs, []error{}, opts)
}
```
Revert "heal: Hold lock when reading xl.meta from disks (#12362)"
This reverts commit abd32065aaae4080396a1b4b04a110454368b028
2021-05-25 20:02:06 -04:00
|
|
|
}
|
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
// disksWithAllParts - This function needs to be called with
// []StorageAPI returned by listOnlineDisks. Returns,
//
// - disks which have all parts specified in the latest xl.meta.
//
// - slice of errors about the state of data files on disk - can have
// a not-found error or a hash-mismatch error.
//
// NOTE(review): partsMetadata is also sanitized in place - entries found
// inconsistent, stale, or failing verification are reset to an empty
// FileInfo so later healing logic treats them as outdated.
func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo,
	errs []error, latestMeta FileInfo,
	bucket, object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) {

	availableDisks := make([]StorageAPI, len(onlineDisks))
	dataErrs := make([]error, len(onlineDisks))

	// First pass: count entries whose erasure distribution/index disagree,
	// to decide below whether erasure.Distribution can be trusted at all.
	inconsistent := 0
	for i, meta := range partsMetadata {
		if !meta.IsValid() {
			// Since for majority of the cases erasure.Index matches with erasure.Distribution we can
			// consider the offline disks as consistent.
			continue
		}
		if !meta.Deleted {
			if len(meta.Erasure.Distribution) != len(onlineDisks) {
				// Erasure distribution seems to have lesser
				// number of items than number of online disks.
				inconsistent++
				continue
			}
			if meta.Erasure.Distribution[i] != meta.Erasure.Index {
				// Mismatch indexes with distribution order
				inconsistent++
			}
		}
	}

	erasureDistributionReliable := true
	if inconsistent > len(partsMetadata)/2 {
		// If there are too many inconsistent files, then we can't trust erasure.Distribution (most likely
		// because of bugs found in CopyObject/PutObjectTags) https://github.com/minio/minio/pull/10772
		erasureDistributionReliable = false
	}

	// Second pass: classify each disk, recording a per-disk data error and
	// marking it available only when its data verifies cleanly.
	for i, onlineDisk := range onlineDisks {
		if errs[i] != nil {
			// Metadata read already failed for this disk; propagate.
			dataErrs[i] = errs[i]
			continue
		}
		if onlineDisk == OfflineDisk {
			dataErrs[i] = errDiskNotFound
			continue
		}

		meta := partsMetadata[i]
		if !meta.ModTime.Equal(latestMeta.ModTime) || meta.DataDir != latestMeta.DataDir {
			// Stale copy: modTime or data dir differs from the latest
			// metadata; invalidate this entry so it gets healed.
			dataErrs[i] = errFileCorrupt
			partsMetadata[i] = FileInfo{}
			continue
		}

		if erasureDistributionReliable {
			if !meta.IsValid() {
				continue
			}

			if !meta.Deleted {
				if len(meta.Erasure.Distribution) != len(onlineDisks) {
					// Erasure distribution is not the same as onlineDisks
					// attempt a fix if possible, assuming other entries
					// might have the right erasure distribution.
					partsMetadata[i] = FileInfo{}
					dataErrs[i] = errFileCorrupt
					continue
				}

				// Since erasure.Distribution is trustable we can fix the mismatching erasure.Index
				if meta.Erasure.Distribution[i] != meta.Erasure.Index {
					partsMetadata[i] = FileInfo{}
					dataErrs[i] = errFileCorrupt
					continue
				}
			}
		}

		// Always check data, if we got it.
		// (Inlined data is verified directly with bitrot checks; zero-size
		// objects with parts take this path too.)
		if (len(meta.Data) > 0 || meta.Size == 0) && len(meta.Parts) > 0 {
			checksumInfo := meta.Erasure.GetChecksumInfo(meta.Parts[0].Number)
			dataErrs[i] = bitrotVerify(bytes.NewReader(meta.Data),
				int64(len(meta.Data)),
				meta.Erasure.ShardFileSize(meta.Size),
				checksumInfo.Algorithm,
				checksumInfo.Hash, meta.Erasure.ShardSize())
			if dataErrs[i] == nil {
				// All parts verified, mark it as all data available.
				availableDisks[i] = onlineDisk
			} else {
				// upon errors just make that disk's fileinfo invalid
				partsMetadata[i] = FileInfo{}
			}
			continue
		}

		// NOTE(review): local copy's DataDir is aligned with the latest
		// metadata before on-disk verification - presumably so the checks
		// below look at the latest data dir; confirm against callers.
		meta.DataDir = latestMeta.DataDir
		switch scanMode {
		case madmin.HealDeepScan:
			// disk has a valid xl.meta but may not have all the
			// parts. This is considered an outdated disk, since
			// it needs healing too.
			if !meta.Deleted && !meta.IsRemote() {
				// Deep scan: verify part contents (bitrot) on disk.
				dataErrs[i] = onlineDisk.VerifyFile(ctx, bucket, object, meta)
			}
		case madmin.HealNormalScan:
			if !meta.Deleted && !meta.IsRemote() {
				// Normal scan: only check that the parts exist.
				dataErrs[i] = onlineDisk.CheckParts(ctx, bucket, object, meta)
			}
		}

		if dataErrs[i] == nil {
			// All parts verified, mark it as all data available.
			availableDisks[i] = onlineDisk
		} else {
			// upon errors just make that disk's fileinfo invalid
			partsMetadata[i] = FileInfo{}
		}
	}

	return availableDisks, dataErrs
}
|