/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cmd
|
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
import (
|
|
|
|
"path/filepath"
|
|
|
|
"time"
|
2017-11-25 14:58:29 -05:00
|
|
|
|
|
|
|
"github.com/minio/minio/pkg/errors"
|
2017-03-04 17:53:28 -05:00
|
|
|
)
|
2016-10-17 05:10:23 -04:00
|
|
|
|
|
|
|
// commonTime returns a maximally occurring time from a list of time.
|
2017-01-17 13:02:58 -05:00
|
|
|
func commonTime(modTimes []time.Time) (modTime time.Time, count int) {
|
2016-10-17 05:10:23 -04:00
|
|
|
var maxima int // Counter for remembering max occurrence of elements.
|
|
|
|
timeOccurenceMap := make(map[time.Time]int)
|
|
|
|
// Ignore the uuid sentinel and count the rest.
|
|
|
|
for _, time := range modTimes {
|
|
|
|
if time == timeSentinel {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
timeOccurenceMap[time]++
|
|
|
|
}
|
|
|
|
// Find the common cardinality from previously collected
|
|
|
|
// occurrences of elements.
|
|
|
|
for time, count := range timeOccurenceMap {
|
2018-01-22 17:54:55 -05:00
|
|
|
if count > maxima || (count == maxima && time.After(modTime)) {
|
2016-10-17 05:10:23 -04:00
|
|
|
maxima = count
|
|
|
|
modTime = time
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Return the collected common uuid.
|
2017-01-17 13:02:58 -05:00
|
|
|
return modTime, maxima
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// timeSentinel is the placeholder modTime assigned to disks whose
// metadata is unavailable; the beginning of Unix time (UTC) is treated
// as the sentinel value here.
var timeSentinel = time.Unix(0, 0).UTC()
|
|
|
|
|
|
|
|
// Boot modTimes up to disk count, setting the value to time sentinel.
|
|
|
|
func bootModtimes(diskCount int) []time.Time {
|
|
|
|
modTimes := make([]time.Time, diskCount)
|
|
|
|
// Boots up all the modtimes.
|
|
|
|
for i := range modTimes {
|
|
|
|
modTimes[i] = timeSentinel
|
|
|
|
}
|
|
|
|
return modTimes
|
|
|
|
}
|
|
|
|
|
|
|
|
// Extracts list of times from xlMetaV1 slice and returns, skips
|
2017-03-04 17:53:28 -05:00
|
|
|
// slice elements which have errors.
|
2016-10-17 05:10:23 -04:00
|
|
|
func listObjectModtimes(partsMetadata []xlMetaV1, errs []error) (modTimes []time.Time) {
|
|
|
|
modTimes = bootModtimes(len(partsMetadata))
|
|
|
|
for index, metadata := range partsMetadata {
|
2017-03-04 17:53:28 -05:00
|
|
|
if errs[index] != nil {
|
|
|
|
continue
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
2017-03-04 17:53:28 -05:00
|
|
|
// Once the file is found, save the uuid saved on disk.
|
|
|
|
modTimes[index] = metadata.Stat.ModTime
|
2016-10-17 05:10:23 -04:00
|
|
|
}
|
|
|
|
return modTimes
|
|
|
|
}
|
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
// Notes:
|
|
|
|
// There are 5 possible states a disk could be in,
|
|
|
|
// 1. __online__ - has the latest copy of xl.json - returned by listOnlineDisks
|
|
|
|
//
|
|
|
|
// 2. __offline__ - err == errDiskNotFound
|
|
|
|
//
|
|
|
|
// 3. __availableWithParts__ - has the latest copy of xl.json and has all
|
|
|
|
// parts with checksums matching; returned by disksWithAllParts
|
|
|
|
//
|
|
|
|
// 4. __outdated__ - returned by outDatedDisk, provided []StorageAPI
|
|
|
|
// returned by diskWithAllParts is passed for latestDisks.
|
|
|
|
// - has an old copy of xl.json
|
|
|
|
// - doesn't have xl.json (errFileNotFound)
|
|
|
|
// - has the latest xl.json but one or more parts are corrupt
|
|
|
|
//
|
|
|
|
// 5. __missingParts__ - has the latest copy of xl.json but has some parts
|
|
|
|
// missing. This is identified separately since this may need manual
|
|
|
|
// inspection to understand the root cause. E.g, this could be due to
|
|
|
|
// backend filesystem corruption.
|
|
|
|
|
|
|
|
// listOnlineDisks - returns
|
|
|
|
// - a slice of disks where disk having 'older' xl.json (or nothing)
|
|
|
|
// are set to nil.
|
|
|
|
// - latest (in time) of the maximally occurring modTime(s).
|
2016-10-17 05:10:23 -04:00
|
|
|
func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
|
|
|
|
onlineDisks = make([]StorageAPI, len(disks))
|
|
|
|
|
|
|
|
// List all the file commit ids from parts metadata.
|
|
|
|
modTimes := listObjectModtimes(partsMetadata, errs)
|
|
|
|
|
|
|
|
// Reduce list of UUIDs to a single common value.
|
2017-01-17 13:02:58 -05:00
|
|
|
modTime, _ = commonTime(modTimes)
|
2016-10-17 05:10:23 -04:00
|
|
|
|
|
|
|
// Create a new online disks slice, which have common uuid.
|
|
|
|
for index, t := range modTimes {
|
|
|
|
if t == modTime {
|
|
|
|
onlineDisks[index] = disks[index]
|
|
|
|
} else {
|
|
|
|
onlineDisks[index] = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return onlineDisks, modTime
|
|
|
|
}
|
|
|
|
|
2017-12-22 06:28:13 -05:00
|
|
|
// Returns one of the latest updated xlMeta files and count of total valid xlMeta(s) updated latest
|
|
|
|
func getLatestXLMeta(partsMetadata []xlMetaV1, errs []error) (xlMetaV1, int) {
|
|
|
|
// List all the file commit ids from parts metadata.
|
|
|
|
modTimes := listObjectModtimes(partsMetadata, errs)
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
// Count all latest updated xlMeta values
|
2017-12-22 06:28:13 -05:00
|
|
|
var count int
|
|
|
|
var latestXLMeta xlMetaV1
|
|
|
|
|
|
|
|
// Reduce list of UUIDs to a single common value - i.e. the last updated Time
|
|
|
|
modTime, _ := commonTime(modTimes)
|
|
|
|
|
|
|
|
// Interate through all the modTimes and count the xlMeta(s) with latest time.
|
|
|
|
for index, t := range modTimes {
|
|
|
|
if t == modTime && partsMetadata[index].IsValid() {
|
|
|
|
latestXLMeta = partsMetadata[index]
|
|
|
|
count++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Return one of the latest xlMetaData, and the count of lastest updated xlMeta files
|
|
|
|
return latestXLMeta, count
|
|
|
|
}
|
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
// disksWithAllParts - This function needs to be called with
|
|
|
|
// []StorageAPI returned by listOnlineDisks. Returns,
|
2017-09-28 18:57:19 -04:00
|
|
|
//
|
2017-03-04 17:53:28 -05:00
|
|
|
// - disks which have all parts specified in the latest xl.json.
|
2017-09-28 18:57:19 -04:00
|
|
|
//
|
2018-01-22 17:54:55 -05:00
|
|
|
// - slice of errors about the state of data files on disk - can have
|
|
|
|
// a not-found error or a hash-mismatch error.
|
2017-09-28 18:57:19 -04:00
|
|
|
//
|
|
|
|
// - non-nil error if any of the disks failed unexpectedly (i.e. error
|
|
|
|
// other than file not found and not a checksum error).
|
|
|
|
func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs []error, bucket,
|
|
|
|
object string) ([]StorageAPI, []error, error) {
|
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
availableDisks := make([]StorageAPI, len(onlineDisks))
|
2017-09-28 18:57:19 -04:00
|
|
|
buffer := []byte{}
|
2018-01-22 17:54:55 -05:00
|
|
|
dataErrs := make([]error, len(onlineDisks))
|
2017-08-14 21:08:42 -04:00
|
|
|
|
2017-09-28 18:57:19 -04:00
|
|
|
for i, onlineDisk := range onlineDisks {
|
2018-01-22 17:54:55 -05:00
|
|
|
if onlineDisk == nil {
|
2017-03-04 17:53:28 -05:00
|
|
|
continue
|
|
|
|
}
|
2018-01-22 17:54:55 -05:00
|
|
|
|
2017-03-04 17:53:28 -05:00
|
|
|
// disk has a valid xl.json but may not have all the
|
|
|
|
// parts. This is considered an outdated disk, since
|
|
|
|
// it needs healing too.
|
2017-09-28 18:57:19 -04:00
|
|
|
for _, part := range partsMetadata[i].Parts {
|
2017-03-04 17:53:28 -05:00
|
|
|
partPath := filepath.Join(object, part.Name)
|
2017-09-28 18:57:19 -04:00
|
|
|
checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(part.Name)
|
|
|
|
verifier := NewBitrotVerifier(checksumInfo.Algorithm, checksumInfo.Hash)
|
|
|
|
|
|
|
|
// verification happens even if a 0-length
|
|
|
|
// buffer is passed
|
|
|
|
_, hErr := onlineDisk.ReadFile(bucket, partPath, 0, buffer, verifier)
|
2018-01-22 17:54:55 -05:00
|
|
|
|
|
|
|
_, isCorrupt := hErr.(hashMismatchError)
|
|
|
|
switch {
|
|
|
|
case isCorrupt:
|
|
|
|
fallthrough
|
|
|
|
case hErr == errFileNotFound, hErr == errVolumeNotFound:
|
|
|
|
dataErrs[i] = hErr
|
|
|
|
break
|
|
|
|
case hErr != nil:
|
|
|
|
// abort on unhandled errors
|
2017-11-25 14:58:29 -05:00
|
|
|
return nil, nil, errors.Trace(hErr)
|
2017-03-04 17:53:28 -05:00
|
|
|
}
|
2017-09-28 18:57:19 -04:00
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
if dataErrs[i] == nil {
|
2017-09-28 18:57:19 -04:00
|
|
|
// All parts verified, mark it as all data available.
|
|
|
|
availableDisks[i] = onlineDisk
|
2017-03-04 17:53:28 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-22 17:54:55 -05:00
|
|
|
return availableDisks, dataErrs, nil
|
2017-03-04 17:53:28 -05:00
|
|
|
}
|