a337ea4d11

- Changes related to moving admin APIs
  - admin APIs now have an endpoint under /minio/admin
  - admin APIs are now versioned - a new API to serve the version is added at
    "GET /minio/admin/version", and all API operations have the path prefix
    /minio/admin/v1/<operation>
  - new service stop API added
  - credentials change API is moved to /minio/admin/v1/config/credential
  - credentials change API and configuration get/set API now require TLS so
    that credentials are protected
  - all API requests now receive JSON
  - heal APIs are disabled as they will be changed substantially

- Heal API changes

  The Heal API is now provided at a single endpoint, and a client can start a
  heal sequence on all the data in the server, on a single bucket, or under a
  prefix within a bucket.

  When a heal sequence is started, the server returns a unique token that must
  be used for subsequent 'status' requests to fetch heal results.

  On each status request from the client, the server returns the heal result
  records it has accumulated since the previous status request. The server
  accumulates up to 1000 records and pauses healing further objects until the
  client requests status again. If the client does not request any further
  records for a long time, the server aborts the heal sequence automatically.

  A heal result record is returned for each entity healed on the server, such
  as system metadata, object metadata, buckets and objects, and carries
  information about the before and after states on each disk.

  A client may request to force restart a heal sequence - this causes the
  running heal sequence to be aborted at the next safe spot and a new heal
  sequence to be started.
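
As a rough sketch of the heal flow described above - start a heal sequence, receive a token, then poll for accumulated results - the snippet below shows a hypothetical client. Only the /minio/admin/v1 prefix comes from the description; the heal paths, HTTP methods, JSON field name, and hostname are assumptions for illustration, and request signing/authentication is omitted entirely.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// adminBase, the heal paths, and the JSON field below are assumptions
	// made for this sketch; they are not the documented admin API.
	const adminBase = "https://minio.example.com/minio/admin/v1"

	// Start a heal sequence on a hypothetical bucket.
	resp, err := http.Post(adminBase+"/heal/mybucket", "application/json", nil)
	if err != nil {
		panic(err)
	}
	var start struct {
		Token string `json:"token"` // hypothetical field carrying the heal sequence token
	}
	if err := json.NewDecoder(resp.Body).Decode(&start); err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Poll for heal result records accumulated since the last status call,
	// using the token returned when the sequence was started.
	st, err := http.Get(adminBase + "/heal/mybucket?token=" + start.Token)
	if err != nil {
		panic(err)
	}
	defer st.Body.Close()
	fmt.Println("heal status HTTP code:", st.StatusCode)
	// A real client would decode the heal result records here and keep
	// polling until the server reports that the sequence has finished.
}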
198 lines
6.4 KiB
Go
/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"path/filepath"
	"time"

	"github.com/minio/minio/pkg/errors"
)

// commonTime returns the maximally occurring time from a list of times.
func commonTime(modTimes []time.Time) (modTime time.Time, count int) {
	var maxima int // Counter for remembering max occurrence of elements.
	timeOccurenceMap := make(map[time.Time]int)
	// Ignore the time sentinel and count the rest.
	for _, time := range modTimes {
		if time == timeSentinel {
			continue
		}
		timeOccurenceMap[time]++
	}

	// Find the common cardinality from previously collected
	// occurrences of elements.
	for time, count := range timeOccurenceMap {
		if count > maxima || (count == maxima && time.After(modTime)) {
			maxima = count
			modTime = time
		}
	}

	// Return the collected common modTime and its occurrence count.
	return modTime, maxima
}
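
// Illustration (hypothetical values, added for clarity): with three disks
// where two report the same modTime and one is unreachable (sentinel), the
// majority time wins:
//
//	t := time.Date(2017, 8, 31, 22, 5, 0, 0, time.UTC)
//	modTime, count := commonTime([]time.Time{t, t, timeSentinel})
//	// modTime == t, count == 2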

// The beginning of Unix time is treated as a sentinel value here.
var timeSentinel = time.Unix(0, 0).UTC()

// Boot modTimes up to disk count, setting each value to the time sentinel.
func bootModtimes(diskCount int) []time.Time {
	modTimes := make([]time.Time, diskCount)
	// Boots up all the modtimes.
	for i := range modTimes {
		modTimes[i] = timeSentinel
	}
	return modTimes
}

// Extracts a list of times from an xlMetaV1 slice and returns it, skipping
// slice elements that have errors.
func listObjectModtimes(partsMetadata []xlMetaV1, errs []error) (modTimes []time.Time) {
	modTimes = bootModtimes(len(partsMetadata))
	for index, metadata := range partsMetadata {
		if errs[index] != nil {
			continue
		}
		// Once the file is found, record the modTime reported on disk.
		modTimes[index] = metadata.Stat.ModTime
	}
	return modTimes
}

// Notes:
// There are 5 possible states a disk could be in,
// 1. __online__ - has the latest copy of xl.json - returned by listOnlineDisks
//
// 2. __offline__ - err == errDiskNotFound
//
// 3. __availableWithParts__ - has the latest copy of xl.json and has all
//    parts with checksums matching; returned by disksWithAllParts
//
// 4. __outdated__ - returned by outDatedDisk, provided []StorageAPI
//    returned by disksWithAllParts is passed for latestDisks.
//    - has an old copy of xl.json
//    - doesn't have xl.json (errFileNotFound)
//    - has the latest xl.json but one or more parts are corrupt
//
// 5. __missingParts__ - has the latest copy of xl.json but has some parts
//    missing. This is identified separately since it may need manual
//    inspection to understand the root cause. E.g., this could be due to
//    backend filesystem corruption.
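
// Illustrative flow (a sketch, not prescribed usage): the helpers below are
// typically chained to classify disks before healing. Only functions defined
// in this file are used; the surrounding healing logic lives elsewhere.
//
//	onlineDisks, modTime := listOnlineDisks(disks, partsMetadata, errs)
//	availableDisks, dataErrs, err := disksWithAllParts(onlineDisks, partsMetadata, errs, bucket, object)
//	// Disks present in onlineDisks but nil in availableDisks need healing;
//	// modTime is the latest commonly agreed modification time.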

// listOnlineDisks - returns
// - a slice of disks where disks having an 'older' xl.json (or nothing)
//   are set to nil.
// - the latest (in time) of the maximally occurring modTime(s).
func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
	onlineDisks = make([]StorageAPI, len(disks))

	// List all the file commit times from parts metadata.
	modTimes := listObjectModtimes(partsMetadata, errs)

	// Reduce the list of modTimes to a single common value.
	modTime, _ = commonTime(modTimes)

	// Create a new online disks slice holding the disks with the common modTime.
	for index, t := range modTimes {
		if t == modTime {
			onlineDisks[index] = disks[index]
		} else {
			onlineDisks[index] = nil
		}
	}
	return onlineDisks, modTime
}

// Returns one of the latest updated xlMeta files and the count of valid
// xlMeta(s) that carry that latest update time.
func getLatestXLMeta(partsMetadata []xlMetaV1, errs []error) (xlMetaV1, int) {
	// List all the file commit times from parts metadata.
	modTimes := listObjectModtimes(partsMetadata, errs)

	// Count of xlMeta values carrying the latest update time.
	var count int
	var latestXLMeta xlMetaV1

	// Reduce the list of modTimes to a single common value - i.e. the latest update time.
	modTime, _ := commonTime(modTimes)

	// Iterate through all the modTimes and count the xlMeta(s) with the latest time.
	for index, t := range modTimes {
		if t == modTime && partsMetadata[index].IsValid() {
			latestXLMeta = partsMetadata[index]
			count++
		}
	}

	// Return one of the latest xlMetaData and the count of latest updated xlMeta files.
	return latestXLMeta, count
}
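
// Possible use (an assumption about callers, shown only as a sketch): the
// returned count is typically compared against a quorum value to decide
// whether enough consistent copies of xl.json exist. readQuorum below is a
// hypothetical variable, not defined in this file.
//
//	latestMeta, n := getLatestXLMeta(partsMetadata, errs)
//	if n < readQuorum {
//		// not enough consistent copies of xl.json to proceed
//	}
//	_ = latestMeta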

// disksWithAllParts - This function needs to be called with
// []StorageAPI returned by listOnlineDisks. Returns,
//
// - disks which have all parts specified in the latest xl.json.
//
// - slice of errors about the state of data files on disk - can have
//   a not-found error or a hash-mismatch error.
//
// - non-nil error if any of the disks failed unexpectedly (i.e. error
//   other than file not found and not a checksum error).
func disksWithAllParts(onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs []error, bucket,
	object string) ([]StorageAPI, []error, error) {

	availableDisks := make([]StorageAPI, len(onlineDisks))
	buffer := []byte{}
	dataErrs := make([]error, len(onlineDisks))

	for i, onlineDisk := range onlineDisks {
		if onlineDisk == nil {
			continue
		}

		// The disk has a valid xl.json but may not have all the
		// parts. This is considered an outdated disk, since
		// it needs healing too.
		for _, part := range partsMetadata[i].Parts {
			partPath := filepath.Join(object, part.Name)
			checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(part.Name)
			verifier := NewBitrotVerifier(checksumInfo.Algorithm, checksumInfo.Hash)

			// Verification happens even if a 0-length
			// buffer is passed.
			_, hErr := onlineDisk.ReadFile(bucket, partPath, 0, buffer, verifier)

			_, isCorrupt := hErr.(hashMismatchError)
			switch {
			case isCorrupt:
				fallthrough
			case hErr == errFileNotFound, hErr == errVolumeNotFound:
				dataErrs[i] = hErr
				break
			case hErr != nil:
				// Abort on unhandled errors.
				return nil, nil, errors.Trace(hErr)
			}
		}

		if dataErrs[i] == nil {
			// All parts verified, mark it as all data available.
			availableDisks[i] = onlineDisk
		}
	}

	return availableDisks, dataErrs, nil
}