mirror of https://github.com/minio/minio.git
synced 2024-12-25 06:35:56 -05:00
a337ea4d11
- Changes related to moving admin APIs
  - admin APIs now have an endpoint under /minio/admin
  - admin APIs are now versioned - a new API to serve the version is added at "GET /minio/admin/version" and all API operations have the path prefix /minio/admin/v1/<operation>
  - new service stop API added
  - credentials change API is moved to /minio/admin/v1/config/credential
  - credentials change API and configuration get/set API now require TLS so that credentials are protected
  - all API requests now receive JSON
  - heal APIs are disabled as they will be changed substantially

- Heal API changes

  Heal API is now provided at a single endpoint with the ability for a client to start a heal sequence on all the data in the server, a single bucket, or under a prefix within a bucket.

  When a heal sequence is started, the server returns a unique token that needs to be used for subsequent 'status' requests to fetch heal results. On each status request from the client, the server returns heal result records that it has accumulated since the previous status request. The server accumulates up to 1000 records and pauses healing further objects until the client requests for status. If the client does not request any further records for a long time, the server aborts the heal sequence automatically.

  A heal result record is returned for each entity healed on the server, such as system metadata, object metadata, buckets and objects, and has information about the before and after states on each disk.

  A client may request to force restart a heal sequence - this causes the running heal sequence to be aborted at the next safe spot and starts a new heal sequence.
432 lines
13 KiB
Go
/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cmd

import (
	"encoding/hex"
	"errors"
	"hash/crc32"
	"path"
	"sync"
	"time"

	errors2 "github.com/minio/minio/pkg/errors"
	"github.com/tidwall/gjson"
)

// Returns the number of errors that occurred the most (incl. nil) and the
// corresponding error value. NB When more than one error value occurs the
// maximum number of times, the error value returned depends on how golang's
// map orders keys. This doesn't affect correctness as long as the quorum
// value is greater than or equal to a simple majority, since none of the
// equally maximal values would occur quorum or more number of times.
func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) {
	errorCounts := make(map[error]int)
	errs = errors2.Causes(errs)
	for _, err := range errs {
		if errors2.IsErrIgnored(err, ignoredErrs...) {
			continue
		}
		errorCounts[err]++
	}

	max := 0
	for err, count := range errorCounts {
		switch {
		case max < count:
			max = count
			maxErr = err

		// Prefer `nil` over other error values with the same
		// number of occurrences.
		case max == count && err == nil:
			maxErr = err
		}
	}
	return max, maxErr
}
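
// Illustration (not part of the original source): for a hypothetical
// 4-disk setup where three reads succeed and one disk is offline,
// reduceErrs counts nil three times and errDiskNotFound once:
//
//	errs := []error{nil, nil, nil, errDiskNotFound}
//	count, err := reduceErrs(errs, nil) // count == 3, err == nil
//
// Ties are broken in favor of nil: {nil, nil, err1, err1} yields
// count == 2, err == nil.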

// reduceQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against a generic
// quorum number that can be read or write quorum depending on usage.
// Additionally a special error is provided to be returned in case
// quorum is not satisfied.
func reduceQuorumErrs(errs []error, ignoredErrs []error, quorum int, quorumErr error) (maxErr error) {
	var maxCount int
	maxCount, maxErr = reduceErrs(errs, ignoredErrs)
	switch {
	case maxErr == nil && maxCount >= quorum:
		// Success in quorum.
	case maxErr != nil && maxCount >= quorum:
		// Errors in quorum.
		maxErr = errors2.Trace(maxErr, errs...)
	default:
		// No quorum satisfied.
		maxErr = errors2.Trace(quorumErr, errs...)
	}
	return
}

// reduceReadQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against readQuorum.
func reduceReadQuorumErrs(errs []error, ignoredErrs []error, readQuorum int) (maxErr error) {
	return reduceQuorumErrs(errs, ignoredErrs, readQuorum, errXLReadQuorum)
}

// reduceWriteQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against writeQuorum.
func reduceWriteQuorumErrs(errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) {
	return reduceQuorumErrs(errs, ignoredErrs, writeQuorum, errXLWriteQuorum)
}
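
// Illustration (not part of the original source): with readQuorum == 2,
// two successful reads out of four are enough for nil to win the
// reduction, while a quorum of offline disks surfaces the disk error:
//
//	errs := []error{nil, nil, errDiskNotFound, errFaultyDisk}
//	err := reduceReadQuorumErrs(errs, nil, 2) // err == nil
//
//	errs = []error{errDiskNotFound, errDiskNotFound, errDiskNotFound, nil}
//	err = reduceReadQuorumErrs(errs, nil, 2) // err wraps errDiskNotFound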

// Similar to 'len(slice)' but returns the actual count of allocated
// elements, skipping nil (unallocated) entries.
func diskCount(disks []StorageAPI) int {
	diskCount := 0
	for _, disk := range disks {
		if disk == nil {
			continue
		}
		diskCount++
	}
	return diskCount
}

// hashOrder - hashes the input key to return a consistent
// hashed integer slice. Returned integer order is salted
// with the input key. This results in consistent order.
// NOTE: collisions are fine, we are not looking for uniqueness
// in the slices returned.
func hashOrder(key string, cardinality int) []int {
	if cardinality <= 0 {
		// Returns an empty int slice for non-positive cardinality,
		// which would otherwise cause a modulo-by-zero panic below.
		return nil
	}
	nums := make([]int, cardinality)
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)

	start := int(keyCrc % uint32(cardinality))
	for i := 1; i <= cardinality; i++ {
		nums[i-1] = 1 + ((start + i) % cardinality)
	}
	return nums
}
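
// Illustration (not part of the original source): the result is always a
// rotation of [1..cardinality] whose starting point is derived from the
// key's CRC32. For example, with cardinality 4 and a key whose checksum
// makes start == 2:
//
//	hashOrder(key, 4) // [4, 1, 2, 3]
//
// The same key always yields the same order, which lets callers locate
// the same blocks on the same disks across requests.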

// parseXLStat parses the stat info from the given xl.json buffer.
func parseXLStat(xlMetaBuf []byte) (si statInfo, e error) {
	// Obtain stat info.
	stat := statInfo{}
	// Fetch modTime.
	modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String())
	if err != nil {
		return si, err
	}
	stat.ModTime = modTime
	// Obtain Stat.Size.
	stat.Size = gjson.GetBytes(xlMetaBuf, "stat.size").Int()
	return stat, nil
}

// parseXLVersion returns the "version" field from the xl.json buffer.
func parseXLVersion(xlMetaBuf []byte) string {
	return gjson.GetBytes(xlMetaBuf, "version").String()
}

// parseXLFormat returns the "format" field from the xl.json buffer.
func parseXLFormat(xlMetaBuf []byte) string {
	return gjson.GetBytes(xlMetaBuf, "format").String()
}

// parseXLRelease returns the "minio.release" field from the xl.json buffer.
func parseXLRelease(xlMetaBuf []byte) string {
	return gjson.GetBytes(xlMetaBuf, "minio.release").String()
}

// parseXLErasureInfo parses the xlMetaV1.Erasure fields from the xl.json buffer.
func parseXLErasureInfo(xlMetaBuf []byte) (ErasureInfo, error) {
	erasure := ErasureInfo{}
	erasureResult := gjson.GetBytes(xlMetaBuf, "erasure")
	// Parse xlMetaV1.Erasure.Distribution.
	disResult := erasureResult.Get("distribution").Array()

	distribution := make([]int, len(disResult))
	for i, dis := range disResult {
		distribution[i] = int(dis.Int())
	}
	erasure.Distribution = distribution

	erasure.Algorithm = erasureResult.Get("algorithm").String()
	erasure.DataBlocks = int(erasureResult.Get("data").Int())
	erasure.ParityBlocks = int(erasureResult.Get("parity").Int())
	erasure.BlockSize = erasureResult.Get("blockSize").Int()
	erasure.Index = int(erasureResult.Get("index").Int())

	// Parse the xlMetaV1.Erasure.Checksum array.
	checkSumsResult := erasureResult.Get("checksum").Array()
	checkSums := make([]ChecksumInfo, len(checkSumsResult))
	for i, v := range checkSumsResult {
		algorithm := BitrotAlgorithmFromString(v.Get("algorithm").String())
		if !algorithm.Available() {
			return erasure, errors2.Trace(errBitrotHashAlgoInvalid)
		}
		hash, err := hex.DecodeString(v.Get("hash").String())
		if err != nil {
			return erasure, errors2.Trace(err)
		}
		checkSums[i] = ChecksumInfo{Name: v.Get("name").String(), Algorithm: algorithm, Hash: hash}
	}
	erasure.Checksums = checkSums
	return erasure, nil
}

// parseXLParts parses the xlMetaV1.Parts array from the xl.json buffer.
func parseXLParts(xlMetaBuf []byte) []objectPartInfo {
	partsResult := gjson.GetBytes(xlMetaBuf, "parts").Array()
	partInfo := make([]objectPartInfo, len(partsResult))
	for i, p := range partsResult {
		info := objectPartInfo{}
		info.Number = int(p.Get("number").Int())
		info.Name = p.Get("name").String()
		info.ETag = p.Get("etag").String()
		info.Size = p.Get("size").Int()
		partInfo[i] = info
	}
	return partInfo
}

// parseXLMetaMap parses the xlMetaV1.Meta map from the xl.json buffer.
func parseXLMetaMap(xlMetaBuf []byte) map[string]string {
	metaMapResult := gjson.GetBytes(xlMetaBuf, "meta").Map()
	metaMap := make(map[string]string)
	for key, valResult := range metaMapResult {
		metaMap[key] = valResult.String()
	}
	return metaMap
}

// Constructs xlMetaV1 using the `gjson` lib to retrieve each field.
func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xlMeta xlMetaV1, e error) {
	// Obtain version.
	xlMeta.Version = parseXLVersion(xlMetaBuf)
	// Obtain format.
	xlMeta.Format = parseXLFormat(xlMetaBuf)
	// Parse xlMetaV1.Stat.
	stat, err := parseXLStat(xlMetaBuf)
	if err != nil {
		return xlMeta, err
	}

	xlMeta.Stat = stat
	// Parse the xlMetaV1.Erasure fields.
	xlMeta.Erasure, err = parseXLErasureInfo(xlMetaBuf)
	if err != nil {
		return xlMeta, err
	}

	// Parse the XL parts.
	xlMeta.Parts = parseXLParts(xlMetaBuf)
	// Get the xlMetaV1.Minio.Release field.
	xlMeta.Minio.Release = parseXLRelease(xlMetaBuf)
	// Parse the xlMetaV1.Meta map.
	xlMeta.Meta = parseXLMetaMap(xlMetaBuf)

	return xlMeta, nil
}
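
// Illustration (not part of the original source): an abridged example of
// the `xl.json` document that the gjson paths above read, with
// hypothetical values:
//
//	{
//	  "version": "1.0.1",
//	  "format": "xl",
//	  "stat": { "size": 1024, "modTime": "2017-01-02T15:04:05Z" },
//	  "erasure": {
//	    "algorithm": "klauspost/reedsolomon/vandermonde",
//	    "data": 2, "parity": 2, "blockSize": 10485760, "index": 1,
//	    "distribution": [3, 1, 4, 2],
//	    "checksum": [{ "name": "part.1", "algorithm": "blake2b", "hash": "..." }]
//	  },
//	  "minio": { "release": "RELEASE.2017-01-01T00-00-00Z" },
//	  "meta": { "content-type": "application/octet-stream" },
//	  "parts": [{ "number": 1, "name": "part.1", "etag": "", "size": 1024 }]
//	}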

// Reads xl.json from the given disk, parses it and returns xlMetaV1.Parts.
func readXLMetaParts(disk StorageAPI, bucket string, object string) ([]objectPartInfo, error) {
	// Read the entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		return nil, errors2.Trace(err)
	}
	// Obtain xlMetaV1{}.Parts using `github.com/tidwall/gjson`.
	xlMetaParts := parseXLParts(xlMetaBuf)

	return xlMetaParts, nil
}

// Reads xl.json from the given disk and parses xlMetaV1.Stat and xlMetaV1.Meta using gjson.
func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo, mp map[string]string, e error) {
	// Read the entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		return si, nil, errors2.Trace(err)
	}

	// Obtain version.
	xlVersion := parseXLVersion(xlMetaBuf)

	// Obtain format.
	xlFormat := parseXLFormat(xlMetaBuf)

	// Validate that the xl.json we read is sane.
	if !isXLMetaFormatValid(xlVersion, xlFormat) {
		// For version mismatches and unrecognized formats, return corrupted format.
		return si, nil, errors2.Trace(errCorruptedFormat)
	}

	// Obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
	xlMetaMap := parseXLMetaMap(xlMetaBuf)

	// Obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
	xlStat, err := parseXLStat(xlMetaBuf)
	if err != nil {
		return si, nil, errors2.Trace(err)
	}

	// Return structured `xl.json`.
	return xlStat, xlMetaMap, nil
}

// readXLMeta reads `xl.json` and returns the XL metadata structure.
func readXLMeta(disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {
	// Read the entire `xl.json`.
	xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
	if err != nil {
		return xlMetaV1{}, errors2.Trace(err)
	}
	// Obtain xlMetaV1{} using `github.com/tidwall/gjson`.
	xlMeta, err = xlMetaV1UnmarshalJSON(xlMetaBuf)
	if err != nil {
		return xlMetaV1{}, errors2.Trace(err)
	}
	// Return structured `xl.json`.
	return xlMeta, nil
}

// Reads all `xl.json` metadata as an xlMetaV1 slice.
// Returns an error slice indicating the failed metadata reads.
func readAllXLMetadata(disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) {
	errs := make([]error, len(disks))
	metadataArray := make([]xlMetaV1, len(disks))
	var wg = &sync.WaitGroup{}
	// Read `xl.json` in parallel across disks.
	for index, disk := range disks {
		if disk == nil {
			errs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Read `xl.json` in a goroutine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			var err error
			metadataArray[index], err = readXLMeta(disk, bucket, object)
			if err != nil {
				errs[index] = err
				return
			}
		}(index, disk)
	}

	// Wait for all the goroutines to finish.
	wg.Wait()

	// Return all the metadata.
	return metadataArray, errs
}
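
// Illustration (not part of the original source): the returned metadata
// and error slices are parallel, so callers typically feed errs straight
// into the quorum reduction above, e.g.:
//
//	metaArr, errs := readAllXLMetadata(disks, bucket, object)
//	if err := reduceReadQuorumErrs(errs, objectOpIgnoredErrs, readQuorum); err != nil {
//	    return err
//	}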

// Returns shuffled partsMetadata depending on the distribution.
func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) {
	if distribution == nil {
		return partsMetadata
	}
	shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata))
	// Shuffle the xl metadata slice for the expected distribution.
	for index := range partsMetadata {
		blockIndex := distribution[index]
		shuffledPartsMetadata[blockIndex-1] = partsMetadata[index]
	}
	return shuffledPartsMetadata
}

// shuffleDisks - shuffles the input disks slice depending on the
// erasure distribution. Returns a shuffled slice of disks in
// their expected distribution order.
func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) {
	if distribution == nil {
		return disks
	}
	shuffledDisks = make([]StorageAPI, len(disks))
	// Shuffle disks for the expected distribution.
	for index := range disks {
		blockIndex := distribution[index]
		shuffledDisks[blockIndex-1] = disks[index]
	}
	return shuffledDisks
}
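
// Illustration (not part of the original source): for a hypothetical
// distribution [3, 1, 4, 2], disks[0] serves block 3, disks[1] serves
// block 1, and so on, so the shuffle produces:
//
//	shuffleDisks([]StorageAPI{d0, d1, d2, d3}, []int{3, 1, 4, 2})
//	// => [d1, d3, d0, d2]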

// unshuffleIndex - performs the reverse of the shuffleDisks operation
// for a single 0-based index, returning -1 if the index is not found.
func unshuffleIndex(n int, distribution []int) int {
	for i, v := range distribution {
		if v-1 == n {
			return i
		}
	}
	return -1
}
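
// Illustration (not part of the original source): unshuffleIndex inverts
// the mapping above. Continuing the [3, 1, 4, 2] example, shuffled
// position 0 holds d1, and:
//
//	unshuffleIndex(0, []int{3, 1, 4, 2}) // 1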

// evalDisks - returns a new slice of disks where nil is set if
// the corresponding error in the errs slice is not nil.
func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
	if len(errs) != len(disks) {
		errorIf(errors.New("unexpected disks/errors slice length"), "unable to evaluate internal disks")
		return nil
	}
	newDisks := make([]StorageAPI, len(disks))
	for index := range errs {
		if errs[index] == nil {
			newDisks[index] = disks[index]
		} else {
			newDisks[index] = nil
		}
	}
	return newDisks
}
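
// Illustration (not part of the original source): evalDisks masks out the
// disks whose previous operation failed, e.g.:
//
//	errs := []error{nil, errDiskNotFound, nil}
//	live := evalDisks([]StorageAPI{d0, d1, d2}, errs)
//	// => [d0, nil, d2]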

// Errors specifically generated by the calculatePartSizeFromIdx function.
var (
	errPartSizeZero  = errors.New("Part size cannot be zero")
	errPartSizeIndex = errors.New("Part index cannot be smaller than 1")
)

// calculatePartSizeFromIdx calculates the part size for the given part index.
// Returns an error if totalSize is negative, partSize is zero, or partIndex
// is smaller than 1.
func calculatePartSizeFromIdx(totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
	if totalSize < 0 {
		return 0, errors2.Trace(errInvalidArgument)
	}
	if partSize == 0 {
		return 0, errors2.Trace(errPartSizeZero)
	}
	if partIndex < 1 {
		return 0, errors2.Trace(errPartSizeIndex)
	}
	if totalSize > 0 {
		// Compute the total count of parts.
		partsCount := totalSize/partSize + 1
		// Return the part's size.
		switch {
		case int64(partIndex) < partsCount:
			currPartSize = partSize
		case int64(partIndex) == partsCount:
			// Size of the last part.
			currPartSize = totalSize % partSize
		default:
			currPartSize = 0
		}
	}
	return currPartSize, nil
}
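
// Illustration (not part of the original source): with a hypothetical
// totalSize of 26 and partSize of 10, partsCount is 26/10 + 1 == 3, so
// parts 1 and 2 are full at 10 bytes and part 3 carries the 26 % 10 == 6
// byte remainder:
//
//	calculatePartSizeFromIdx(26, 10, 1) // 10, nil
//	calculatePartSizeFromIdx(26, 10, 3) // 6, nil
//	calculatePartSizeFromIdx(26, 10, 4) // 0, nil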