/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"hash"
	"path"
	"sort"
	"sync"
	"time"

	"github.com/minio/highwayhash"
	"github.com/minio/minio/cmd/logger"
	sha256 "github.com/minio/sha256-simd"
	"golang.org/x/crypto/blake2b"
)

const erasureAlgorithmKlauspost = "klauspost/reedsolomon/vandermonde"

// magicHighwayHash256Key is the magic HH-256 key: the HighwayHash-256 hash of
// the first 100 decimals of π (as a UTF-8 string), computed with a zero key.
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")

// BitrotAlgorithm specifies an algorithm used for bitrot protection.
type BitrotAlgorithm uint

const (
	// SHA256 represents the SHA-256 hash function
	SHA256 BitrotAlgorithm = 1 + iota
	// HighwayHash256 represents the HighwayHash-256 hash function
	HighwayHash256
	// BLAKE2b512 represents the BLAKE2b-512 hash function
	BLAKE2b512
)

// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
var DefaultBitrotAlgorithm = HighwayHash256

var bitrotAlgorithms = map[BitrotAlgorithm]string{
	SHA256:         "sha256",
	BLAKE2b512:     "blake2b",
	HighwayHash256: "highwayhash256",
}

// New returns a new hash.Hash calculating the given bitrot algorithm.
// New logs an error and exits if the algorithm is not supported or not
// linked into the binary.
func (a BitrotAlgorithm) New() hash.Hash {
	switch a {
	case SHA256:
		return sha256.New()
	case BLAKE2b512:
		b2, _ := blake2b.New512(nil) // New512 never returns an error if the key is nil
		return b2
	case HighwayHash256:
		hh, _ := highwayhash.New(magicHighwayHash256Key) // New never returns an error since the key is 256 bit
		return hh
	}
	logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
	return nil
}

// Available reports whether the given algorithm is supported and linked into the binary.
func (a BitrotAlgorithm) Available() bool {
	_, ok := bitrotAlgorithms[a]
	return ok
}

// String returns the string identifier for a given bitrot algorithm.
// If the algorithm is not supported, String logs a critical error and exits.
func (a BitrotAlgorithm) String() string {
	name, ok := bitrotAlgorithms[a]
	if !ok {
		logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
	}
	return name
}

// BitrotAlgorithmFromString returns a bitrot algorithm from the given string representation.
// It returns 0 if the string representation does not match any supported algorithm.
// The zero value of a bitrot algorithm is never supported.
func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
	for alg, name := range bitrotAlgorithms {
		if name == s {
			return alg
		}
	}
	return
}
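
// Illustrative sketch (not part of the original source): computing a bitrot
// checksum over a buffer with the default algorithm. The helper name
// exampleBitrotChecksum is hypothetical.
func exampleBitrotChecksum(data []byte) []byte {
	h := DefaultBitrotAlgorithm.New() // HighwayHash-256 unless the default is changed
	h.Write(data)                     // hash.Hash.Write never returns an error
	return h.Sum(nil)
}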

// objectPartInfo - carries info of each part kept in the multipart metadata
// file after CompleteMultipartUpload() is called.
type objectPartInfo struct {
	Number int    `json:"number"`
	Name   string `json:"name"`
	ETag   string `json:"etag"`
	Size   int64  `json:"size"`
}

// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []objectPartInfo

func (t byObjectPartNumber) Len() int           { return len(t) }
func (t byObjectPartNumber) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number }

// ChecksumInfo - carries checksums of individual scattered parts per disk.
type ChecksumInfo struct {
	Name      string
	Algorithm BitrotAlgorithm
	Hash      []byte
}

// MarshalJSON marshals the ChecksumInfo struct
func (c ChecksumInfo) MarshalJSON() ([]byte, error) {
	type checksuminfo struct {
		Name      string `json:"name"`
		Algorithm string `json:"algorithm"`
		Hash      string `json:"hash"`
	}

	info := checksuminfo{
		Name:      c.Name,
		Algorithm: c.Algorithm.String(),
		Hash:      hex.EncodeToString(c.Hash),
	}
	return json.Marshal(info)
}

// UnmarshalJSON unmarshals the given data into the ChecksumInfo struct
func (c *ChecksumInfo) UnmarshalJSON(data []byte) error {
	type checksuminfo struct {
		Name      string `json:"name"`
		Algorithm string `json:"algorithm"`
		Hash      string `json:"hash"`
	}

	var info checksuminfo
	err := json.Unmarshal(data, &info)
	if err != nil {
		return err
	}
	c.Algorithm = BitrotAlgorithmFromString(info.Algorithm)
	if !c.Algorithm.Available() {
		return errBitrotHashAlgoInvalid
	}
	c.Hash, err = hex.DecodeString(info.Hash)
	if err != nil {
		return err
	}
	c.Name = info.Name
	return nil
}
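
// Illustrative sketch (not part of the original source): ChecksumInfo round-trips
// through the wire format {"name":...,"algorithm":...,"hash":<hex>} defined by the
// two methods above. The helper name exampleChecksumInfoRoundTrip is hypothetical.
func exampleChecksumInfoRoundTrip() error {
	in := ChecksumInfo{Name: "part.1", Algorithm: HighwayHash256, Hash: []byte{0xde, 0xad}}
	buf, err := json.Marshal(in)
	if err != nil {
		return err
	}
	// buf now holds: {"name":"part.1","algorithm":"highwayhash256","hash":"dead"}
	var out ChecksumInfo
	return json.Unmarshal(buf, &out)
}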

// ErasureInfo holds erasure coding and bitrot related information.
type ErasureInfo struct {
	// Algorithm is the string representation of erasure-coding-algorithm
	Algorithm string `json:"algorithm"`
	// DataBlocks is the number of data blocks for erasure-coding
	DataBlocks int `json:"data"`
	// ParityBlocks is the number of parity blocks for erasure-coding
	ParityBlocks int `json:"parity"`
	// BlockSize is the size of one erasure-coded block
	BlockSize int64 `json:"blockSize"`
	// Index is the index of the current disk
	Index int `json:"index"`
	// Distribution is the distribution of the data and parity blocks
	Distribution []int `json:"distribution"`
	// Checksums holds all bitrot checksums of all erasure encoded blocks
	Checksums []ChecksumInfo `json:"checksum,omitempty"`
}

// AddChecksumInfo adds a checksum of a part.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
	for i, sum := range e.Checksums {
		if sum.Name == ckSumInfo.Name {
			e.Checksums[i] = ckSumInfo
			return
		}
	}
	e.Checksums = append(e.Checksums, ckSumInfo)
}

// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partName string) (ckSum ChecksumInfo) {
	// Return the checksum.
	for _, sum := range e.Checksums {
		if sum.Name == partName {
			return sum
		}
	}
	return ChecksumInfo{}
}

// statInfo - carries stat information of the object.
type statInfo struct {
	Size    int64     `json:"size"`    // Size of the object `xl.json`.
	ModTime time.Time `json:"modTime"` // ModTime of the object `xl.json`.
}

// xlMetaV1 represents the `xl.json` metadata header.
type xlMetaV1 struct {
	Version string   `json:"version"` // Version of the current `xl.json`.
	Format  string   `json:"format"`  // Format of the current `xl.json`.
	Stat    statInfo `json:"stat"`    // Stat of the current object `xl.json`.
	// Erasure coded info for the current object `xl.json`.
	Erasure ErasureInfo `json:"erasure"`
	// Minio release tag for current object `xl.json`.
	Minio struct {
		Release string `json:"release"`
	} `json:"minio"`
	// Metadata map for current object `xl.json`.
	Meta map[string]string `json:"meta,omitempty"`
	// Captures all the individual parts of the object `xl.json`.
	Parts []objectPartInfo `json:"parts,omitempty"`
}
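
// Illustrative sketch (reconstructed from the struct tags above; all values
// are made up): a single-part object serializes to an `xl.json` roughly like
//
//	{
//	  "version": "1.0.1",
//	  "format": "xl",
//	  "stat": {"size": 1024, "modTime": "2018-05-04T00:00:00Z"},
//	  "erasure": {
//	    "algorithm": "klauspost/reedsolomon/vandermonde",
//	    "data": 2, "parity": 2, "blockSize": 10485760,
//	    "index": 1, "distribution": [3, 1, 4, 2],
//	    "checksum": [{"name": "part.1", "algorithm": "highwayhash256", "hash": "..."}]
//	  },
//	  "minio": {"release": "RELEASE-TAG"},
//	  "meta": {"content-type": "application/octet-stream"},
//	  "parts": [{"number": 1, "name": "part.1", "etag": "...", "size": 1024}]
//	}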

// XL metadata constants.
const (
	// XL meta version.
	xlMetaVersion = "1.0.1"

	// Older XL meta version, still accepted as valid.
	xlMetaVersion100 = "1.0.0"

	// XL meta format string.
	xlMetaFormat = "xl"

	// Add new constants here.
)

// newXLMetaV1 - initializes new xlMetaV1, adds version, allocates a fresh erasure info.
func newXLMetaV1(object string, dataBlocks, parityBlocks int) (xlMeta xlMetaV1) {
	xlMeta = xlMetaV1{}
	xlMeta.Version = xlMetaVersion
	xlMeta.Format = xlMetaFormat
	xlMeta.Minio.Release = ReleaseTag
	xlMeta.Erasure = ErasureInfo{
		Algorithm:    erasureAlgorithmKlauspost,
		DataBlocks:   dataBlocks,
		ParityBlocks: parityBlocks,
		BlockSize:    blockSizeV1,
		Distribution: hashOrder(object, dataBlocks+parityBlocks),
	}
	return xlMeta
}
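
// Illustrative sketch (not part of the original source): constructing fresh
// metadata for an object in a 4-data/2-parity layout.
//
//	xlMeta := newXLMetaV1("bucket/object", 4, 2)
//	// xlMeta.Erasure.Distribution now holds a 6-entry permutation
//	// derived from the object name via hashOrder.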

// IsValid - tells if the format is sane by validating the version
// string, format and erasure info fields.
func (m xlMetaV1) IsValid() bool {
	return isXLMetaFormatValid(m.Version, m.Format) &&
		isXLMetaErasureInfoValid(m.Erasure.DataBlocks, m.Erasure.ParityBlocks)
}

// Verifies if the backend format metadata is sane by validating
// the version string and format style.
func isXLMetaFormatValid(version, format string) bool {
	return ((version == xlMetaVersion || version == xlMetaVersion100) &&
		format == xlMetaFormat)
}

// Verifies if the backend format metadata is sane by validating
// the ErasureInfo, i.e. data and parity blocks.
func isXLMetaErasureInfoValid(data, parity int) bool {
	return ((data >= parity) && (data != 0) && (parity != 0))
}
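
// Illustrative note (not part of the original source): the rule above means a
// layout must dedicate at least as many blocks to data as to parity. For a
// 16-disk setup, (8 data, 8 parity) and (10 data, 6 parity) pass, while
// (6 data, 10 parity) is rejected.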

// Converts metadata to object info.
func (m xlMetaV1) ToObjectInfo(bucket, object string) ObjectInfo {
	objInfo := ObjectInfo{
		IsDir:           false,
		Bucket:          bucket,
		Name:            object,
		Size:            m.Stat.Size,
		ModTime:         m.Stat.ModTime,
		ContentType:     m.Meta["content-type"],
		ContentEncoding: m.Meta["content-encoding"],
	}

	// Extract etag from metadata.
	objInfo.ETag = extractETag(m.Meta)

	// etag/md5Sum has already been extracted. We need to
	// remove it to avoid it appearing as part of
	// response headers. e.g, X-Minio-* or X-Amz-*.
	objInfo.UserDefined = cleanMetadata(m.Meta)

	// All the parts per object.
	objInfo.Parts = m.Parts

	// Update storage class.
	if sc, ok := m.Meta[amzStorageClass]; ok {
		objInfo.StorageClass = sc
	} else {
		objInfo.StorageClass = globalMinioDefaultStorageClass
	}

	// Success.
	return objInfo
}

// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []objectPartInfo, partNumber int) int {
	for i, part := range parts {
		if partNumber == part.Number {
			return i
		}
	}
	return -1
}

// AddObjectPart - add a new object part in order.
func (m *xlMetaV1) AddObjectPart(partNumber int, partName string, partETag string, partSize int64) {
	partInfo := objectPartInfo{
		Number: partNumber,
		Name:   partName,
		ETag:   partETag,
		Size:   partSize,
	}

	// Update part info if it already exists.
	for i, part := range m.Parts {
		if partNumber == part.Number {
			m.Parts[i] = partInfo
			return
		}
	}

	// Proceed to include new part info.
	m.Parts = append(m.Parts, partInfo)

	// Parts in xlMeta should be in sorted order by part number.
	sort.Sort(byObjectPartNumber(m.Parts))
}

// ObjectToPartOffset - translate offset of an object to offset of its individual part.
func (m xlMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
	if offset == 0 {
		// Special case - if offset is 0, then partIndex and partOffset are always 0.
		return 0, 0, nil
	}
	partOffset = offset
	// Seek until object offset maps to a particular part offset.
	for i, part := range m.Parts {
		partIndex = i
		// If offset is smaller than the part size, we have reached the proper part offset.
		if partOffset < part.Size {
			return partIndex, partOffset, nil
		}
		// Continue towards the next part.
		partOffset -= part.Size
	}
	logger.LogIf(ctx, InvalidRange{})
	// Offset beyond the size of the object, return InvalidRange.
	return 0, 0, InvalidRange{}
}
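
// Illustrative sketch (not part of the original source): how ObjectToPartOffset
// maps an absolute object offset onto a part. The helper name
// exampleObjectToPartOffset is hypothetical.
func exampleObjectToPartOffset() {
	const MiB = 1 << 20
	var m xlMetaV1
	m.AddObjectPart(1, "part.1", "", 5*MiB)
	m.AddObjectPart(2, "part.2", "", 5*MiB)
	m.AddObjectPart(3, "part.3", "", 2*MiB)
	// Offset 7 MiB skips past part.1 (5 MiB) and lands 2 MiB into part.2,
	// so this returns partIndex=1, partOffset=2*MiB, err=nil.
	partIndex, partOffset, _ := m.ObjectToPartOffset(context.Background(), 7*MiB)
	fmt.Println(partIndex, partOffset)
}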

// pickValidXLMeta - picks one valid xlMeta content from a slice of xlMeta
// contents and returns it.
func pickValidXLMeta(ctx context.Context, metaArr []xlMetaV1, modTime time.Time) (xmv xlMetaV1, e error) {
	// Pick latest valid metadata.
	for _, meta := range metaArr {
		if meta.IsValid() && meta.Stat.ModTime.Equal(modTime) {
			return meta, nil
		}
	}
	err := fmt.Errorf("No valid xl.json present")
	logger.LogIf(ctx, err)
	return xmv, err
}

// list of all errors that can be ignored in a metadata operation.
var objMetadataOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errVolumeNotFound, errFileNotFound, errFileAccessDenied, errCorruptedFormat)

// readXLMetaParts - returns the XL Metadata Parts from xl.json of one of the disks picked at random.
func (xl xlObjects) readXLMetaParts(ctx context.Context, bucket, object string) (xlMetaParts []objectPartInfo, xlMeta map[string]string, err error) {
	var ignoredErrs []error
	for _, disk := range xl.getLoadBalancedDisks() {
		if disk == nil {
			ignoredErrs = append(ignoredErrs, errDiskNotFound)
			continue
		}
		xlMetaParts, xlMeta, err = readXLMetaParts(ctx, disk, bucket, object)
		if err == nil {
			return xlMetaParts, xlMeta, nil
		}
		// If the disk or bucket is unavailable for any reason,
		// continue and read from the other disks.
		if IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
			ignoredErrs = append(ignoredErrs, err)
			continue
		}
		// Error is not ignored, return right here.
		return nil, nil, err
	}
	// If all errors were ignored, reduce to maximal occurrence
	// based on the read quorum.
	readQuorum := len(xl.getDisks()) / 2
	return nil, nil, reduceReadQuorumErrs(ctx, ignoredErrs, nil, readQuorum)
}
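
// Illustrative note (not part of the original source): with 8 disks the read
// quorum above is 8/2 = 4, so once every disk has failed with an ignorable
// error, reduceReadQuorumErrs is expected to surface the most frequent error
// only if at least 4 disks reported it.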

// readXLMetaStat - return xlMetaV1.Stat and xlMetaV1.Meta from one of the disks picked at random.
func (xl xlObjects) readXLMetaStat(ctx context.Context, bucket, object string) (xlStat statInfo, xlMeta map[string]string, err error) {
	var ignoredErrs []error
	for _, disk := range xl.getLoadBalancedDisks() {
		if disk == nil {
			ignoredErrs = append(ignoredErrs, errDiskNotFound)
			continue
		}
		// Parses only xlMetaV1.Meta and xlMeta.Stat
		xlStat, xlMeta, err = readXLMetaStat(ctx, disk, bucket, object)
		if err == nil {
			return xlStat, xlMeta, nil
		}
		// If the disk or bucket is unavailable for any reason,
		// continue and read from the other disks.
		if IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
			ignoredErrs = append(ignoredErrs, err)
			continue
		}
		// Error is not ignored, return right here.
		return statInfo{}, nil, err
	}
	// If all errors were ignored, reduce to maximal occurrence
	// based on the read quorum.
	readQuorum := len(xl.getDisks()) / 2
	return statInfo{}, nil, reduceReadQuorumErrs(ctx, ignoredErrs, nil, readQuorum)
}

// deleteXLMetadata - deletes `xl.json` on a single disk.
func deleteXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string) error {
	jsonFile := path.Join(prefix, xlMetaJSONFile)
	err := disk.DeleteFile(bucket, jsonFile)
	logger.LogIf(ctx, err)
	return err
}

// writeXLMetadata - writes `xl.json` to a single disk.
func writeXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string, xlMeta xlMetaV1) error {
	jsonFile := path.Join(prefix, xlMetaJSONFile)

	// Marshal json.
	metadataBytes, err := json.Marshal(&xlMeta)
	if err != nil {
		logger.LogIf(ctx, err)
		return err
	}
	// Persist marshaled data.
	err = disk.AppendFile(bucket, jsonFile, metadataBytes)
	logger.LogIf(ctx, err)
	return err
}

// deleteAllXLMetadata - deletes all partially written `xl.json` depending on errs.
func deleteAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, prefix string, errs []error) {
	var wg = &sync.WaitGroup{}
	// Delete all the `xl.json` left over.
	for index, disk := range disks {
		if disk == nil {
			continue
		}
		// Undo rename object in parallel.
		wg.Add(1)
		go func(index int, disk StorageAPI) {
			defer wg.Done()
			if errs[index] != nil {
				return
			}
			_ = deleteXLMetadata(ctx, disk, bucket, prefix)
		}(index, disk)
	}
	wg.Wait()
}

// Rename `xl.json` content to destination location for each disk in order.
func renameXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) {
	isDir := false
	srcXLJSON := path.Join(srcEntry, xlMetaJSONFile)
	dstXLJSON := path.Join(dstEntry, xlMetaJSONFile)
	return rename(ctx, disks, srcBucket, srcXLJSON, dstBucket, dstXLJSON, isDir, quorum, []error{errFileNotFound})
}

// writeUniqueXLMetadata - writes unique `xl.json` content for each disk in order.
func writeUniqueXLMetadata(ctx context.Context, disks []StorageAPI, bucket, prefix string, xlMetas []xlMetaV1, quorum int) ([]StorageAPI, error) {
	var wg = &sync.WaitGroup{}
	var mErrs = make([]error, len(disks))

	// Start writing `xl.json` to all disks in parallel.
	for index, disk := range disks {
		if disk == nil {
			logger.LogIf(ctx, errDiskNotFound)
			mErrs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Write `xl.json` in a routine.
		go func(index int, disk StorageAPI) {
			defer wg.Done()

			// Pick one xlMeta for a disk at index.
			xlMetas[index].Erasure.Index = index + 1

			// Write unique `xl.json` for a disk at index.
			err := writeXLMetadata(ctx, disk, bucket, prefix, xlMetas[index])
			if err != nil {
				mErrs[index] = err
			}
		}(index, disk)
	}

	// Wait for all the routines.
	wg.Wait()

	err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
	if err == errXLWriteQuorum {
		// Quorum failed, delete all the `xl.json` written so far.
		deleteAllXLMetadata(ctx, disks, bucket, prefix, mErrs)
	}
	return evalDisks(disks, mErrs), err
}
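
// Illustrative note (not part of the original source): the write path above is
// quorum-gated. Each disk gets its own `xl.json` with a distinct Erasure.Index,
// and unless at least `quorum` disks report success the partially written files
// are deleted again via deleteAllXLMetadata.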

// writeSameXLMetadata - write `xl.json` on all disks in order.
func writeSameXLMetadata(ctx context.Context, disks []StorageAPI, bucket, prefix string, xlMeta xlMetaV1, writeQuorum int) ([]StorageAPI, error) {
	var wg = &sync.WaitGroup{}
	var mErrs = make([]error, len(disks))

	// Start writing `xl.json` to all disks in parallel.
	for index, disk := range disks {
		if disk == nil {
			logger.LogIf(ctx, errDiskNotFound)
			mErrs[index] = errDiskNotFound
			continue
		}
		wg.Add(1)
		// Write `xl.json` in a routine.
		go func(index int, disk StorageAPI, metadata xlMetaV1) {
			defer wg.Done()

			// Save the disk order index.
			metadata.Erasure.Index = index + 1

			// Write xl metadata.
			err := writeXLMetadata(ctx, disk, bucket, prefix, metadata)
			if err != nil {
				mErrs[index] = err
			}
		}(index, disk, xlMeta)
	}

	// Wait for all the routines.
	wg.Wait()

	err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, writeQuorum)
	if err == errXLWriteQuorum {
		// Quorum failed, delete all the `xl.json` written so far.
		deleteAllXLMetadata(ctx, disks, bucket, prefix, mErrs)
	}
	return evalDisks(disks, mErrs), err
}