2016-07-21 20:31:14 -04:00
|
|
|
/*
|
2017-01-18 15:24:34 -05:00
|
|
|
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
|
2016-07-21 20:31:14 -04:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2016-08-18 19:23:42 -04:00
|
|
|
package cmd
|
2016-05-20 23:48:47 -04:00
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/json"
|
2017-05-05 11:49:09 -04:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
2017-01-16 20:05:00 -05:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
pathutil "path"
|
2016-05-20 23:48:47 -04:00
|
|
|
"sort"
|
2017-01-16 20:05:00 -05:00
|
|
|
"strings"
|
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
"github.com/minio/minio-go/pkg/set"
|
2017-01-16 20:05:00 -05:00
|
|
|
"github.com/minio/minio/pkg/lock"
|
|
|
|
"github.com/minio/minio/pkg/mimedb"
|
2017-04-04 12:14:03 -04:00
|
|
|
"github.com/tidwall/gjson"
|
2016-05-20 23:48:47 -04:00
|
|
|
)
|
|
|
|
|
2016-05-25 00:24:20 -04:00
|
|
|
// Names of the FS backend's on-disk metadata files.
const (
	// fsMetaJSONFile - per-object metadata file name.
	fsMetaJSONFile = "fs.json"
	// fsFormatJSONFile - backend format descriptor file name.
	fsFormatJSONFile = "format.json"
)
|
|
|
|
|
2016-05-20 23:48:47 -04:00
|
|
|
// fsMetaV1 - on-disk layout of the per-object `fs.json` metadata file
// used by the FS backend.
type fsMetaV1 struct {
	// Version of this metadata format, see fsMetaVersion.
	Version string `json:"version"`
	// Backend format identifier, see fsMetaFormat.
	Format string `json:"format"`
	Minio  struct {
		// Minio release that wrote this metadata.
		Release string `json:"release"`
	} `json:"minio"`
	// Metadata map for current object `fs.json`.
	Meta map[string]string `json:"meta,omitempty"`
	// Multipart upload parts; kept sorted by part number (see AddObjectPart).
	Parts []objectPartInfo `json:"parts,omitempty"`
}
|
|
|
|
|
2017-01-16 20:05:00 -05:00
|
|
|
// Converts metadata to object info.
|
|
|
|
func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo {
|
|
|
|
if len(m.Meta) == 0 {
|
|
|
|
m.Meta = make(map[string]string)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Guess content-type from the extension if possible.
|
|
|
|
if m.Meta["content-type"] == "" {
|
|
|
|
if objectExt := pathutil.Ext(object); objectExt != "" {
|
|
|
|
if content, ok := mimedb.DB[strings.ToLower(strings.TrimPrefix(objectExt, "."))]; ok {
|
|
|
|
m.Meta["content-type"] = content.ContentType
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
objInfo := ObjectInfo{
|
|
|
|
Bucket: bucket,
|
|
|
|
Name: object,
|
|
|
|
}
|
|
|
|
|
2017-04-04 12:14:03 -04:00
|
|
|
// We set file info only if its valid.
|
2017-01-16 20:05:00 -05:00
|
|
|
objInfo.ModTime = timeSentinel
|
|
|
|
if fi != nil {
|
|
|
|
objInfo.ModTime = fi.ModTime()
|
|
|
|
objInfo.Size = fi.Size()
|
|
|
|
objInfo.IsDir = fi.IsDir()
|
|
|
|
}
|
|
|
|
|
|
|
|
objInfo.MD5Sum = m.Meta["md5Sum"]
|
|
|
|
objInfo.ContentType = m.Meta["content-type"]
|
|
|
|
objInfo.ContentEncoding = m.Meta["content-encoding"]
|
|
|
|
|
|
|
|
// md5Sum has already been extracted into objInfo.MD5Sum. We
|
|
|
|
// need to remove it from m.Meta to avoid it from appearing as
|
|
|
|
// part of response headers. e.g, X-Minio-* or X-Amz-*.
|
|
|
|
delete(m.Meta, "md5Sum")
|
|
|
|
|
|
|
|
// Save all the other userdefined API.
|
|
|
|
objInfo.UserDefined = m.Meta
|
|
|
|
|
|
|
|
// Success..
|
|
|
|
return objInfo
|
|
|
|
}
|
|
|
|
|
2016-05-26 06:15:01 -04:00
|
|
|
// ObjectPartIndex - returns the index of matching object part number.
|
|
|
|
func (m fsMetaV1) ObjectPartIndex(partNumber int) (partIndex int) {
|
2016-05-20 23:48:47 -04:00
|
|
|
for i, part := range m.Parts {
|
2016-05-26 06:15:01 -04:00
|
|
|
if partNumber == part.Number {
|
|
|
|
partIndex = i
|
|
|
|
return partIndex
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return -1
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddObjectPart - add a new object part in order.
|
2016-05-26 06:15:01 -04:00
|
|
|
func (m *fsMetaV1) AddObjectPart(partNumber int, partName string, partETag string, partSize int64) {
|
|
|
|
partInfo := objectPartInfo{
|
|
|
|
Number: partNumber,
|
|
|
|
Name: partName,
|
|
|
|
ETag: partETag,
|
|
|
|
Size: partSize,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update part info if it already exists.
|
|
|
|
for i, part := range m.Parts {
|
|
|
|
if partNumber == part.Number {
|
|
|
|
m.Parts[i] = partInfo
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Proceed to include new part info.
|
|
|
|
m.Parts = append(m.Parts, partInfo)
|
|
|
|
|
|
|
|
// Parts in fsMeta should be in sorted order by part number.
|
2016-05-31 23:23:31 -04:00
|
|
|
sort.Sort(byObjectPartNumber(m.Parts))
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
|
|
|
|
2017-01-25 15:29:06 -05:00
|
|
|
func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
|
2017-01-16 20:05:00 -05:00
|
|
|
var metadataBytes []byte
|
|
|
|
metadataBytes, err = json.Marshal(m)
|
2016-06-25 17:51:06 -04:00
|
|
|
if err != nil {
|
2017-01-16 20:05:00 -05:00
|
|
|
return 0, traceError(err)
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
2016-06-24 05:06:23 -04:00
|
|
|
|
2017-01-25 15:29:06 -05:00
|
|
|
if err = lk.Truncate(0); err != nil {
|
2017-01-16 20:05:00 -05:00
|
|
|
return 0, traceError(err)
|
|
|
|
}
|
|
|
|
|
2017-01-25 15:29:06 -05:00
|
|
|
if _, err = lk.Write(metadataBytes); err != nil {
|
2017-01-16 20:05:00 -05:00
|
|
|
return 0, traceError(err)
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
2016-06-24 05:06:23 -04:00
|
|
|
|
|
|
|
// Success.
|
2017-01-16 20:05:00 -05:00
|
|
|
return int64(len(metadataBytes)), nil
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
|
|
|
|
2017-04-04 12:14:03 -04:00
|
|
|
func parseFSVersion(fsMetaBuf []byte) string {
|
|
|
|
return gjson.GetBytes(fsMetaBuf, "version").String()
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseFSFormat(fsMetaBuf []byte) string {
|
|
|
|
return gjson.GetBytes(fsMetaBuf, "format").String()
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseFSRelease(fsMetaBuf []byte) string {
|
|
|
|
return gjson.GetBytes(fsMetaBuf, "minio.release").String()
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseFSMetaMap(fsMetaBuf []byte) map[string]string {
|
|
|
|
// Get xlMetaV1.Meta map.
|
|
|
|
metaMapResult := gjson.GetBytes(fsMetaBuf, "meta").Map()
|
|
|
|
metaMap := make(map[string]string)
|
|
|
|
for key, valResult := range metaMapResult {
|
|
|
|
metaMap[key] = valResult.String()
|
|
|
|
}
|
|
|
|
return metaMap
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseFSParts(fsMetaBuf []byte) []objectPartInfo {
|
|
|
|
// Parse the FS Parts.
|
|
|
|
partsResult := gjson.GetBytes(fsMetaBuf, "parts").Array()
|
|
|
|
partInfo := make([]objectPartInfo, len(partsResult))
|
|
|
|
for i, p := range partsResult {
|
|
|
|
info := objectPartInfo{}
|
|
|
|
info.Number = int(p.Get("number").Int())
|
|
|
|
info.Name = p.Get("name").String()
|
|
|
|
info.ETag = p.Get("etag").String()
|
|
|
|
info.Size = p.Get("size").Int()
|
|
|
|
partInfo[i] = info
|
|
|
|
}
|
|
|
|
return partInfo
|
|
|
|
}
|
|
|
|
|
2017-01-25 15:29:06 -05:00
|
|
|
func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
|
2017-04-04 12:14:03 -04:00
|
|
|
var fsMetaBuf []byte
|
2017-01-25 15:29:06 -05:00
|
|
|
fi, err := lk.Stat()
|
|
|
|
if err != nil {
|
|
|
|
return 0, traceError(err)
|
|
|
|
}
|
|
|
|
|
2017-04-04 12:14:03 -04:00
|
|
|
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
|
2016-08-31 16:42:57 -04:00
|
|
|
if err != nil {
|
2017-01-16 20:05:00 -05:00
|
|
|
return 0, traceError(err)
|
2016-08-31 16:42:57 -04:00
|
|
|
}
|
2017-01-16 20:05:00 -05:00
|
|
|
|
2017-04-04 12:14:03 -04:00
|
|
|
if len(fsMetaBuf) == 0 {
|
2017-01-16 20:05:00 -05:00
|
|
|
return 0, traceError(io.EOF)
|
2016-08-25 12:39:01 -04:00
|
|
|
}
|
2017-01-16 20:05:00 -05:00
|
|
|
|
2017-04-04 12:14:03 -04:00
|
|
|
// obtain version.
|
|
|
|
m.Version = parseFSVersion(fsMetaBuf)
|
|
|
|
|
|
|
|
// obtain format.
|
|
|
|
m.Format = parseFSFormat(fsMetaBuf)
|
|
|
|
|
|
|
|
// obtain metadata.
|
|
|
|
m.Meta = parseFSMetaMap(fsMetaBuf)
|
|
|
|
|
|
|
|
// obtain parts info list.
|
|
|
|
m.Parts = parseFSParts(fsMetaBuf)
|
|
|
|
|
|
|
|
// obtain minio release date.
|
|
|
|
m.Minio.Release = parseFSRelease(fsMetaBuf)
|
2017-01-16 20:05:00 -05:00
|
|
|
|
|
|
|
// Success.
|
2017-04-04 12:14:03 -04:00
|
|
|
return int64(len(fsMetaBuf)), nil
|
2016-08-31 16:42:57 -04:00
|
|
|
}
|
|
|
|
|
2017-01-18 15:24:34 -05:00
|
|
|
// FS metadata constants.
const (
	// FS backend meta version, written into fsMetaV1.Version.
	fsMetaVersion = "1.0.0"

	// FS backend meta format, written into fsMetaV1.Format.
	fsMetaFormat = "fs"

	// FS backend format version currently in use (see fsFormatV2).
	fsFormatVersion = fsFormatV2

	// Add more constants here.
)
|
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
// FS format version strings recorded in `format.json` under the FS section.
const (
	fsFormatV1 = "1" // Previous format.
	fsFormatV2 = "2" // Current format.
	// Proceed to add "3" when we
	// change the backend format in future.
)
|
|
|
|
|
2016-05-26 06:15:01 -04:00
|
|
|
// newFSMetaV1 - initializes new fsMetaV1.
|
|
|
|
func newFSMetaV1() (fsMeta fsMetaV1) {
|
|
|
|
fsMeta = fsMetaV1{}
|
2017-01-18 15:24:34 -05:00
|
|
|
fsMeta.Version = fsMetaVersion
|
|
|
|
fsMeta.Format = fsMetaFormat
|
2016-08-18 19:23:42 -04:00
|
|
|
fsMeta.Minio.Release = ReleaseTag
|
2016-05-26 06:15:01 -04:00
|
|
|
return fsMeta
|
|
|
|
}
|
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
// newFSFormatV2 - initializes new formatConfigV1 with FS format version 2.
|
|
|
|
func newFSFormatV2() (format *formatConfigV1) {
|
2016-10-05 15:48:07 -04:00
|
|
|
return &formatConfigV1{
|
2016-06-14 04:39:40 -04:00
|
|
|
Version: "1",
|
|
|
|
Format: "fs",
|
|
|
|
FS: &fsFormat{
|
2017-05-05 11:49:09 -04:00
|
|
|
Version: fsFormatV2,
|
2016-06-14 04:39:40 -04:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
// Checks if input format is version 1 and 2.
|
|
|
|
func isFSValidFormat(formatCfg *formatConfigV1) bool {
|
|
|
|
// Supported format versions.
|
|
|
|
var supportedFormatVersions = []string{
|
|
|
|
fsFormatV1,
|
|
|
|
fsFormatV2,
|
|
|
|
// New supported versions here.
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check for supported format versions.
|
|
|
|
for _, version := range supportedFormatVersions {
|
|
|
|
if formatCfg.FS.Version == version {
|
|
|
|
return true
|
2017-01-16 20:05:00 -05:00
|
|
|
}
|
|
|
|
}
|
2017-05-05 11:49:09 -04:00
|
|
|
return false
|
|
|
|
}
|
2017-01-16 20:05:00 -05:00
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
// errFSFormatOld- old fs format.
|
|
|
|
var errFSFormatOld = errors.New("old FS format found")
|
|
|
|
|
|
|
|
// Checks if the loaded `format.json` is valid and
|
|
|
|
// is expected to be of the requested version.
|
|
|
|
func checkFormatFS(format *formatConfigV1, formatVersion string) error {
|
|
|
|
if format == nil {
|
|
|
|
return errUnexpected
|
|
|
|
}
|
|
|
|
|
|
|
|
// Validate if we have the same format.
|
|
|
|
if format.Format != "fs" {
|
|
|
|
return fmt.Errorf("Unable to recognize backend format, Disk is not in FS format. %s", format.Format)
|
2017-01-16 20:05:00 -05:00
|
|
|
}
|
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
// Check if format is currently supported.
|
|
|
|
if !isFSValidFormat(format) {
|
|
|
|
return errCorruptedFormat
|
2017-01-16 20:05:00 -05:00
|
|
|
}
|
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
// Check for format version is current.
|
|
|
|
if format.FS.Version != formatVersion {
|
|
|
|
return errFSFormatOld
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2016-07-03 23:01:40 -04:00
|
|
|
}
|
|
|
|
|
2017-05-05 11:49:09 -04:00
|
|
|
// checkFormatSanityFSV1 - kept only as reference; there is no sanity
// check for FS format in version "1", so this always succeeds.
// The fsPath argument is intentionally unused.
func checkFormatSanityFSV1(fsPath string) error {
	return nil
}
|
|
|
|
|
|
|
|
// Check for sanity of FS format in version "2": every bucket's config
// directory must contain only the expected metadata entries.
func checkFormatSanityFSV2(fsPath string) error {
	// A missing buckets config directory is fine (no buckets yet).
	buckets, err := readDir(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix))
	if err != nil && err != errFileNotFound {
		return err
	}

	// Attempt to validate all the buckets have a sanitized backend.
	for _, bucket := range buckets {
		entries, rerr := readDir(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix, bucket))
		if rerr != nil {
			return rerr
		}

		// NOTE(review): append on the package-level bucketMetadataConfigs
		// slice each iteration may share its backing array — confirm
		// bucketMetadataConfigs always has len == cap or is never mutated.
		var expectedConfigs = append(bucketMetadataConfigs, objectMetaPrefix+"/")
		entriesSet := set.CreateStringSet(entries...)
		expectedConfigsSet := set.CreateStringSet(expectedConfigs...)

		// Entries found shouldn't be more than total
		// expected config directories, files.
		if len(entriesSet) > len(expectedConfigsSet) {
			return errCorruptedFormat
		}

		// Look for the difference between entries and the
		// expected config set, resulting entries if they
		// intersect with original entries set we know
		// that the backend has unexpected files.
		if !entriesSet.Difference(expectedConfigsSet).IsEmpty() {
			return errCorruptedFormat
		}
	}
	return nil
}
|
|
|
|
|
|
|
|
// Check for sanity of FS format for a given version.
|
|
|
|
func checkFormatSanityFS(fsPath string, fsFormatVersion string) (err error) {
|
|
|
|
switch fsFormatVersion {
|
|
|
|
case fsFormatV2:
|
|
|
|
err = checkFormatSanityFSV2(fsPath)
|
|
|
|
default:
|
|
|
|
err = errCorruptedFormat
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Initializes a new `format.json` if not present, validates `format.json`
// if already present and migrates object metadata to the newer format
// version if necessary. Returns an error on any failure.
func initFormatFS(fsPath, fsUUID string) (err error) {
	fsFormatPath := pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile)

	// Open `format.json` under a file lock, creating it if missing;
	// fsFormatJSONFile lives in minioMetaBucket (.minio.sys).
	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return traceError(err)
	}
	defer lk.Close()

	var format = &formatConfigV1{}
	_, err = format.ReadFrom(lk)
	// For all unexpected errors, we return. io.EOF is expected for a
	// freshly created (empty) format file and handled below.
	if err != nil && errorCause(err) != io.EOF {
		return traceError(fmt.Errorf("Unable to load 'format.json', %s", err))
	}

	// If we couldn't read anything, the disk is unformatted; note the
	// sentinel in `err` so the write branch below triggers.
	if errorCause(err) == io.EOF {
		err = errUnformattedDisk
		format = newFSFormatV2()
	} else {
		// Validate loaded `format.json`; errFSFormatOld is tolerated
		// here because it is handled by the migration branch below.
		err = checkFormatFS(format, fsFormatVersion)
		if err != nil && err != errFSFormatOld {
			return traceError(fmt.Errorf("Unable to validate 'format.json', %s", err))
		}
	}

	// Disk is in old format: migrate object metadata first, then
	// replace the in-memory format with the current v2 layout.
	if err == errFSFormatOld {
		if merr := migrateFSObject(fsPath, fsUUID); merr != nil {
			return merr
		}

		// Initialize format v2.
		format = newFSFormatV2()
	}

	// Rewrite or write format.json depending on if disk
	// unformatted and if format is old.
	if err == errUnformattedDisk || err == errFSFormatOld {
		if _, err = format.WriteTo(lk); err != nil {
			return traceError(fmt.Errorf("Unable to initialize 'format.json', %s", err))
		}
	}

	// Check for sanity of the (possibly freshly written) backend layout.
	return checkFormatSanityFS(fsPath, format.FS.Version)
}
|
|
|
|
|
2016-08-31 16:42:57 -04:00
|
|
|
// Return if the part info in uploadedParts and completeParts are same.
|
|
|
|
func isPartsSame(uploadedParts []objectPartInfo, completeParts []completePart) bool {
|
|
|
|
if len(uploadedParts) != len(completeParts) {
|
|
|
|
return false
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
2017-01-16 20:05:00 -05:00
|
|
|
|
2016-08-31 16:42:57 -04:00
|
|
|
for i := range completeParts {
|
|
|
|
if uploadedParts[i].Number != completeParts[i].PartNumber ||
|
|
|
|
uploadedParts[i].ETag != completeParts[i].ETag {
|
|
|
|
return false
|
|
|
|
}
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|
2017-01-16 20:05:00 -05:00
|
|
|
|
2016-08-31 16:42:57 -04:00
|
|
|
return true
|
2016-05-20 23:48:47 -04:00
|
|
|
}
|