mirror of https://github.com/minio/minio.git
synced 2025-01-25 21:53:16 -05:00
xl: Change fileMetadata to xlMetadata. (#1404)
Finalized backend format:

```json
{
  "version": "1.0.0",
  "stat": {
    "size": 24256,
    "modTime": "2016-04-28T00:11:37.843Z"
  },
  "erasure": {
    "data": 5,
    "parity": 5,
    "blockSize": 4194304
  },
  "minio": {
    "release": "RELEASE.2016-04-28T00-09-47Z"
  }
}
```
This commit is contained in:

parent 41b35cff7b
commit a1a667ae5d

xl-json.md (new file, 39 lines)
@@ -0,0 +1,39 @@

### xl.json

``xl.json`` is a special file captured and written by the XL storage API layer
to interpret, manage and extract erasure-coded data on multiple disks.

```json
{
    "version": "1.0.0",
    "stat": {
        "size": 24256,
        "modTime": "2016-04-28T00:11:37.843Z",
        "version": 0
    },
    "erasure": {
        "data": 5,
        "parity": 5,
        "blockSize": 4194304
    },
    "minio": {
        "release": "RELEASE.2016-04-28T00-09-47Z"
    }
}
```

#### JSON field meanings

- "version" // Version of this metadata file.
- "stat" // Stat values of the written file.
  - "size" // Size of the file.
  - "modTime" // Modification time of the file.
  - "version" // File version, tracked when disks are down.
- "erasure" // Erasure metadata for the written file.
  - "data" // Number of data blocks for the file.
  - "parity" // Number of parity blocks for the file.
  - "blockSize" // Read/write chunk size used for erasure coding.
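To make the field mapping concrete, here is a minimal, standalone Go sketch (not part of the commit) that decodes the example document above into a trimmed copy of the `xlMetaV1` struct this commit introduces:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed copy of the xlMetaV1 struct introduced by this commit.
type xlMetaV1 struct {
	Version string `json:"version"`
	Stat    struct {
		Size    int64     `json:"size"`
		ModTime time.Time `json:"modTime"`
		Version int64     `json:"version"`
	} `json:"stat"`
	Erasure struct {
		DataBlocks   int   `json:"data"`
		ParityBlocks int   `json:"parity"`
		BlockSize    int64 `json:"blockSize"`
	} `json:"erasure"`
	Minio struct {
		Release string `json:"release"`
	} `json:"minio"`
}

func main() {
	doc := []byte(`{
		"version": "1.0.0",
		"stat": {"size": 24256, "modTime": "2016-04-28T00:11:37.843Z", "version": 0},
		"erasure": {"data": 5, "parity": 5, "blockSize": 4194304},
		"minio": {"release": "RELEASE.2016-04-28T00-09-47Z"}
	}`)

	var m xlMetaV1
	if err := json.Unmarshal(doc, &m); err != nil {
		panic(err)
	}
	// 24256 bytes striped as 5 data + 5 parity blocks of up to 4 MiB each.
	fmt.Println(m.Stat.Size, m.Erasure.DataBlocks, m.Erasure.ParityBlocks)
}
```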
```diff
@@ -17,7 +17,6 @@
 package main
 
 import (
-	"encoding/json"
 	"errors"
 	slashpath "path"
 	"path/filepath"
@@ -37,27 +36,11 @@ func highestInt(intSlice []int64) (highestInteger int64) {
 }
 
 // Extracts file versions from partsMetadata slice and returns version slice.
-func listFileVersions(partsMetadata []fileMetadata, errs []error) (versions []int64, err error) {
+func listFileVersions(partsMetadata []xlMetaV1, errs []error) (versions []int64, err error) {
 	versions = make([]int64, len(partsMetadata))
 	for index, metadata := range partsMetadata {
 		if errs[index] == nil {
-			var version int64
-			version, err = metadata.GetFileVersion()
-			if err == errMetadataKeyNotExist {
-				log.WithFields(logrus.Fields{
-					"metadata": metadata,
-				}).Errorf("Missing 'file.version', %s", errMetadataKeyNotExist)
-				versions[index] = 0
-				continue
-			}
-			if err != nil {
-				log.WithFields(logrus.Fields{
-					"metadata": metadata,
-				}).Errorf("'file.version' decoding failed with %s", err)
-				// Unexpected, return error.
-				return nil, err
-			}
-			versions[index] = version
+			versions[index] = metadata.Stat.Version
 		} else {
 			versions[index] = -1
 		}
```
```diff
@@ -67,10 +50,10 @@ func listFileVersions(partsMetadata []fileMetadata, errs []error) (versions []int64, err error) {
 
 // Returns slice of online disks needed.
 // - slice returning readable disks.
-// - fileMetadata
+// - xlMetaV1
 // - bool value indicating if healing is needed.
 // - error if any.
-func (xl XL) listOnlineDisks(volume, path string) (onlineDisks []StorageAPI, mdata fileMetadata, heal bool, err error) {
+func (xl XL) listOnlineDisks(volume, path string) (onlineDisks []StorageAPI, mdata xlMetaV1, heal bool, err error) {
 	partsMetadata, errs := xl.getPartsMetadata(volume, path)
 	notFoundCount := 0
 	// FIXME: take care of the situation when a disk has failed and been removed
@@ -82,8 +65,8 @@ func (xl XL) listOnlineDisks(volume, path string) (onlineDisks []StorageAPI, mdata fileMetadata, heal bool, err error) {
 			notFoundCount++
 			// If we have errors with file not found greater than allowed read
 			// quorum we return err as errFileNotFound.
-			if notFoundCount > xl.readQuorum {
-				return nil, fileMetadata{}, false, errFileNotFound
+			if notFoundCount > len(xl.storageDisks)-xl.readQuorum {
+				return nil, xlMetaV1{}, false, errFileNotFound
 			}
 		}
 	}
```
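To see why the threshold changed: the `errFileNotFound` short-circuit should fire as soon as a read quorum becomes unreachable, not after `readQuorum` disks have failed. With hypothetical numbers, say 16 disks and a read quorum of 10: once 7 disks report the file missing, at most 9 disks can still hold it and quorum is already impossible, yet the old check `notFoundCount > xl.readQuorum` would keep going until 11 misses. The new check `notFoundCount > len(xl.storageDisks)-xl.readQuorum` fires at 16 - 10 + 1 = 7 misses, exactly the point where fewer than `readQuorum` disks remain.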
```diff
@@ -96,7 +79,7 @@ func (xl XL) listOnlineDisks(volume, path string) (onlineDisks []StorageAPI, mdata fileMetadata, heal bool, err error) {
 			"volume": volume,
 			"path":   path,
 		}).Errorf("Extracting file versions failed with %s", err)
-		return nil, fileMetadata{}, false, err
+		return nil, xlMetaV1{}, false, err
 	}
 
 	// Get highest file version.
```
```diff
@@ -130,31 +113,31 @@ func (xl XL) listOnlineDisks(volume, path string) (onlineDisks []StorageAPI, mdata fileMetadata, heal bool, err error) {
 				"onlineDiskCount": onlineDiskCount,
 				"readQuorumCount": xl.readQuorum,
 			}).Errorf("%s", errReadQuorum)
-			return nil, fileMetadata{}, false, errReadQuorum
+			return nil, xlMetaV1{}, false, errReadQuorum
 		}
 	}
 	return onlineDisks, mdata, heal, nil
 }
 
-// Get parts.json metadata as a map slice.
+// Get xl.json metadata as a slice.
 // Returns error slice indicating the failed metadata reads.
 // Read lockNS() should be done by caller.
-func (xl XL) getPartsMetadata(volume, path string) ([]fileMetadata, []error) {
+func (xl XL) getPartsMetadata(volume, path string) ([]xlMetaV1, []error) {
 	errs := make([]error, len(xl.storageDisks))
-	metadataArray := make([]fileMetadata, len(xl.storageDisks))
-	metadataFilePath := slashpath.Join(path, metadataFile)
+	metadataArray := make([]xlMetaV1, len(xl.storageDisks))
+	xlMetaV1FilePath := slashpath.Join(path, xlMetaV1File)
 	for index, disk := range xl.storageDisks {
 		offset := int64(0)
-		metadataReader, err := disk.ReadFile(volume, metadataFilePath, offset)
+		metadataReader, err := disk.ReadFile(volume, xlMetaV1FilePath, offset)
 		if err != nil {
 			errs[index] = err
 			continue
 		}
 		defer metadataReader.Close()
 
-		metadata, err := fileMetadataDecode(metadataReader)
+		metadata, err := xlMetaV1Decode(metadataReader)
 		if err != nil {
-			// Unable to parse parts.json, set error.
+			// Unable to parse xl.json, set error.
 			errs[index] = err
 			continue
 		}
```
```diff
@@ -163,38 +146,30 @@ func (xl XL) getPartsMetadata(volume, path string) ([]fileMetadata, []error) {
 	return metadataArray, errs
 }
 
-// Writes/Updates `parts.json` for given file. updateParts carries
-// index of disks where `parts.json` needs to be updated.
+// Writes/Updates `xl.json` for given file. updateParts carries
+// index of disks where `xl.json` needs to be updated.
 //
 // Returns collection of errors, indexed in accordance with input
 // updateParts order.
 // Write lockNS() should be done by caller.
-func (xl XL) setPartsMetadata(volume, path string, metadata fileMetadata, updateParts []bool) []error {
-	metadataFilePath := filepath.Join(path, metadataFile)
+func (xl XL) setPartsMetadata(volume, path string, metadata xlMetaV1, updateParts []bool) []error {
+	xlMetaV1FilePath := filepath.Join(path, xlMetaV1File)
 	errs := make([]error, len(xl.storageDisks))
 
 	for index := range updateParts {
 		errs[index] = errors.New("Metadata not updated")
 	}
 
-	metadataBytes, err := json.Marshal(metadata)
-	if err != nil {
-		for index := range updateParts {
-			errs[index] = err
-		}
-		return errs
-	}
-
 	for index, shouldUpdate := range updateParts {
 		if !shouldUpdate {
 			continue
 		}
-		writer, err := xl.storageDisks[index].CreateFile(volume, metadataFilePath)
+		writer, err := xl.storageDisks[index].CreateFile(volume, xlMetaV1FilePath)
 		errs[index] = err
 		if err != nil {
 			continue
 		}
-		_, err = writer.Write(metadataBytes)
+		err = metadata.Write(writer)
 		if err != nil {
 			errs[index] = err
 			safeCloseAndRemove(writer)
```
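A note on the dropped `json.Marshal` block: serialization now happens inside `metadata.Write` (defined in a later hunk), which marshals the struct and writes the bytes in one call. A marshal failure is therefore reported per disk through `errs[index]`, just like a failed `CreateFile`, instead of being fanned out to every entry up front.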
```diff
@@ -17,16 +17,12 @@
 package main
 
 import (
-	"encoding/hex"
 	"fmt"
-	"hash"
 	"io"
 	slashpath "path"
-	"strconv"
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	fastSha512 "github.com/minio/minio/pkg/crypto/sha512"
 )
 
 // Erasure block size.
@@ -92,9 +88,8 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *waitCloser) {
 	higherVersion++
 
 	writers := make([]io.WriteCloser, len(xl.storageDisks))
-	sha512Writers := make([]hash.Hash, len(xl.storageDisks))
 
-	metadataFilePath := slashpath.Join(path, metadataFile)
+	xlMetaV1FilePath := slashpath.Join(path, xlMetaV1File)
 	metadataWriters := make([]io.WriteCloser, len(xl.storageDisks))
 
 	// Save additional erasureMetadata.
@@ -102,7 +97,7 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *waitCloser) {
 
 	createFileError := 0
 	for index, disk := range xl.storageDisks {
-		erasurePart := slashpath.Join(path, fmt.Sprintf("part.%d", index))
+		erasurePart := slashpath.Join(path, fmt.Sprintf("file.%d", index))
 		var writer io.WriteCloser
 		writer, err = disk.CreateFile(volume, erasurePart)
 		if err != nil {
@@ -126,7 +121,7 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *waitCloser) {
 
 		// create meta data file
 		var metadataWriter io.WriteCloser
-		metadataWriter, err = disk.CreateFile(volume, metadataFilePath)
+		metadataWriter, err = disk.CreateFile(volume, xlMetaV1FilePath)
 		if err != nil {
 			log.WithFields(logrus.Fields{
 				"volume": volume,
@@ -148,7 +143,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *waitCloser) {
 
 		writers[index] = writer
 		metadataWriters[index] = metadataWriter
-		sha512Writers[index] = fastSha512.New()
 	}
 
 	// Allocate 4MiB block size buffer for reading.
@@ -221,9 +215,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *waitCloser) {
 				reader.CloseWithError(err)
 				return
 			}
-			if sha512Writers[index] != nil {
-				sha512Writers[index].Write(encodedData)
-			}
 		}
 
 		// Update total written.
@@ -232,21 +223,19 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *waitCloser) {
 	}
 
 	// Initialize metadata, save all erasure related metadata.
-	metadata := make(fileMetadata)
-	metadata.Set("version", minioVersion)
-	metadata.Set("format.major", "1")
-	metadata.Set("format.minor", "0")
-	metadata.Set("format.patch", "0")
-	metadata.Set("file.size", strconv.FormatInt(totalSize, 10))
+	metadata := xlMetaV1{}
+	metadata.Version = "1"
+	metadata.Stat.Size = totalSize
+	metadata.Stat.ModTime = modTime
+	metadata.Minio.Release = minioReleaseTag
 	if len(xl.storageDisks) > len(writers) {
 		// Save file.version only if we wrote to fewer disks than all
 		// storage disks.
-		metadata.Set("file.version", strconv.FormatInt(higherVersion, 10))
+		metadata.Stat.Version = higherVersion
 	}
-	metadata.Set("file.modTime", modTime.Format(timeFormatAMZ))
-	metadata.Set("file.xl.blockSize", strconv.Itoa(erasureBlockSize))
-	metadata.Set("file.xl.dataBlocks", strconv.Itoa(xl.DataBlocks))
-	metadata.Set("file.xl.parityBlocks", strconv.Itoa(xl.ParityBlocks))
+	metadata.Erasure.DataBlocks = xl.DataBlocks
+	metadata.Erasure.ParityBlocks = xl.ParityBlocks
+	metadata.Erasure.BlockSize = erasureBlockSize
 
 	// Write all the metadata.
 	// below case is not handled here
@@ -257,10 +246,6 @@ func (xl XL) writeErasure(volume, path string, reader *io.PipeReader, wcloser *waitCloser) {
 		if metadataWriter == nil {
 			continue
 		}
-		if sha512Writers[index] != nil {
-			// Save sha512 checksum of each encoded blocks.
-			metadata.Set("file.xl.block512Sum", hex.EncodeToString(sha512Writers[index].Sum(nil)))
-		}
 
 		// Write metadata.
 		err = metadata.Write(metadataWriter)
```
```diff
@@ -50,21 +50,12 @@ func (xl XL) healFile(volume string, path string) error {
 		return nil
 	}
 
-	size, err := metadata.GetSize()
-	if err != nil {
-		log.WithFields(logrus.Fields{
-			"volume": volume,
-			"path":   path,
-		}).Errorf("Failed to get file size, %s", err)
-		return err
-	}
-
 	for index, disk := range onlineDisks {
 		if disk == nil {
 			needsHeal[index] = true
 			continue
 		}
-		erasurePart := slashpath.Join(path, fmt.Sprintf("part.%d", index))
+		erasurePart := slashpath.Join(path, fmt.Sprintf("file.%d", index))
 		// If disk.ReadFile returns error and we don't have read quorum it will be taken care as
 		// ReedSolomon.Reconstruct() will fail later.
 		var reader io.ReadCloser
@@ -93,7 +84,7 @@ func (xl XL) healFile(volume string, path string) error {
 		if !healNeeded {
 			continue
 		}
-		erasurePart := slashpath.Join(path, fmt.Sprintf("part.%d", index))
+		erasurePart := slashpath.Join(path, fmt.Sprintf("file.%d", index))
 		writers[index], err = xl.storageDisks[index].CreateFile(volume, erasurePart)
 		if err != nil {
 			log.WithFields(logrus.Fields{
@@ -105,17 +96,17 @@ func (xl XL) healFile(volume string, path string) error {
 			return err
 		}
 	}
-	var totalLeft = size
+	var totalLeft = metadata.Stat.Size
 	for totalLeft > 0 {
 		// Figure out the right blockSize.
-		var curBlockSize int
-		if erasureBlockSize < totalLeft {
-			curBlockSize = erasureBlockSize
+		var curBlockSize int64
+		if metadata.Erasure.BlockSize < totalLeft {
+			curBlockSize = metadata.Erasure.BlockSize
 		} else {
-			curBlockSize = int(totalLeft)
+			curBlockSize = totalLeft
 		}
 		// Calculate the current block size.
-		curBlockSize = getEncodedBlockLen(curBlockSize, xl.DataBlocks)
+		curBlockSize = getEncodedBlockLen(curBlockSize, metadata.Erasure.DataBlocks)
 		enBlocks := make([][]byte, totalBlocks)
 		// Loop through all readers and read.
 		for index, reader := range readers {
@@ -205,7 +196,7 @@ func (xl XL) healFile(volume string, path string) error {
 				return err
 			}
 		}
-		totalLeft = totalLeft - erasureBlockSize
+		totalLeft = totalLeft - metadata.Erasure.BlockSize
 	}
 
 	// After successful healing Close() the writer so that the temp
```
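The `curBlockSize` change from `int` to `int64` is worth noting: `Stat.Size` and `Erasure.BlockSize` are both `int64`, so keeping the per-iteration block arithmetic in `int64` removes the old `int(totalLeft)` narrowing, which on 32-bit platforms would truncate for objects larger than about 2 GiB.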
```diff
@@ -18,53 +18,31 @@ package main
 
 import (
 	"encoding/json"
-	"errors"
 	"io"
-	"strconv"
 	"time"
 )
 
-// error type when key is not found.
-var errMetadataKeyNotExist = errors.New("Key not found in fileMetadata.")
-
-// This code is built on similar ideas of http.Header.
-// Ref - https://golang.org/pkg/net/http/#Header
-
-// A fileMetadata represents a metadata header mapping
-// keys to sets of values.
-type fileMetadata map[string][]string
-
-// Add adds the key, value pair to the header.
-// It appends to any existing values associated with key.
-func (f fileMetadata) Add(key, value string) {
-	f[key] = append(f[key], value)
-}
-
-// Set sets the header entries associated with key to
-// the single element value. It replaces any existing
-// values associated with key.
-func (f fileMetadata) Set(key, value string) {
-	f[key] = []string{value}
-}
-
-// Get gets the first value associated with the given key.
-// If there are no values associated with the key, Get returns "".
-// Get is a convenience method. For more complex queries,
-// access the map directly.
-func (f fileMetadata) Get(key string) []string {
-	if f == nil {
-		return nil
-	}
-	v, ok := f[key]
-	if !ok {
-		return nil
-	}
-	return v
-}
+// xlMetaV1 represents `xl.json` metadata.
+type xlMetaV1 struct {
+	Version string `json:"version"`
+	Stat    struct {
+		Size    int64     `json:"size"`
+		ModTime time.Time `json:"modTime"`
+		Version int64     `json:"version"`
+	} `json:"stat"`
+	Erasure struct {
+		DataBlocks   int   `json:"data"`
+		ParityBlocks int   `json:"parity"`
+		BlockSize    int64 `json:"blockSize"`
+	} `json:"erasure"`
+	Minio struct {
+		Release string `json:"release"`
+	} `json:"minio"`
+}
 
 // Write writes a metadata in wire format.
-func (f fileMetadata) Write(writer io.Writer) error {
-	metadataBytes, err := json.Marshal(f)
+func (m xlMetaV1) Write(writer io.Writer) error {
+	metadataBytes, err := json.Marshal(m)
 	if err != nil {
 		return err
 	}
@@ -72,56 +50,12 @@ func (f fileMetadata) Write(writer io.Writer) error {
 	return err
 }
 
-// Get file size.
-func (f fileMetadata) GetSize() (int64, error) {
-	sizes := f.Get("file.size")
-	if sizes == nil {
-		return 0, errMetadataKeyNotExist
-	}
-	sizeStr := sizes[0]
-	return strconv.ParseInt(sizeStr, 10, 64)
-}
-
-// Set file size.
-func (f fileMetadata) SetSize(size int64) {
-	f.Set("file.size", strconv.FormatInt(size, 10))
-}
-
-// Get file Modification time.
-func (f fileMetadata) GetModTime() (time.Time, error) {
-	timeStrs := f.Get("file.modTime")
-	if timeStrs == nil {
-		return time.Time{}, errMetadataKeyNotExist
-	}
-	return time.Parse(timeFormatAMZ, timeStrs[0])
-}
-
-// Set file Modification time.
-func (f fileMetadata) SetModTime(modTime time.Time) {
-	f.Set("file.modTime", modTime.Format(timeFormatAMZ))
-}
-
-// Get file version.
-func (f fileMetadata) GetFileVersion() (int64, error) {
-	version := f.Get("file.version")
-	if version == nil {
-		return 0, errMetadataKeyNotExist
-	}
-	return strconv.ParseInt(version[0], 10, 64)
-}
-
-// Set file version.
-func (f fileMetadata) SetFileVersion(fileVersion int64) {
-	f.Set("file.version", strconv.FormatInt(fileVersion, 10))
-}
-
-// fileMetadataDecode - file metadata decode.
-func fileMetadataDecode(reader io.Reader) (fileMetadata, error) {
-	metadata := make(fileMetadata)
+// xlMetaV1Decode - file metadata decode.
+func xlMetaV1Decode(reader io.Reader) (metadata xlMetaV1, err error) {
 	decoder := json.NewDecoder(reader)
 	// Unmarshalling failed, file possibly corrupted.
-	if err := decoder.Decode(&metadata); err != nil {
-		return nil, err
+	if err = decoder.Decode(&metadata); err != nil {
+		return xlMetaV1{}, err
 	}
 	return metadata, nil
 }
```
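The struct replaces the http.Header-style `map[string][]string` wholesale: the `Add`/`Set`/`Get` accessors, the `strconv` and `time.Parse` round-trips, and the `errMetadataKeyNotExist` sentinel all disappear because encoding/json now maps fields to typed values directly, and a key missing from `xl.json` simply decodes to its zero value — which is why `listFileVersions` above can read `metadata.Stat.Version` unconditionally.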
```diff
@@ -62,15 +62,6 @@ func (xl XL) ReadFile(volume, path string, offset int64) (io.ReadCloser, error) {
 		}()
 	}
 
-	fileSize, err := metadata.GetSize()
-	if err != nil {
-		log.WithFields(logrus.Fields{
-			"volume": volume,
-			"path":   path,
-		}).Errorf("Failed to get file size, %s", err)
-		return nil, err
-	}
-
 	// Acquire read lock again.
 	xl.lockNS(volume, path, readLock)
 	readers := make([]io.ReadCloser, len(xl.storageDisks))
@@ -78,7 +69,7 @@ func (xl XL) ReadFile(volume, path string, offset int64) (io.ReadCloser, error) {
 		if disk == nil {
 			continue
 		}
-		erasurePart := slashpath.Join(path, fmt.Sprintf("part.%d", index))
+		erasurePart := slashpath.Join(path, fmt.Sprintf("file.%d", index))
 		// If disk.ReadFile returns error and we don't have read quorum it will be taken care as
 		// ReedSolomon.Reconstruct() will fail later.
 		var reader io.ReadCloser
@@ -91,18 +82,18 @@ func (xl XL) ReadFile(volume, path string, offset int64) (io.ReadCloser, error) {
 	// Initialize pipe.
 	pipeReader, pipeWriter := io.Pipe()
 	go func() {
-		var totalLeft = fileSize
+		var totalLeft = metadata.Stat.Size
 		// Read until the totalLeft.
 		for totalLeft > 0 {
 			// Figure out the right blockSize as it was encoded before.
-			var curBlockSize int
-			if erasureBlockSize < totalLeft {
-				curBlockSize = erasureBlockSize
+			var curBlockSize int64
+			if metadata.Erasure.BlockSize < totalLeft {
+				curBlockSize = metadata.Erasure.BlockSize
 			} else {
-				curBlockSize = int(totalLeft)
+				curBlockSize = totalLeft
 			}
 			// Calculate the current encoded block size.
-			curEncBlockSize := getEncodedBlockLen(curBlockSize, xl.DataBlocks)
+			curEncBlockSize := getEncodedBlockLen(curBlockSize, metadata.Erasure.DataBlocks)
 			enBlocks := make([][]byte, len(xl.storageDisks))
 			// Loop through all readers and read.
 			for index, reader := range readers {
@@ -117,8 +108,6 @@ func (xl XL) ReadFile(volume, path string, offset int64) (io.ReadCloser, error) {
 				}
 			}
 
-			// TODO need to verify block512Sum.
-
 			// Check blocks if they are all zero in length.
 			if checkBlockSize(enBlocks) == 0 {
 				log.WithFields(logrus.Fields{
@@ -181,7 +170,7 @@ func (xl XL) ReadFile(volume, path string, offset int64) (io.ReadCloser, error) {
 			}
 
 			// Join the decoded blocks.
-			err = xl.ReedSolomon.Join(pipeWriter, enBlocks, curBlockSize)
+			err = xl.ReedSolomon.Join(pipeWriter, enBlocks, int(curBlockSize))
 			if err != nil {
 				log.WithFields(logrus.Fields{
 					"volume": volume,
@@ -192,7 +181,7 @@ func (xl XL) ReadFile(volume, path string, offset int64) (io.ReadCloser, error) {
 			}
 
 			// Save what's left after reading erasureBlockSize.
-			totalLeft = totalLeft - erasureBlockSize
+			totalLeft = totalLeft - metadata.Erasure.BlockSize
 		}
 
 		// Cleanly end the pipe after a successful decoding.
```
```diff
@@ -30,7 +30,7 @@ func checkBlockSize(blocks [][]byte) int {
 
 // calculate the blockSize based on input length and total number of
 // data blocks.
-func getEncodedBlockLen(inputLen, dataBlocks int) (curBlockSize int) {
-	curBlockSize = (inputLen + dataBlocks - 1) / dataBlocks
-	return
+func getEncodedBlockLen(inputLen int64, dataBlocks int) (curBlockSize int64) {
+	curBlockSize = (inputLen + int64(dataBlocks) - 1) / int64(dataBlocks)
+	return curBlockSize
 }
```
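Since `getEncodedBlockLen` is the arithmetic heart of the chunk-size loops above, a standalone copy of the updated helper (with hypothetical example values) makes the ceiling division concrete:

```go
package main

import "fmt"

// Copy of the updated helper: ceiling division of inputLen across
// dataBlocks, computed entirely in int64 to match Stat.Size and
// Erasure.BlockSize.
func getEncodedBlockLen(inputLen int64, dataBlocks int) (curBlockSize int64) {
	curBlockSize = (inputLen + int64(dataBlocks) - 1) / int64(dataBlocks)
	return curBlockSize
}

func main() {
	// The 4 MiB erasure block size from xl.json split across 5 data
	// blocks: ceil(4194304/5) = 838861 bytes per encoded block.
	fmt.Println(getEncodedBlockLen(4194304, 5))
}
```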
xl-v1.go (91 lines)
```diff
@@ -30,7 +30,7 @@ import (
 
 const (
 	// Part metadata file.
-	metadataFile = "part.json"
+	xlMetaV1File = "xl.json"
 	// Maximum erasure blocks.
 	maxErasureBlocks = 16
 )
```
```diff
@@ -325,32 +325,32 @@ func isLeafDirectory(disk StorageAPI, volume, leafPath string) (isLeaf bool) {
 	return true
 }
 
-// extractMetadata - extract file metadata.
-func extractMetadata(disk StorageAPI, volume, path string) (fileMetadata, error) {
-	metadataFilePath := slashpath.Join(path, metadataFile)
+// extractMetadata - extract xl metadata.
+func extractMetadata(disk StorageAPI, volume, path string) (xlMetaV1, error) {
+	xlMetaV1FilePath := slashpath.Join(path, xlMetaV1File)
 	// We are not going to read partial data from metadata file,
 	// read the whole file always.
 	offset := int64(0)
-	metadataReader, err := disk.ReadFile(volume, metadataFilePath, offset)
+	metadataReader, err := disk.ReadFile(volume, xlMetaV1FilePath, offset)
 	if err != nil {
 		log.WithFields(logrus.Fields{
 			"volume": volume,
-			"path":   metadataFilePath,
+			"path":   xlMetaV1FilePath,
 			"offset": offset,
 		}).Errorf("ReadFile failed with %s", err)
-		return nil, err
+		return xlMetaV1{}, err
 	}
 	// Close metadata reader.
 	defer metadataReader.Close()
 
-	metadata, err := fileMetadataDecode(metadataReader)
+	metadata, err := xlMetaV1Decode(metadataReader)
 	if err != nil {
 		log.WithFields(logrus.Fields{
 			"volume": volume,
-			"path":   metadataFilePath,
+			"path":   xlMetaV1FilePath,
 			"offset": offset,
-		}).Errorf("fileMetadataDecode failed with %s", err)
-		return nil, err
+		}).Errorf("xlMetaV1Decode failed with %s", err)
+		return xlMetaV1{}, err
 	}
 	return metadata, nil
 }
```
```diff
@@ -369,25 +369,9 @@ func extractFileInfo(disk StorageAPI, volume, path string) (FileInfo, error) {
 		}).Errorf("extractMetadata failed with %s", err)
 		return FileInfo{}, err
 	}
-	fileSize, err := metadata.GetSize()
-	if err != nil {
-		log.WithFields(logrus.Fields{
-			"volume": volume,
-			"path":   path,
-		}).Errorf("GetSize failed with %s", err)
-		return FileInfo{}, err
-	}
-	fileModTime, err := metadata.GetModTime()
-	if err != nil {
-		log.WithFields(logrus.Fields{
-			"volume": volume,
-			"path":   path,
-		}).Errorf("GetModTime failed with %s", err)
-		return FileInfo{}, err
-	}
-	fileInfo.Size = fileSize
-	fileInfo.Mode = os.FileMode(0644)
-	fileInfo.ModTime = fileModTime
+	fileInfo.Size = metadata.Stat.Size
+	fileInfo.ModTime = metadata.Stat.ModTime
+	fileInfo.Mode = os.FileMode(0644) // This is a file already.
 	return fileInfo, nil
 }
 
```
```diff
@@ -458,7 +442,7 @@ func listFiles(disk StorageAPI, volume, prefix, marker string, recursive bool, count int) ([]FileInfo, bool, error) {
 		if isLeaf {
 			// For leaf for now we just point to the first block, make it
 			// dynamic in future based on the availability of storage disks.
-			markerPath = slashpath.Join(marker, metadataFile)
+			markerPath = slashpath.Join(marker, xlMetaV1File)
 		}
 	}
 
```
```diff
@@ -478,7 +462,7 @@ func listFiles(disk StorageAPI, volume, prefix, marker string, recursive bool, count int) ([]FileInfo, bool, error) {
 	}
 	for _, fsFileInfo := range fsFilesInfo {
 		// Skip metadata files.
-		if strings.HasSuffix(fsFileInfo.Name, metadataFile) {
+		if strings.HasSuffix(fsFileInfo.Name, xlMetaV1File) {
 			continue
 		}
 		var fileInfo FileInfo
```
```diff
@@ -518,9 +502,8 @@ func listFiles(disk StorageAPI, volume, prefix, marker string, recursive bool, count int) ([]FileInfo, bool, error) {
 		// markerPath for the next disk.ListFiles() iteration.
 		markerPath = fsFilesInfo[len(fsFilesInfo)-1].Name
 	}
-	if count == 0 && recursive && !strings.HasSuffix(markerPath, metadataFile) {
-		// If last entry is not part.json then loop once more to check if we
-		// have reached eof.
+	if count == 0 && recursive && !strings.HasSuffix(markerPath, xlMetaV1File) {
+		// If last entry is not xl.json then loop once more to check if we have reached eof.
 		fsFilesInfo, eof, err = disk.ListFiles(volume, prefix, markerPath, recursive, 1)
 		if err != nil {
 			log.WithFields(logrus.Fields{
@@ -533,17 +516,17 @@ func listFiles(disk StorageAPI, volume, prefix, marker string, recursive bool, count int) ([]FileInfo, bool, error) {
 			return nil, true, err
 		}
 		if !eof {
-			// part.N and part.json are always in pairs and hence this
-			// entry has to be part.json. If not better to manually investigate
+			// file.N and xl.json are always in pairs and hence this
+			// entry has to be xl.json. If not better to manually investigate
 			// and fix it.
 			// For the next ListFiles() call we can safely assume that the
-			// marker is "object/part.json"
-			if !strings.HasSuffix(fsFilesInfo[0].Name, metadataFile) {
+			// marker is "object/xl.json"
+			if !strings.HasSuffix(fsFilesInfo[0].Name, xlMetaV1File) {
 				log.WithFields(logrus.Fields{
 					"volume":          volume,
 					"prefix":          prefix,
 					"fsFileInfo.Name": fsFilesInfo[0].Name,
-				}).Errorf("ListFiles failed with %s, expected %s to be a part.json file.", err, fsFilesInfo[0].Name)
+				}).Errorf("ListFiles failed with %s, expected %s to be a xl.json file.", err, fsFilesInfo[0].Name)
 				return nil, true, errUnexpected
 			}
 		}
```
```diff
@@ -594,30 +577,12 @@ func (xl XL) StatFile(volume, path string) (FileInfo, error) {
 		}()
 	}
 
-	// Extract metadata.
-	size, err := metadata.GetSize()
-	if err != nil {
-		log.WithFields(logrus.Fields{
-			"volume": volume,
-			"path":   path,
-		}).Errorf("GetSize failed with %s", err)
-		return FileInfo{}, err
-	}
-	modTime, err := metadata.GetModTime()
-	if err != nil {
-		log.WithFields(logrus.Fields{
-			"volume": volume,
-			"path":   path,
-		}).Errorf("GetModTime failed with %s", err)
-		return FileInfo{}, err
-	}
-
 	// Return file info.
 	return FileInfo{
 		Volume:  volume,
 		Name:    path,
-		Size:    size,
-		ModTime: modTime,
+		Size:    metadata.Stat.Size,
+		ModTime: metadata.Stat.ModTime,
 		Mode:    os.FileMode(0644),
 	}, nil
 }
```
```diff
@@ -632,7 +597,7 @@ func (xl XL) DeleteFile(volume, path string) error {
 	}
 	// Loop through and delete each chunk.
 	for index, disk := range xl.storageDisks {
-		erasureFilePart := slashpath.Join(path, fmt.Sprintf("part.%d", index))
+		erasureFilePart := slashpath.Join(path, fmt.Sprintf("file.%d", index))
 		err := disk.DeleteFile(volume, erasureFilePart)
 		if err != nil {
 			log.WithFields(logrus.Fields{
@@ -641,8 +606,8 @@ func (xl XL) DeleteFile(volume, path string) error {
 			}).Errorf("DeleteFile failed with %s", err)
 			return err
 		}
-		metadataFilePath := slashpath.Join(path, metadataFile)
-		err = disk.DeleteFile(volume, metadataFilePath)
+		xlMetaV1FilePath := slashpath.Join(path, xlMetaV1File)
+		err = disk.DeleteFile(volume, xlMetaV1FilePath)
 		if err != nil {
 			log.WithFields(logrus.Fields{
 				"volume": volume,
```