minio/minio (mirror of https://github.com/minio/minio.git)
commit 76f4f20609, parent 99ddd35343
@@ -20,8 +20,12 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"reflect"
 	"sync"
+
+	"github.com/minio/minio/pkg/lock"
 )

 // fsFormat - structure holding 'fs' format.
@@ -47,6 +51,43 @@ type formatConfigV1 struct {
 	XL *xlFormat `json:"xl,omitempty"` // XL field holds xl format.
 }

+func (f *formatConfigV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
+	// Serialize to prepare to write to disk.
+	var fbytes []byte
+	fbytes, err = json.Marshal(f)
+	if err != nil {
+		return 0, traceError(err)
+	}
+	if err = lk.Truncate(0); err != nil {
+		return 0, traceError(err)
+	}
+	_, err = lk.Write(fbytes)
+	if err != nil {
+		return 0, traceError(err)
+	}
+	return int64(len(fbytes)), nil
+}
+
+func (f *formatConfigV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
+	var fbytes []byte
+	fi, err := lk.Stat()
+	if err != nil {
+		return 0, traceError(err)
+	}
+	fbytes, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
+	if err != nil {
+		return 0, traceError(err)
+	}
+	if len(fbytes) == 0 {
+		return 0, traceError(io.EOF)
+	}
+	// Decode `format.json`.
+	if err = json.Unmarshal(fbytes, f); err != nil {
+		return 0, traceError(err)
+	}
+	return int64(len(fbytes)), nil
+}
+
 /*

 All disks online
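The two serializers above are consumed later by `initFormatFS`. A minimal usage sketch, assuming a `format.json` path under `.minio.sys` (the exact path and surrounding error handling here are illustrative only, not part of the change):

```go
// Sketch, not part of the patch: read-modify-write format.json under a lock.
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
	return traceError(err)
}
defer lk.Close()

format := &formatConfigV1{}
if _, err = format.ReadFrom(lk); err != nil && errorCause(err) != io.EOF {
	return err // an io.EOF cause means the file is empty (unformatted disk)
}
if _, err = format.WriteTo(lk); err != nil { // Truncate(0) + rewrite in place
	return err
}
```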
@@ -230,7 +230,7 @@ func fsOpenFile(readPath string, offset int64) (io.ReadCloser, int64, error) {

 // Creates a file and copies data from incoming reader. Staging buffer is used by io.CopyBuffer.
 func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int64) (int64, error) {
-	if filePath == "" || reader == nil || buf == nil {
+	if filePath == "" || reader == nil {
 		return 0, traceError(errInvalidArgument)
 	}

@@ -263,11 +263,18 @@ func fsCreateFile(filePath string, reader io.Reader, buf []byte, fallocSize int6
 		}
 	}

-	bytesWritten, err := io.CopyBuffer(writer, reader, buf)
+	var bytesWritten int64
+	if buf != nil {
+		bytesWritten, err = io.CopyBuffer(writer, reader, buf)
 		if err != nil {
 			return 0, traceError(err)
 		}
+	} else {
+		bytesWritten, err = io.Copy(writer, reader)
+		if err != nil {
+			return 0, traceError(err)
+		}
+	}
 	return bytesWritten, nil
 }

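With the relaxed argument check above, callers may now pass a nil staging buffer and fsCreateFile falls back to io.Copy; a non-nil buffer still routes through io.CopyBuffer. A short sketch mirroring the updated tests (volume and file names are illustrative):

```go
// Sketch, not part of the patch: both call styles are accepted after this change.
reader := bytes.NewReader([]byte("Hello, world"))
if _, err := fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
	t.Fatalf("Unable to create file, %s", err) // nil buffer -> io.Copy
}
reader.Seek(0, 0)
buf := make([]byte, 4096)
if _, err := fsCreateFile(pathJoin(path, "success-vol", "success-file-2"), reader, buf, reader.Size()); err != nil {
	t.Fatalf("Unable to create file, %s", err) // staging buffer -> io.CopyBuffer
}
```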
@@ -276,6 +283,12 @@ func fsRemoveUploadIDPath(basePath, uploadIDPath string) error {
 	if basePath == "" || uploadIDPath == "" {
 		return traceError(errInvalidArgument)
 	}
+	if err := checkPathLength(basePath); err != nil {
+		return traceError(err)
+	}
+	if err := checkPathLength(uploadIDPath); err != nil {
+		return traceError(err)
+	}

 	// List all the entries in uploadID.
 	entries, err := readDir(uploadIDPath)
@@ -319,6 +332,26 @@ func fsFAllocate(fd int, offset int64, len int64) (err error) {
 // Renames source path to destination path, creates all the
 // missing parents if they don't exist.
 func fsRenameFile(sourcePath, destPath string) error {
+	if err := checkPathLength(sourcePath); err != nil {
+		return traceError(err)
+	}
+	if err := checkPathLength(destPath); err != nil {
+		return traceError(err)
+	}
+	// Verify if source path exists.
+	if _, err := os.Stat(preparePath(sourcePath)); err != nil {
+		if os.IsNotExist(err) {
+			return traceError(errFileNotFound)
+		} else if os.IsPermission(err) {
+			return traceError(errFileAccessDenied)
+		} else if isSysErrPathNotFound(err) {
+			return traceError(errFileNotFound)
+		} else if isSysErrNotDir(err) {
+			// File path cannot be verified since one of the parents is a file.
+			return traceError(errFileAccessDenied)
+		}
+		return traceError(err)
+	}
 	if err := mkdirAll(pathutil.Dir(destPath), 0777); err != nil {
 		return traceError(err)
 	}
@@ -26,6 +26,31 @@ import (
 	"github.com/minio/minio/pkg/lock"
 )

+func TestFSRenameFile(t *testing.T) {
+	// create posix test setup
+	_, path, err := newPosixTestSetup()
+	if err != nil {
+		t.Fatalf("Unable to create posix test setup, %s", err)
+	}
+	defer removeAll(path)
+
+	if err = fsMkdir(pathJoin(path, "testvolume1")); err != nil {
+		t.Fatal(err)
+	}
+	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
+		t.Fatal(err)
+	}
+	if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNotFound {
+		t.Fatal(err)
+	}
if err = fsRenameFile(pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); errorCause(err) != errFileNameTooLong {
+		t.Fatal("Unexpected error", err)
+	}
if err = fsRenameFile(pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); errorCause(err) != errFileNameTooLong {
+		t.Fatal("Unexpected error", err)
+	}
+}
+
 func TestFSStats(t *testing.T) {
 	// create posix test setup
 	_, path, err := newPosixTestSetup()
@@ -48,9 +73,8 @@ func TestFSStats(t *testing.T) {
 		t.Fatalf("Unable to create volume, %s", err)
 	}

-	var buf = make([]byte, 4096)
 	var reader = bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
+	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
 	// Seek back.
@@ -60,7 +84,7 @@ func TestFSStats(t *testing.T) {
 		t.Fatal("Unexpected error", err)
 	}

-	if _, err = fsCreateFile(pathJoin(path, "success-vol", "path/to/success-file"), reader, buf, reader.Size()); err != nil {
+	if _, err = fsCreateFile(pathJoin(path, "success-vol", "path/to/success-file"), reader, nil, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
 	// Seek back.
@@ -174,9 +198,8 @@ func TestFSCreateAndOpen(t *testing.T) {
 		t.Fatal("Unexpected error", err)
 	}

-	var buf = make([]byte, 4096)
 	var reader = bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
+	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
 	// Seek back.
@@ -204,7 +227,7 @@ func TestFSCreateAndOpen(t *testing.T) {
 	}

 	for i, testCase := range testCases {
-		_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, buf, reader.Size())
+		_, err = fsCreateFile(pathJoin(path, testCase.srcVol, testCase.srcPath), reader, nil, 0)
 		if errorCause(err) != testCase.expectedErr {
 			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
 		}
@@ -297,15 +320,14 @@ func TestFSRemoves(t *testing.T) {
 		t.Fatalf("Unable to create directory, %s", err)
 	}

-	var buf = make([]byte, 4096)
 	var reader = bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, buf, reader.Size()); err != nil {
+	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file"), reader, nil, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
 	// Seek back.
 	reader.Seek(0, 0)

-	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file-new"), reader, buf, reader.Size()); err != nil {
+	if _, err = fsCreateFile(pathJoin(path, "success-vol", "success-file-new"), reader, nil, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}
 	// Seek back.
@@ -417,9 +439,8 @@ func TestFSRemoveMeta(t *testing.T) {

 	filePath := pathJoin(fsPath, "success-vol", "success-file")

-	var buf = make([]byte, 4096)
 	var reader = bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(filePath, reader, buf, reader.Size()); err != nil {
+	if _, err = fsCreateFile(filePath, reader, nil, 0); err != nil {
 		t.Fatalf("Unable to create file, %s", err)
 	}

@@ -18,6 +18,8 @@ package cmd

 import (
 	"encoding/json"
+	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -25,6 +27,7 @@ import (
 	"sort"
 	"strings"

+	"github.com/minio/minio-go/pkg/set"
 	"github.com/minio/minio/pkg/lock"
 	"github.com/minio/minio/pkg/mimedb"
 	"github.com/tidwall/gjson"
@@ -225,9 +228,20 @@ const (
 	// FS backend meta format.
 	fsMetaFormat = "fs"

+	// FS backend format version.
+	fsFormatVersion = fsFormatV2
+
 	// Add more constants here.
 )

+// FS format version strings.
+const (
+	fsFormatV1 = "1" // Previous format.
+	fsFormatV2 = "2" // Current format.
+	// Proceed to add "3" when we
+	// change the backend format in future.
+)
+
 // newFSMetaV1 - initializes new fsMetaV1.
 func newFSMetaV1() (fsMeta fsMetaV1) {
 	fsMeta = fsMetaV1{}
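The constants above are what the new validation code keys off. A hedged sketch of the intended decision, paraphrasing checkFormatFS/initFormatFS further down rather than adding behaviour:

```go
// Sketch, not part of the patch: how the version strings are interpreted.
switch format.FS.Version {
case fsFormatV2: // current format, nothing to do
case fsFormatV1: // old format, object metadata must be migrated to V2
default: // unknown version, the backend is treated as corrupted
}
```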
@@ -237,58 +251,167 @@ func newFSMetaV1() (fsMeta fsMetaV1) {
 	return fsMeta
 }

-// newFSFormatV1 - initializes new formatConfigV1 with FS format info.
-func newFSFormatV1() (format *formatConfigV1) {
+// newFSFormatV2 - initializes new formatConfigV1 with FS format version 2.
+func newFSFormatV2() (format *formatConfigV1) {
 	return &formatConfigV1{
 		Version: "1",
 		Format: "fs",
 		FS: &fsFormat{
-			Version: "1",
+			Version: fsFormatV2,
 		},
 	}
 }

-// loads format.json from minioMetaBucket if it exists.
-func loadFormatFS(fsPath string) (*formatConfigV1, error) {
-	rlk, err := lock.RLockedOpenFile(pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile))
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, errUnformattedDisk
-		}
-		return nil, err
-	}
-	defer rlk.Close()
-
-	formatBytes, err := ioutil.ReadAll(rlk)
-	if err != nil {
-		return nil, err
-	}
-
-	format := &formatConfigV1{}
-	if err = json.Unmarshal(formatBytes, format); err != nil {
-		return nil, err
-	}
-	return format, nil
-}
-
-// writes FS format (format.json) into minioMetaBucket.
-func saveFormatFS(formatPath string, fsFormat *formatConfigV1) error {
-	metadataBytes, err := json.Marshal(fsFormat)
-	if err != nil {
-		return err
-	}
-
-	// fsFormatJSONFile - format.json file stored in minioMetaBucket(.minio.sys) directory.
-	lk, err := lock.LockedOpenFile(preparePath(formatPath), os.O_CREATE|os.O_WRONLY, 0600)
-	if err != nil {
-		return err
-	}
-	defer lk.Close()
-
-	_, err = lk.Write(metadataBytes)
-	// Success.
-	return err
+// Checks if input format is version 1 and 2.
+func isFSValidFormat(formatCfg *formatConfigV1) bool {
+	// Supported format versions.
+	var supportedFormatVersions = []string{
+		fsFormatV1,
+		fsFormatV2,
+		// New supported versions here.
+	}
+
+	// Check for supported format versions.
+	for _, version := range supportedFormatVersions {
+		if formatCfg.FS.Version == version {
+			return true
+		}
+	}
+	return false
+}
+
+// errFSFormatOld- old fs format.
+var errFSFormatOld = errors.New("old FS format found")
+
+// Checks if the loaded `format.json` is valid and
+// is expected to be of the requested version.
+func checkFormatFS(format *formatConfigV1, formatVersion string) error {
+	if format == nil {
+		return errUnexpected
+	}
+
+	// Validate if we have the same format.
+	if format.Format != "fs" {
+		return fmt.Errorf("Unable to recognize backend format, Disk is not in FS format. %s", format.Format)
+	}
+
+	// Check if format is currently supported.
+	if !isFSValidFormat(format) {
+		return errCorruptedFormat
+	}
+
+	// Check for format version is current.
+	if format.FS.Version != formatVersion {
+		return errFSFormatOld
+	}
+
+	return nil
+}
+
+// This is just kept as reference, there is no sanity
+// check for FS format in version "1".
+func checkFormatSanityFSV1(fsPath string) error {
+	return nil
+}
+
+// Check for sanity of FS format in version "2".
+func checkFormatSanityFSV2(fsPath string) error {
+	buckets, err := readDir(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix))
+	if err != nil && err != errFileNotFound {
+		return err
+	}
+
+	// Attempt to validate all the buckets have a sanitized backend.
+	for _, bucket := range buckets {
+		entries, rerr := readDir(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix, bucket))
+		if rerr != nil {
+			return rerr
+		}
+
+		var expectedConfigs = append(bucketMetadataConfigs, objectMetaPrefix+"/")
+		entriesSet := set.CreateStringSet(entries...)
+		expectedConfigsSet := set.CreateStringSet(expectedConfigs...)
+
+		// Entries found shouldn't be more than total
+		// expected config directories, files.
+		if len(entriesSet) > len(expectedConfigsSet) {
+			return errCorruptedFormat
+		}
+
+		// Look for the difference between entries and the
+		// expected config set, resulting entries if they
+		// intersect with original entries set we know
+		// that the backend has unexpected files.
+		if !entriesSet.Difference(expectedConfigsSet).IsEmpty() {
+			return errCorruptedFormat
+		}
+	}
+	return nil
+}
+
+// Check for sanity of FS format for a given version.
+func checkFormatSanityFS(fsPath string, fsFormatVersion string) (err error) {
+	switch fsFormatVersion {
+	case fsFormatV2:
+		err = checkFormatSanityFSV2(fsPath)
+	default:
+		err = errCorruptedFormat
+	}
+	return err
+}
+
+// Initializes a new `format.json` if not present, validates `format.json`
+// if already present and migrates to newer version if necessary. Returns
+// the final format version.
+func initFormatFS(fsPath, fsUUID string) (err error) {
+	fsFormatPath := pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile)
+
+	// fsFormatJSONFile - format.json file stored in minioMetaBucket(.minio.sys) directory.
+	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		return traceError(err)
+	}
+	defer lk.Close()
+
+	var format = &formatConfigV1{}
+	_, err = format.ReadFrom(lk)
+	// For all unexpected errors, we return.
+	if err != nil && errorCause(err) != io.EOF {
+		return traceError(fmt.Errorf("Unable to load 'format.json', %s", err))
+	}
+
+	// If we couldn't read anything, The disk is unformatted.
+	if errorCause(err) == io.EOF {
+		err = errUnformattedDisk
+		format = newFSFormatV2()
+	} else {
+		// Validate loaded `format.json`.
+		err = checkFormatFS(format, fsFormatVersion)
+		if err != nil && err != errFSFormatOld {
+			return traceError(fmt.Errorf("Unable to validate 'format.json', %s", err))
+		}
+	}
+
+	// Disk is in old format migrate object metadata.
+	if err == errFSFormatOld {
+		if merr := migrateFSObject(fsPath, fsUUID); merr != nil {
+			return merr
+		}
+
+		// Initialize format v2.
+		format = newFSFormatV2()
+	}
+
+	// Rewrite or write format.json depending on if disk
+	// unformatted and if format is old.
+	if err == errUnformattedDisk || err == errFSFormatOld {
+		if _, err = format.WriteTo(lk); err != nil {
+			return traceError(fmt.Errorf("Unable to initialize 'format.json', %s", err))
+		}
+	}
+
+	// Check for sanity.
+	return checkFormatSanityFS(fsPath, format.FS.Version)
 }

 // Return if the part info in uploadedParts and completeParts are same.
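For orientation, the call below (it appears in newFSObjectLayer later in this commit) drives the whole flow; the comments merely summarize the branches of initFormatFS above and add nothing new:

```go
// Sketch: startup flow implemented by initFormatFS.
//  1. Lock and read .minio.sys/format.json via formatConfigV1.ReadFrom.
//  2. Empty file            -> unformatted disk, write a fresh V2 format.
//  3. Valid but version "1" -> migrateFSObject, then rewrite format.json as V2.
//  4. Valid and version "2" -> leave it as is.
//  5. Finally run checkFormatSanityFS for the resulting version.
if err = initFormatFS(fsPath, fsUUID); err != nil {
	return nil, err
}
```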
@@ -58,7 +58,7 @@ func TestReadFSMetadata(t *testing.T) {
 	}

 	// Construct the full path of fs.json
-	fsPath := pathJoin("buckets", bucketName, objectName, "fs.json")
+	fsPath := pathJoin(bucketMetaPrefix, bucketName, objectMetaPrefix, objectName, "fs.json")
 	fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath)

 	rlk, err := fs.rwPool.Open(fsPath)
@@ -95,7 +95,7 @@ func TestWriteFSMetadata(t *testing.T) {
 	}

 	// Construct the full path of fs.json
-	fsPath := pathJoin("buckets", bucketName, objectName, "fs.json")
+	fsPath := pathJoin(bucketMetaPrefix, bucketName, objectMetaPrefix, objectName, "fs.json")
 	fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath)

 	rlk, err := fs.rwPool.Open(fsPath)
@@ -754,7 +754,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload

 	// Wait for any competing PutObject() operation on bucket/object, since same namespace
 	// would be acquired for `fs.json`.
-	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
+	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile)
 	metaFile, err := fs.rwPool.Create(fsMetaPath)
 	if err != nil {
 		fs.rwPool.Close(fsMetaPathMultipart)

cmd/fs-v1.go:
@@ -24,6 +24,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"os/signal"
 	"path/filepath"
 	"sort"
 	"syscall"
@@ -72,15 +73,117 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {

 }

+// Migrate FS object is a place holder code for all
+// FS format migrations.
+func migrateFSObject(fsPath, fsUUID string) (err error) {
+	// Writing message here is important for servers being upgraded.
+	log.Println("Please do not stop the server.")
+
+	ch := make(chan os.Signal)
+	defer signal.Stop(ch)
+	defer close(ch)
+
+	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
+	go func() {
+		for {
+			_, ok := <-ch
+			if !ok {
+				break
+			}
+			log.Println("Please wait server is being upgraded..")
+		}
+	}()
+
+	return migrateFSFormatV1ToV2(fsPath, fsUUID)
+}
+
+// List all buckets at meta bucket prefix in `.minio.sys/buckets/` path.
+// This is implemented to avoid a bug on windows with using readDir().
+func fsReaddirMetaBuckets(fsPath string) ([]string, error) {
+	f, err := os.Open(preparePath(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix)))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, errFileNotFound
+		} else if os.IsPermission(err) {
+			return nil, errFileAccessDenied
+		}
+		return nil, err
+	}
+	return f.Readdirnames(-1)
+}
+
+var bucketMetadataConfigs = []string{
+	bucketNotificationConfig,
+	bucketListenerConfig,
+	bucketPolicyConfig,
+}
+
+// Attempts to migrate old object metadata files to newer format
+//
+// i.e
+// -------------------------------------------------------
+// .minio.sys/buckets/<bucket_name>/<object_path>/fs.json - V1
+// -------------------------------------------------------
+// .minio.sys/buckets/<bucket_name>/objects/<object_path>/fs.json - V2
+// -------------------------------------------------------
+//
+func migrateFSFormatV1ToV2(fsPath, fsUUID string) (err error) {
+	metaBucket := pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix)
+
+	var buckets []string
+	buckets, err = fsReaddirMetaBuckets(fsPath)
+	if err != nil && err != errFileNotFound {
+		return err
+	}
+
+	// Migrate all buckets present.
+	for _, bucket := range buckets {
+		// Temporary bucket of form .UUID-bucket.
+		tmpBucket := fmt.Sprintf(".%s-%s", fsUUID, bucket)
+
+		// Rename existing bucket as `.UUID-bucket`.
+		if err = fsRenameFile(pathJoin(metaBucket, bucket), pathJoin(metaBucket, tmpBucket)); err != nil {
+			return err
+		}
+
+		// Create a new bucket name with name as `bucket`.
+		if err = fsMkdir(pathJoin(metaBucket, bucket)); err != nil {
+			return err
+		}
+
+		/// Rename all bucket metadata files to newly created `bucket`.
+		for _, bucketMetaFile := range bucketMetadataConfigs {
+			if err = fsRenameFile(pathJoin(metaBucket, tmpBucket, bucketMetaFile),
+				pathJoin(metaBucket, bucket, bucketMetaFile)); err != nil {
+				if errorCause(err) != errFileNotFound {
+					return err
+				}
+			}
+		}
+
+		// Finally rename the temporary bucket to `bucket/objects` directory.
+		if err = fsRenameFile(pathJoin(metaBucket, tmpBucket),
+			pathJoin(metaBucket, bucket, objectMetaPrefix)); err != nil {
+			if errorCause(err) != errFileNotFound {
+				return err
+			}
+		}
+	}
+
+	log.Printf("Migrating bucket metadata format from \"%s\" to newer format \"%s\"... completed successfully.", fsFormatV1, fsFormatV2)
+
+	// If all goes well we return success.
+	return nil
+}
+
 // newFSObjectLayer - initialize new fs object layer.
 func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
 	if fsPath == "" {
 		return nil, errInvalidArgument
 	}

-	var err error
 	// Disallow relative paths, figure out absolute paths.
-	fsPath, err = filepath.Abs(fsPath)
+	fsPath, err := filepath.Abs(fsPath)
 	if err != nil {
 		return nil, err
 	}
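To make the migration concrete, here is the rename sequence for a single hypothetical bucket named "photos" (the bucket name is an assumption; error handling is elided with blank identifiers purely for brevity):

```go
// Sketch, not part of the patch: effective on-disk steps of migrateFSFormatV1ToV2.
tmpBucket := fmt.Sprintf(".%s-%s", fsUUID, "photos") // ".<uuid>-photos"
_ = fsRenameFile(pathJoin(metaBucket, "photos"), pathJoin(metaBucket, tmpBucket)) // park old tree
_ = fsMkdir(pathJoin(metaBucket, "photos"))                                       // fresh bucket dir
_ = fsRenameFile(pathJoin(metaBucket, tmpBucket, bucketPolicyConfig),             // move configs back
	pathJoin(metaBucket, "photos", bucketPolicyConfig))
_ = fsRenameFile(pathJoin(metaBucket, tmpBucket),                                 // old fs.json tree -> photos/objects/
	pathJoin(metaBucket, "photos", objectMetaPrefix))
```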
@@ -108,26 +211,6 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
 		return nil, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
 	}

-	// Load `format.json`.
-	format, err := loadFormatFS(fsPath)
-	if err != nil && err != errUnformattedDisk {
-		return nil, fmt.Errorf("Unable to load 'format.json', %s", err)
-	}
-
-	// If the `format.json` doesn't exist create one.
-	if err == errUnformattedDisk {
-		fsFormatPath := pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile)
-		// Initialize format.json, if already exists overwrite it.
-		if serr := saveFormatFS(fsFormatPath, newFSFormatV1()); serr != nil {
-			return nil, fmt.Errorf("Unable to initialize 'format.json', %s", serr)
-		}
-	}
-
-	// Validate if we have the same format.
-	if err == nil && format.Format != "fs" {
-		return nil, fmt.Errorf("Unable to recognize backend format, Disk is not in FS format. %s", format.Format)
-	}
-
 	// Initialize fs objects.
 	fs := &fsObjects{
 		fsPath: fsPath,
@@ -141,6 +224,17 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
 		},
 	}

+	// Initialize `format.json`.
+	if err = initFormatFS(fsPath, fsUUID); err != nil {
+		return nil, err
+	}
+
+	// Once initialized hold read lock for the entire operation
+	// of filesystem backend.
+	if _, err = fs.rwPool.Open(pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile)); err != nil {
+		return nil, err
+	}
+
 	// Initialize and load bucket policies.
 	err = initBucketPolicies(fs)
 	if err != nil {
@@ -159,6 +253,9 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {

 // Should be called when process shuts down.
 func (fs fsObjects) Shutdown() error {
+	// Close the format.json read lock.
+	fs.rwPool.Close(pathJoin(fs.fsPath, minioMetaBucket, fsFormatJSONFile))
+
 	// Cleanup and delete tmp uuid.
 	return fsRemoveAll(pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID))
 }
@@ -238,7 +335,7 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
 		return nil, traceError(err)
 	}
 	var bucketInfos []BucketInfo
-	entries, err := readDir(preparePath(fs.fsPath))
+	entries, err := readDir(fs.fsPath)
 	if err != nil {
 		return nil, toObjectErr(traceError(errDiskNotFound))
 	}
@@ -322,7 +419,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
 	// Check if this request is only metadata update.
 	cpMetadataOnly := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 	if cpMetadataOnly {
-		fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fsMetaJSONFile)
+		fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, objectMetaPrefix, srcObject, fsMetaJSONFile)
 		var wlk *lock.LockedFile
 		wlk, err = fs.rwPool.Write(fsMetaPath)
 		if err != nil {
@@ -395,7 +492,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
 	}

 	if bucket != minioMetaBucket {
-		fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
+		fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile)
 		_, err = fs.rwPool.Open(fsMetaPath)
 		if err != nil && err != errFileNotFound {
 			return toObjectErr(traceError(err), bucket, object)
@@ -437,7 +534,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
 // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
 func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
 	fsMeta := fsMetaV1{}
-	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
+	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile)

 	// Read `fs.json` to perhaps contend with
 	// parallel Put() operations.
@@ -520,7 +617,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
 	var wlk *lock.LockedFile
 	if bucket != minioMetaBucket {
 		bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
-		fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fsMetaJSONFile)
+		fsMetaPath := pathJoin(bucketMetaDir, bucket, objectMetaPrefix, object, fsMetaJSONFile)
 		wlk, err = fs.rwPool.Create(fsMetaPath)
 		if err != nil {
 			return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
@@ -647,7 +744,7 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
 	}

 	minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket)
-	fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
+	fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile)
 	if bucket != minioMetaBucket {
 		rwlk, lerr := fs.rwPool.Write(fsMetaPath)
 		if lerr == nil {
@@ -701,7 +798,7 @@ func (fs fsObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc {
 // getObjectETag is a helper function, which returns only the md5sum
 // of the file on the disk.
 func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
-	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fsMetaJSONFile)
+	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, entry, fsMetaJSONFile)

 	// Read `fs.json` to perhaps contend with
 	// parallel Put() operations.
@@ -22,6 +22,8 @@ import (
 	"os"
 	"path/filepath"
 	"testing"
+
+	"github.com/minio/minio/pkg/lock"
 )

 // TestNewFS - tests initialization of all input disks
@@ -85,8 +87,8 @@ func TestFSShutdown(t *testing.T) {
 	}
 }

-// TestFSLoadFormatFS - test loadFormatFS with healty and faulty disks
-func TestFSLoadFormatFS(t *testing.T) {
+// Tests migrating FS format without .minio.sys/buckets.
+func TestFSMigrateObjectWithoutObjects(t *testing.T) {
 	// Prepare for testing
 	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
 	defer removeAll(disk)
@@ -100,13 +102,364 @@ func TestFSLoadFormatFS(t *testing.T) {
 	}

 	fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
-	if err := saveFormatFS(preparePath(fsFormatPath), newFSFormatV1()); err != nil {
-		t.Fatal("Should not fail here", err)
+	formatCfg := &formatConfigV1{
+		Version: "1",
+		Format: "fs",
+		FS: &fsFormat{
+			Version: "1",
+		},
 	}
-	_, err := loadFormatFS(disk)
+
+	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
 	if err != nil {
 		t.Fatal("Should not fail here", err)
 	}
+
+	if err = initFormatFS(disk, uuid); err != nil {
+		t.Fatal("Should not fail with unexpected", err)
+	}
+
+	formatCfg = &formatConfigV1{}
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = formatCfg.ReadFrom(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal("Should not fail here", err)
+	}
+	if formatCfg.FS.Version != fsFormatV2 {
+		t.Fatalf("Unexpected version detected expected \"%s\", got %s", fsFormatV2, formatCfg.FS.Version)
+	}
+}
+
+// Tests migrating FS format without .minio.sys/buckets.
+func TestFSMigrateObjectWithErr(t *testing.T) {
+	// Prepare for testing
+	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
+	defer removeAll(disk)
+
+	// Assign a new UUID.
+	uuid := mustGetUUID()
+
+	// Initialize meta volume, if volume already exists ignores it.
+	if err := initMetaVolumeFS(disk, uuid); err != nil {
+		t.Fatal(err)
+	}
+
+	fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
+	formatCfg := &formatConfigV1{
+		Version: "1",
+		Format: "fs",
+		FS: &fsFormat{
+			Version: "10",
+		},
+	}
+
+	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal("Should not fail here", err)
+	}
+
+	if err = initFormatFS(disk, uuid); err != nil {
+		if errorCause(err).Error() !=
+			"Unable to validate 'format.json', corrupted backend format" {
+			t.Fatal("Should not fail with unexpected", err)
+		}
+	}
+
+	fsFormatPath = pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
+	formatCfg = &formatConfigV1{
+		Version: "1",
+		Format: "garbage",
+		FS: &fsFormat{
+			Version: "1",
+		},
+	}
+
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal("Should not fail here", err)
+	}
+
+	if err = initFormatFS(disk, uuid); err != nil {
+		if errorCause(err).Error() !=
+			"Unable to validate 'format.json', Unable to recognize backend format, Disk is not in FS format. garbage" {
+			t.Fatal("Should not fail with unexpected", err)
+		}
+	}
+
+}
+
+// Tests migrating FS format with .minio.sys/buckets filled with
+// object metadata.
+func TestFSMigrateObjectWithObjects(t *testing.T) {
+	// Prepare for testing
+	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
+	defer removeAll(disk)
+
+	// Assign a new UUID.
+	uuid := mustGetUUID()
+
+	// Initialize meta volume, if volume already exists ignores it.
+	if err := initMetaVolumeFS(disk, uuid); err != nil {
+		t.Fatal(err)
+	}
+
+	fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
+	formatCfg := &formatConfigV1{
+		Version: "1",
+		Format: "fs",
+		FS: &fsFormat{
+			Version: "1",
+		},
+	}
+	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal("Should not fail here", err)
+	}
+
+	// Construct the full path of fs.json
+	fsPath1 := pathJoin(bucketMetaPrefix, "testvolume1", "my-object1", fsMetaJSONFile)
+	fsPath1 = pathJoin(disk, minioMetaBucket, fsPath1)
+
fsMetaJSON := `{"version":"1.0.0","format":"fs","minio":{"release":"DEVELOPMENT.2017-03-27T02-26-33Z"},"meta":{"md5Sum":"467886be95c8ecfd71a2900e3f461b4f"}`
+	if _, err = fsCreateFile(fsPath1, bytes.NewReader([]byte(fsMetaJSON)), nil, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	// Construct the full path of fs.json
+	fsPath2 := pathJoin(bucketMetaPrefix, "testvolume2", "my-object2", fsMetaJSONFile)
+	fsPath2 = pathJoin(disk, minioMetaBucket, fsPath2)
+
fsMetaJSON = `{"version":"1.0.0","format":"fs","minio":{"release":"DEVELOPMENT.2017-03-27T02-26-33Z"},"meta":{"md5Sum":"467886be95c8ecfd71a2900eff461b4d"}`
+	if _, err = fsCreateFile(fsPath2, bytes.NewReader([]byte(fsMetaJSON)), nil, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	// Construct the full path of policy.json
+	ppath := pathJoin(bucketMetaPrefix, "testvolume2", bucketPolicyConfig)
+	ppath = pathJoin(disk, minioMetaBucket, ppath)
+
policyJSON := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket/*"],"Sid":""}]}`
+	if _, err = fsCreateFile(ppath, bytes.NewReader([]byte(policyJSON)), nil, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = initFormatFS(disk, mustGetUUID()); err != nil {
+		t.Fatal("Should not fail here", err)
+	}
+
+	fsPath2 = pathJoin(bucketMetaPrefix, "testvolume2", objectMetaPrefix, "my-object2", fsMetaJSONFile)
+	fsPath2 = pathJoin(disk, minioMetaBucket, fsPath2)
+	fi, err := fsStatFile(fsPath2)
+	if err != nil {
+		t.Fatal("Path should exist and accessible after migration", err)
+	}
+	if fi.IsDir() {
+		t.Fatalf("Unexpected path %s should be a file", fsPath2)
+	}
+
+	formatCfg = &formatConfigV1{}
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = formatCfg.ReadFrom(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal("Should not fail here", err)
+	}
+	if formatCfg.FS.Version != fsFormatV2 {
+		t.Fatalf("Unexpected version detected expected \"%s\", got %s", fsFormatV2, formatCfg.FS.Version)
+	}
+
+	ppath = pathJoin(bucketMetaPrefix, "testvolume2", "acl.json")
+	ppath = pathJoin(disk, minioMetaBucket, ppath)
+
+	if _, err = fsCreateFile(ppath, bytes.NewReader([]byte("")), nil, 0); err != nil {
+		t.Fatal(err)
+	}
+
+	if err = initFormatFS(disk, mustGetUUID()); errorCause(err) != errCorruptedFormat {
+		t.Fatal("Should not fail here", err)
+	}
+}
+
+// TestFSCheckFormatFSErr - test loadFormatFS loading older format.
+func TestFSCheckFormatFSErr(t *testing.T) {
+	// Prepare for testing
+	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
+	defer removeAll(disk)
+
+	// Assign a new UUID.
+	uuid := mustGetUUID()
+
+	// Initialize meta volume, if volume already exists ignores it.
+	if err := initMetaVolumeFS(disk, uuid); err != nil {
+		t.Fatal(err)
+	}
+
+	fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
+	formatCfg := &formatConfigV1{
+		Version: "1",
+		Format: "fs",
+		FS: &fsFormat{
+			Version: "1",
+		},
+	}
+
+	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	formatCfg = &formatConfigV1{}
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = formatCfg.ReadFrom(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = checkFormatFS(formatCfg, fsFormatVersion); errorCause(err) != errFSFormatOld {
+		t.Fatal("Should not fail with unexpected", err)
+	}
+
+	formatCfg = &formatConfigV1{
+		Version: "1",
+		Format: "fs",
+		FS: &fsFormat{
+			Version: "10",
+		},
+	}
+
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = checkFormatFS(formatCfg, fsFormatVersion); errorCause(err) != errCorruptedFormat {
+		t.Fatal("Should not fail with unexpected", err)
+	}
+
+	formatCfg = &formatConfigV1{
+		Version: "1",
+		Format: "garbage",
+		FS: &fsFormat{
+			Version: "1",
+		},
+	}
+
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err = checkFormatFS(formatCfg, fsFormatVersion); err != nil {
+		if errorCause(err).Error() != "Unable to recognize backend format, Disk is not in FS format. garbage" {
+			t.Fatal("Should not fail with unexpected", err)
+		}
+	}
+
+	if err = checkFormatFS(nil, fsFormatVersion); errorCause(err) != errUnexpected {
+		t.Fatal("Should fail with errUnexpected, but found", err)
+	}
+
+	formatCfg = &formatConfigV1{
+		Version: "1",
+		Format: "fs",
+		FS: &fsFormat{
+			Version: "2",
+		},
+	}
+
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = formatCfg.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Should not fail.
+	if err = checkFormatFS(formatCfg, fsFormatVersion); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestFSCheckFormatFS - test loadFormatFS with healty and faulty disks
+func TestFSCheckFormatFS(t *testing.T) {
+	// Prepare for testing
+	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
+	defer removeAll(disk)
+
+	// Assign a new UUID.
+	uuid := mustGetUUID()
+
+	// Initialize meta volume, if volume already exists ignores it.
+	if err := initMetaVolumeFS(disk, uuid); err != nil {
+		t.Fatal(err)
+	}
+
+	fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
+	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	format := newFSFormatV2()
+	_, err = format.WriteTo(lk)
+	lk.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	// Loading corrupted format file
 	file, err := os.OpenFile(preparePath(fsFormatPath), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
 	if err != nil {
@@ -114,15 +467,24 @@ func TestFSLoadFormatFS(t *testing.T) {
 	}
 	file.Write([]byte{'b'})
 	file.Close()
-	_, err = loadFormatFS(disk)
+
+	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	format = &formatConfigV1{}
+	_, err = format.ReadFrom(lk)
+	lk.Close()
 	if err == nil {
 		t.Fatal("Should return an error here")
 	}

 	// Loading format file from disk not found.
 	removeAll(disk)
-	_, err = loadFormatFS(disk)
-	if err != nil && err != errUnformattedDisk {
-		t.Fatal("Should return unformatted disk, but got", err)
+	_, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
+	if err != nil && !os.IsNotExist(err) {
+		t.Fatal("Should return 'format.json' does not exist, but got", err)
 	}
 }

@@ -32,6 +32,9 @@ const (
 	// Buckets meta prefix.
 	bucketMetaPrefix = "buckets"

+	// Objects meta prefix.
+	objectMetaPrefix = "objects"
+
 	// Md5Sum of empty string.
 	emptyStrMd5Sum = "d41d8cd98f00b204e9800998ecf8427e"
 )
@@ -32,6 +32,8 @@ func readDir(dirPath string) (entries []string, err error) {
 		// File is really not found.
 		if os.IsNotExist(err) {
 			return nil, errFileNotFound
+		} else if os.IsPermission(err) {
+			return nil, errFileAccessDenied
 		}

 		// File path cannot be verified since one of the parents is a file.
@@ -291,7 +291,7 @@ func (s *posix) ListVols() (volsInfo []VolInfo, err error) {
 		return nil, err
 	}

-	volsInfo, err = listVols(preparePath(s.diskPath))
+	volsInfo, err = listVols(s.diskPath)
 	if err != nil {
 		return nil, err
 	}
@@ -82,7 +82,7 @@ func (m *ServerMux) handleServiceSignals() error {

 	// Wait for SIGTERM in a go-routine.
 	trapCh := signalTrap(os.Interrupt, syscall.SIGTERM)
-	go func(<-chan bool) {
+	go func(trapCh <-chan bool) {
 		<-trapCh
 		globalServiceSignalCh <- serviceStop
 	}(trapCh)
@@ -81,7 +81,7 @@ An example here shows how the contention is handled with GetObject().
 GetObject() holds a read lock on `fs.json`.

 ```go
-fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
+fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile)
 rlk, err := fs.rwPool.Open(fsMetaPath)
 if err != nil {
 	return toObjectErr(traceError(err), bucket, object)
@@ -98,7 +98,7 @@ GetObject() holds a read lock on `fs.json`.
 A concurrent PutObject is requested on the same object, PutObject() attempts a write lock on `fs.json`.

 ```go
-fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
+fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile)
 wlk, err := fs.rwPool.Create(fsMetaPath)
 if err != nil {
 	return ObjectInfo{}, toObjectErr(err, bucket, object)
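As a concrete reading of the doc change above, here is where `fs.json` for a hypothetical object `photos/1.jpg` in bucket `mybucket` lives before and after this commit (the bucket and object names are assumptions for illustration):

```go
// V1 (old): .minio.sys/buckets/mybucket/photos/1.jpg/fs.json
// V2 (new): .minio.sys/buckets/mybucket/objects/photos/1.jpg/fs.json
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix,
	"mybucket", objectMetaPrefix, "photos/1.jpg", fsMetaJSONFile)
```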