fs: Add safe locking semantics for `format.json` (#4523)

This patch also reverts previous changes which were
merged for migration to the newer disk format. We will
be bringing these changes back in subsequent releases, but
we wish to add locking protection in this release so that
future release migrations remain safe.

Revert "fs: Migration should handle bucketConfigs as regular objects. (#4482)"
This reverts commit 976870a391.

Revert "fs: Migrate object metadata to objects directory. (#4195)"
This reverts commit 76f4f20609.
This commit is contained in:
Harshavardhana 2017-06-12 17:40:28 -07:00 committed by GitHub
parent b8463a738c
commit 075b8903d7
20 changed files with 509 additions and 933 deletions

View File

@ -33,6 +33,15 @@ type fsFormat struct {
Version string `json:"version"` Version string `json:"version"`
} }
// FS format version strings.
const (
	// Represents the current backend disk structure
	// version under `.minio.sys` and actual data namespace.
	// This is the value stored in formatConfigV1.fsFormat.Version
	// and validated by CheckFS().
	fsFormatBackendV1 = "1"
)
// xlFormat - structure holding 'xl' format. // xlFormat - structure holding 'xl' format.
type xlFormat struct { type xlFormat struct {
Version string `json:"version"` // Version of 'xl' format. Version string `json:"version"` // Version of 'xl' format.
@ -42,6 +51,15 @@ type xlFormat struct {
JBOD []string `json:"jbod"` JBOD []string `json:"jbod"`
} }
// XL format version strings.
const (
	// Represents the current backend disk structure
	// version under `.minio.sys` and actual data namespace.
	// This is the value stored in formatConfigV1.xlFormat.Version.
	xlFormatBackendV1 = "1"
)
// formatConfigV1 - structure holds format config version '1'. // formatConfigV1 - structure holds format config version '1'.
type formatConfigV1 struct { type formatConfigV1 struct {
Version string `json:"version"` // Version of the format config. Version string `json:"version"` // Version of the format config.
@ -51,6 +69,68 @@ type formatConfigV1 struct {
XL *xlFormat `json:"xl,omitempty"` // XL field holds xl format. XL *xlFormat `json:"xl,omitempty"` // XL field holds xl format.
} }
// Format json file.
const (
	// Format config file carries backend format specific details.
	formatConfigFile = "format.json"

	// Format config tmp file carries backend format.
	formatConfigFileTmp = "format.json.tmp"
)

// `format.json` version value.
const (
	// formatConfigV1.Version represents the version string
	// of the current structure and its fields in `format.json`.
	formatFileV1 = "1"

	// Future `format.json` structure changes should have
	// its own version and should be subsequently listed here.
)

// Constitutes `format.json` backend name.
const (
	// Represents FS backend.
	formatBackendFS = "fs"

	// Represents XL backend.
	formatBackendXL = "xl"
)
// CheckFS if the format is FS and is valid with right values
// returns appropriate errors otherwise.
func (f *formatConfigV1) CheckFS() error {
	// Validate if format config version is v1.
	if f.Version != formatFileV1 {
		return fmt.Errorf("Unknown format file version '%s'", f.Version)
	}
	// Validate if we have the expected format.
	if f.Format != formatBackendFS {
		return fmt.Errorf("FS backend format required. Found '%s'", f.Format)
	}
	// Guard against a `format.json` that claims the FS backend but
	// carries no "fs" section - avoids a nil pointer dereference below.
	if f.FS == nil {
		return fmt.Errorf("FS backend format missing 'fs' field")
	}
	// Check if format is currently supported.
	if f.FS.Version != fsFormatBackendV1 {
		return fmt.Errorf("Unknown backend FS format version '%s'", f.FS.Version)
	}
	// Success.
	return nil
}
// LoadFormat - loads format config v1, returns `errUnformattedDisk`
// if reading format.json fails with io.EOF.
func (f *formatConfigV1) LoadFormat(lk *lock.LockedFile) error {
	var rerr error
	if _, rerr = f.ReadFrom(lk); errorCause(rerr) == io.EOF {
		// An empty `format.json` on disk means the backend
		// was never formatted - report it as such.
		return traceError(errUnformattedDisk)
	}
	return rerr
}
func (f *formatConfigV1) WriteTo(lk *lock.LockedFile) (n int64, err error) { func (f *formatConfigV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
// Serialize to prepare to write to disk. // Serialize to prepare to write to disk.
var fbytes []byte var fbytes []byte
@ -88,6 +168,21 @@ func (f *formatConfigV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
return int64(len(fbytes)), nil return int64(len(fbytes)), nil
} }
// newFSFormat - returns a fresh FS `format.json` config in its
// latest supported version.
func newFSFormat() (format *formatConfigV1) {
	format = newFSFormatV1()
	return format
}
// newFSFormatV1 - initializes new formatConfigV1 with FS format info.
func newFSFormatV1() (format *formatConfigV1) {
	// Build the FS-specific section first, then wrap it in the
	// top-level format config carrying the current version strings.
	fsInfo := &fsFormat{
		Version: fsFormatBackendV1,
	}
	format = &formatConfigV1{
		Version: formatFileV1,
		Format:  formatBackendFS,
		FS:      fsInfo,
	}
	return format
}
/* /*
All disks online All disks online
@ -811,10 +906,10 @@ func loadFormatXL(bootstrapDisks []StorageAPI, readQuorum int) (disks []StorageA
func checkFormatXLValue(formatXL *formatConfigV1) error { func checkFormatXLValue(formatXL *formatConfigV1) error {
// Validate format version and format type. // Validate format version and format type.
if formatXL.Version != "1" { if formatXL.Version != formatFileV1 {
return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version) return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version)
} }
if formatXL.Format != "xl" { if formatXL.Format != formatBackendXL {
return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format) return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format)
} }
if formatXL.XL.Version != "1" { if formatXL.XL.Version != "1" {
@ -916,10 +1011,10 @@ func initFormatXL(storageDisks []StorageAPI) (err error) {
} }
// Allocate format config. // Allocate format config.
formats[index] = &formatConfigV1{ formats[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: mustGetUUID(), Disk: mustGetUUID(),
}, },
} }

View File

@ -18,7 +18,12 @@ package cmd
import ( import (
"bytes" "bytes"
"errors"
"os"
"path/filepath"
"testing" "testing"
"github.com/minio/minio/pkg/lock"
) )
// generates a valid format.json for XL backend. // generates a valid format.json for XL backend.
@ -30,10 +35,10 @@ func genFormatXLValid() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -51,10 +56,10 @@ func genFormatXLInvalidVersion() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -75,10 +80,10 @@ func genFormatXLInvalidFormat() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -99,10 +104,10 @@ func genFormatXLInvalidXLVersion() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -116,8 +121,8 @@ func genFormatXLInvalidXLVersion() []*formatConfigV1 {
func genFormatFS() *formatConfigV1 { func genFormatFS() *formatConfigV1 {
return &formatConfigV1{ return &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "fs", Format: formatBackendFS,
} }
} }
@ -130,10 +135,10 @@ func genFormatXLInvalidJBODCount() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -151,10 +156,10 @@ func genFormatXLInvalidJBOD() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -178,10 +183,10 @@ func genFormatXLInvalidDisks() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -202,10 +207,10 @@ func genFormatXLInvalidDisksOrder() []*formatConfigV1 {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -240,10 +245,10 @@ func prepareFormatXLHealFreshDisks(obj ObjectLayer) ([]StorageAPI, error) {
// Remove the content of export dir 10 but preserve .minio.sys because it is automatically // Remove the content of export dir 10 but preserve .minio.sys because it is automatically
// created when minio starts // created when minio starts
for i := 3; i <= 5; i++ { for i := 3; i <= 5; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
return []StorageAPI{}, err return []StorageAPI{}, err
} }
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "tmp"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, "tmp"); err != nil {
return []StorageAPI{}, err return []StorageAPI{}, err
} }
if err = xl.storageDisks[i].DeleteFile(bucket, object+"/xl.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(bucket, object+"/xl.json"); err != nil {
@ -361,19 +366,19 @@ func TestFormatXLHealCorruptedDisks(t *testing.T) {
} }
// Now, remove two format files.. Load them and reorder // Now, remove two format files.. Load them and reorder
if err = xl.storageDisks[3].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[3].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = xl.storageDisks[11].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[11].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Remove the content of export dir 10 but preserve .minio.sys because it is automatically // Remove the content of export dir 10 but preserve .minio.sys because it is automatically
// created when minio starts // created when minio starts
if err = xl.storageDisks[10].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[10].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = xl.storageDisks[10].DeleteFile(".minio.sys", "tmp"); err != nil { if err = xl.storageDisks[10].DeleteFile(minioMetaBucket, "tmp"); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = xl.storageDisks[10].DeleteFile(bucket, object+"/xl.json"); err != nil { if err = xl.storageDisks[10].DeleteFile(bucket, object+"/xl.json"); err != nil {
@ -434,10 +439,10 @@ func TestFormatXLReorderByInspection(t *testing.T) {
} }
// Now, remove two format files.. Load them and reorder // Now, remove two format files.. Load them and reorder
if err = xl.storageDisks[3].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[3].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err = xl.storageDisks[5].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[5].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -555,10 +560,10 @@ func TestSavedUUIDOrder(t *testing.T) {
} }
for index := range jbod { for index := range jbod {
formatConfigs[index] = &formatConfigV1{ formatConfigs[index] = &formatConfigV1{
Version: "1", Version: formatFileV1,
Format: "xl", Format: formatBackendXL,
XL: &xlFormat{ XL: &xlFormat{
Version: "1", Version: xlFormatBackendV1,
Disk: jbod[index], Disk: jbod[index],
JBOD: jbod, JBOD: jbod,
}, },
@ -682,6 +687,163 @@ func TestGenericFormatCheckXL(t *testing.T) {
} }
} }
// TestFSCheckFormatFSErr - writes various `format.json` contents to disk,
// reads them back and verifies formatConfigV1.CheckFS accepts the valid
// format and rejects invalid versions/backends with the expected errors.
func TestFSCheckFormatFSErr(t *testing.T) {
	// Prepare for testing
	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
	defer removeAll(disk)

	// Assign a new UUID.
	uuid := mustGetUUID()

	// Initialize meta volume, if volume already exists ignores it.
	if err := initMetaVolumeFS(disk, uuid); err != nil {
		t.Fatal(err)
	}

	testCases := []struct {
		format         *formatConfigV1
		formatWriteErr error
		formatCheckErr error
		shouldPass     bool
	}{
		// Valid, current FS format - CheckFS must succeed.
		{
			format: &formatConfigV1{
				Version: formatFileV1,
				Format:  formatBackendFS,
				FS: &fsFormat{
					Version: fsFormatBackendV1,
				},
			},
			formatCheckErr: nil,
			shouldPass:     true,
		},
		// Unsupported FS backend version.
		{
			format: &formatConfigV1{
				Version: formatFileV1,
				Format:  formatBackendFS,
				FS: &fsFormat{
					Version: "10",
				},
			},
			formatCheckErr: errors.New("Unknown backend FS format version '10'"),
			shouldPass:     false,
		},
		// Unrecognized backend name (shouldPass zero value is false).
		{
			format: &formatConfigV1{
				Version: formatFileV1,
				Format:  "garbage",
				FS: &fsFormat{
					Version: fsFormatBackendV1,
				},
			},
			formatCheckErr: errors.New("FS backend format required. Found 'garbage'"),
		},
		// Unknown top-level `format.json` version.
		{
			format: &formatConfigV1{
				Version: "-1",
				Format:  formatBackendFS,
				FS: &fsFormat{
					Version: fsFormatBackendV1,
				},
			},
			formatCheckErr: errors.New("Unknown format file version '-1'"),
		},
	}

	fsFormatPath := pathJoin(disk, minioMetaBucket, formatConfigFile)
	for i, testCase := range testCases {
		// Write the test case's format config out to `format.json`.
		lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			t.Fatal(err)
		}
		_, err = testCase.format.WriteTo(lk)
		lk.Close()
		if err != nil {
			t.Fatalf("Test %d: Expected nil, got %s", i+1, err)
		}

		// Read the config back from disk and run CheckFS on it.
		lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
		if err != nil {
			t.Fatal(err)
		}
		formatCfg := &formatConfigV1{}
		_, err = formatCfg.ReadFrom(lk)
		lk.Close()
		if err != nil {
			t.Fatal(err)
		}
		err = formatCfg.CheckFS()
		if err != nil && testCase.shouldPass {
			t.Errorf("Test %d: Should not fail with unexpected %s, expected nil", i+1, err)
		}
		if err == nil && !testCase.shouldPass {
			t.Errorf("Test %d: Should fail with expected %s, got nil", i+1, testCase.formatCheckErr)
		}
		if err != nil && !testCase.shouldPass {
			// Compare the underlying (unwrapped) error message.
			if errorCause(err).Error() != testCase.formatCheckErr.Error() {
				t.Errorf("Test %d: Should fail with expected %s, got %s", i+1, testCase.formatCheckErr, err)
			}
		}
	}
}
// TestFSCheckFormatFS - tests FS `format.json` reading with healthy
// and faulty disks: a freshly written format, a corrupted format file,
// and a disk where `format.json` is missing entirely.
func TestFSCheckFormatFS(t *testing.T) {
	// Prepare for testing
	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
	defer removeAll(disk)

	// Assign a new UUID.
	uuid := mustGetUUID()

	// Initialize meta volume, if volume already exists ignores it.
	if err := initMetaVolumeFS(disk, uuid); err != nil {
		t.Fatal(err)
	}

	// Write a valid, current FS format to disk.
	fsFormatPath := pathJoin(disk, minioMetaBucket, formatConfigFile)
	lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		t.Fatal(err)
	}
	format := newFSFormatV1()
	_, err = format.WriteTo(lk)
	lk.Close()
	if err != nil {
		t.Fatal(err)
	}

	// Loading corrupted format file
	file, err := os.OpenFile(preparePath(fsFormatPath), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		t.Fatal("Should not fail here", err)
	}
	// Append a stray byte so the JSON no longer parses.
	file.Write([]byte{'b'})
	file.Close()

	lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		t.Fatal(err)
	}
	format = &formatConfigV1{}
	_, err = format.ReadFrom(lk)
	lk.Close()
	if err == nil {
		t.Fatal("Should return an error here")
	}

	// Loading format file from disk not found.
	removeAll(disk)
	_, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
	if err != nil && !os.IsNotExist(err) {
		t.Fatal("Should return 'format.json' does not exist, but got", err)
	}
}
func TestLoadFormatXLErrs(t *testing.T) { func TestLoadFormatXLErrs(t *testing.T) {
nDisks := 16 nDisks := 16
fsDirs, err := getRandomDisks(nDisks) fsDirs, err := getRandomDisks(nDisks)
@ -749,7 +911,7 @@ func TestLoadFormatXLErrs(t *testing.T) {
// disks 0..10 returns unformatted disk // disks 0..10 returns unformatted disk
for i := 0; i <= 10; i++ { for i := 0; i <= 10; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -873,7 +1035,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
} }
xl = obj.(*xlObjects) xl = obj.(*xlObjects)
for i := 0; i <= 15; i++ { for i := 0; i <= 15; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -894,7 +1056,7 @@ func TestHealFormatXLCorruptedDisksErrs(t *testing.T) {
} }
xl = obj.(*xlObjects) xl = obj.(*xlObjects)
for i := 0; i <= 15; i++ { for i := 0; i <= 15; i++ {
if err = xl.storageDisks[i].AppendFile(".minio.sys", "format.json", []byte("corrupted data")); err != nil { if err = xl.storageDisks[i].AppendFile(minioMetaBucket, formatConfigFile, []byte("corrupted data")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -998,7 +1160,7 @@ func TestHealFormatXLFreshDisksErrs(t *testing.T) {
} }
xl = obj.(*xlObjects) xl = obj.(*xlObjects)
for i := 0; i <= 15; i++ { for i := 0; i <= 15; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }

View File

@ -134,6 +134,7 @@ func fsStat(statLoc string) (os.FileInfo, error) {
if err != nil { if err != nil {
return nil, traceError(err) return nil, traceError(err)
} }
return fi, nil return fi, nil
} }
@ -142,12 +143,13 @@ func fsStat(statLoc string) (os.FileInfo, error) {
func fsStatDir(statDir string) (os.FileInfo, error) { func fsStatDir(statDir string) (os.FileInfo, error) {
fi, err := fsStat(statDir) fi, err := fsStat(statDir)
if err != nil { if err != nil {
if os.IsNotExist(errorCause(err)) { err = errorCause(err)
if os.IsNotExist(err) {
return nil, traceError(errVolumeNotFound) return nil, traceError(errVolumeNotFound)
} else if os.IsPermission(errorCause(err)) { } else if os.IsPermission(err) {
return nil, traceError(errVolumeAccessDenied) return nil, traceError(errVolumeAccessDenied)
} }
return nil, err return nil, traceError(err)
} }
if !fi.IsDir() { if !fi.IsDir() {
@ -161,16 +163,17 @@ func fsStatDir(statDir string) (os.FileInfo, error) {
func fsStatFile(statFile string) (os.FileInfo, error) { func fsStatFile(statFile string) (os.FileInfo, error) {
fi, err := fsStat(statFile) fi, err := fsStat(statFile)
if err != nil { if err != nil {
if os.IsNotExist(errorCause(err)) { err = errorCause(err)
if os.IsNotExist(err) {
return nil, traceError(errFileNotFound) return nil, traceError(errFileNotFound)
} else if os.IsPermission(errorCause(err)) { } else if os.IsPermission(err) {
return nil, traceError(errFileAccessDenied) return nil, traceError(errFileAccessDenied)
} else if isSysErrNotDir(errorCause(err)) { } else if isSysErrNotDir(err) {
return nil, traceError(errFileAccessDenied) return nil, traceError(errFileAccessDenied)
} else if isSysErrPathNotFound(errorCause(err)) { } else if isSysErrPathNotFound(err) {
return nil, traceError(errFileNotFound) return nil, traceError(errFileNotFound)
} }
return nil, err return nil, traceError(err)
} }
if fi.IsDir() { if fi.IsDir() {
return nil, traceError(errFileAccessDenied) return nil, traceError(errFileAccessDenied)

View File

@ -18,16 +18,14 @@ package cmd
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
pathutil "path" pathutil "path"
"sort" "sort"
"strings" "strings"
"time"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio/pkg/lock" "github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/mimedb" "github.com/minio/minio/pkg/mimedb"
"github.com/tidwall/gjson" "github.com/tidwall/gjson"
@ -37,8 +35,6 @@ import (
const ( const (
// fs.json object metadata. // fs.json object metadata.
fsMetaJSONFile = "fs.json" fsMetaJSONFile = "fs.json"
// format.json FS format metadata.
fsFormatJSONFile = "format.json"
) )
// FS metadata constants. // FS metadata constants.
@ -52,9 +48,6 @@ const (
// FS backend meta format. // FS backend meta format.
fsMetaFormat = "fs" fsMetaFormat = "fs"
// FS backend format version.
fsFormatVersion = fsFormatV2
// Add more constants here. // Add more constants here.
) )
@ -257,14 +250,6 @@ func (m *fsMetaV1) ReadFrom(lk *lock.LockedFile) (n int64, err error) {
return int64(len(fsMetaBuf)), nil return int64(len(fsMetaBuf)), nil
} }
// FS format version strings.
const (
fsFormatV1 = "1" // Previous format.
fsFormatV2 = "2" // Current format.
// Proceed to add "3" when we
// change the backend format in future.
)
// newFSMetaV1 - initializes new fsMetaV1. // newFSMetaV1 - initializes new fsMetaV1.
func newFSMetaV1() (fsMeta fsMetaV1) { func newFSMetaV1() (fsMeta fsMetaV1) {
fsMeta = fsMetaV1{} fsMeta = fsMetaV1{}
@ -274,167 +259,107 @@ func newFSMetaV1() (fsMeta fsMetaV1) {
return fsMeta return fsMeta
} }
// newFSFormatV2 - initializes new formatConfigV1 with FS format version 2. // Check if disk has already a valid format, holds a read lock and
func newFSFormatV2() (format *formatConfigV1) { // upon success returns it to the caller to be closed.
return &formatConfigV1{ func checkLockedValidFormatFS(fsPath string) (*lock.RLockedFile, error) {
Version: "1", fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile)
Format: "fs",
FS: &fsFormat{
Version: fsFormatV2,
},
}
}
// Checks if input format is version 1 and 2. rlk, err := lock.RLockedOpenFile(preparePath(fsFormatPath))
func isFSValidFormat(formatCfg *formatConfigV1) bool { if err != nil {
// Supported format versions. if os.IsNotExist(err) {
var supportedFormatVersions = []string{ // If format.json not found then
fsFormatV1, // its an unformatted disk.
fsFormatV2, return nil, traceError(errUnformattedDisk)
// New supported versions here.
}
// Check for supported format versions.
for _, version := range supportedFormatVersions {
if formatCfg.FS.Version == version {
return true
} }
return nil, traceError(err)
} }
return false
var format = &formatConfigV1{}
if err = format.LoadFormat(rlk.LockedFile); err != nil {
rlk.Close()
return nil, err
}
// Check format FS.
if err = format.CheckFS(); err != nil {
rlk.Close()
return nil, err
}
// Always return read lock here and should be closed by the caller.
return rlk, traceError(err)
} }
// errFSFormatOld- old fs format. // Writes the new format.json if unformatted,
var errFSFormatOld = errors.New("old FS format found") // otherwise closes the input locked file
// and returns any error.
func writeFormatFS(lk *lock.LockedFile) error {
// Close the locked file upon return.
defer lk.Close()
// Checks if the loaded `format.json` is valid and // Load format on disk, checks if we are unformatted
// is expected to be of the requested version. // writes the new format.json
func checkFormatFS(format *formatConfigV1, formatVersion string) error { var format = &formatConfigV1{}
if format == nil { err := format.LoadFormat(lk)
return errUnexpected if errorCause(err) == errUnformattedDisk {
} _, err = newFSFormat().WriteTo(lk)
// Validate if we have the same format.
if format.Format != "fs" {
return fmt.Errorf("Unable to recognize backend format, Disk is not in FS format. %s", format.Format)
}
// Check if format is currently supported.
if !isFSValidFormat(format) {
return errCorruptedFormat
}
// Check for format version is current.
if format.FS.Version != formatVersion {
return errFSFormatOld
}
return nil
}
// This is just kept as reference, there is no sanity
// check for FS format in version "1".
func checkFormatSanityFSV1(fsPath string) error {
return nil
}
// Check for sanity of FS format in version "2".
func checkFormatSanityFSV2(fsPath string) error {
buckets, err := readDir(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix))
if err != nil && err != errFileNotFound {
return err return err
} }
// Attempt to validate all the buckets have a sanitized backend.
for _, bucket := range buckets {
entries, rerr := readDir(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix, bucket))
if rerr != nil {
return rerr
}
var expectedConfigs = append(bucketMetadataConfigs, objectMetaPrefix+"/")
entriesSet := set.CreateStringSet(entries...)
expectedConfigsSet := set.CreateStringSet(expectedConfigs...)
// Entries found shouldn't be more than total
// expected config directories, files.
if len(entriesSet) > len(expectedConfigsSet) {
return errCorruptedFormat
}
// Look for the difference between entries and the
// expected config set, resulting entries if they
// intersect with original entries set we know
// that the backend has unexpected files.
if !entriesSet.Difference(expectedConfigsSet).IsEmpty() {
return errCorruptedFormat
}
}
return nil
}
// Check for sanity of FS format for a given version.
func checkFormatSanityFS(fsPath string, fsFormatVersion string) (err error) {
switch fsFormatVersion {
case fsFormatV2:
err = checkFormatSanityFSV2(fsPath)
default:
err = errCorruptedFormat
}
return err return err
} }
// Initializes a new `format.json` if not present, validates `format.json`
// if already present and migrates to newer version if necessary. Returns
// the final format version.
func initFormatFS(fsPath, fsUUID string) (err error) { func initFormatFS(fsPath, fsUUID string) (err error) {
fsFormatPath := pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile) fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile)
// fsFormatJSONFile - format.json file stored in minioMetaBucket(.minio.sys) directory. // Once the filesystem has initialized hold the read lock for
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600) // the life time of the server. This is done to ensure that under
if err != nil { // shared backend mode for FS, remote servers do not migrate
return traceError(err) // or cause changes on backend format.
}
defer lk.Close()
var format = &formatConfigV1{} // This loop validates format.json by holding a read lock and
_, err = format.ReadFrom(lk) // proceeds if disk unformatted to hold non-blocking WriteLock
// For all unexpected errors, we return. // If for some reason non-blocking WriteLock fails and the error
if err != nil && errorCause(err) != io.EOF { // is lock.ErrAlreadyLocked i.e some other process is holding a
return traceError(fmt.Errorf("Unable to load 'format.json', %s", err)) // lock we retry in the loop again.
} var rlk *lock.RLockedFile
for {
// Validate the `format.json` for expected values.
rlk, err = checkLockedValidFormatFS(fsPath)
switch {
case err == nil:
// Holding a read lock ensures that any write lock operation
// is blocked if attempted in-turn avoiding corruption on
// the backend disk.
_ = rlk // Hold the lock on `format.json` until server dies.
return nil
case errorCause(err) == errUnformattedDisk:
// Attempt a write lock on formatConfigFile `format.json`
// file stored in minioMetaBucket(.minio.sys) directory.
var lk *lock.LockedFile
lk, err = lock.TryLockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
// Existing write locks detected.
if err == lock.ErrAlreadyLocked {
// Lock already present, sleep and attempt again.
time.Sleep(100 * time.Millisecond)
continue
}
// If we couldn't read anything, The disk is unformatted. // Unexpected error, return.
if errorCause(err) == io.EOF { return traceError(err)
err = errUnformattedDisk }
format = newFSFormatV2()
} else { // Write new format.
// Validate loaded `format.json`. if err = writeFormatFS(lk); err != nil {
err = checkFormatFS(format, fsFormatVersion) return err
if err != nil && err != errFSFormatOld { }
return traceError(fmt.Errorf("Unable to validate 'format.json', %s", err)) // Loop will continue to attempt a
// read-lock on `format.json` .
default:
// Unhandled error return.
return err
} }
} }
// Disk is in old format migrate object metadata.
if err == errFSFormatOld {
if merr := migrateFSObject(fsPath, fsUUID); merr != nil {
return merr
}
// Initialize format v2.
format = newFSFormatV2()
}
// Rewrite or write format.json depending on if disk
// unformatted and if format is old.
if err == errUnformattedDisk || err == errFSFormatOld {
if _, err = format.WriteTo(lk); err != nil {
return traceError(fmt.Errorf("Unable to initialize 'format.json', %s", err))
}
}
// Check for sanity.
return checkFormatSanityFS(fsPath, format.FS.Version)
} }
// Return if the part info in uploadedParts and completeParts are same. // Return if the part info in uploadedParts and completeParts are same.

View File

@ -58,7 +58,7 @@ func TestReadFSMetadata(t *testing.T) {
} }
// Construct the full path of fs.json // Construct the full path of fs.json
fsPath := pathJoin(bucketMetaPrefix, bucketName, objectMetaPrefix, objectName, "fs.json") fsPath := pathJoin(bucketMetaPrefix, bucketName, objectName, "fs.json")
fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath) fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath)
rlk, err := fs.rwPool.Open(fsPath) rlk, err := fs.rwPool.Open(fsPath)
@ -95,7 +95,7 @@ func TestWriteFSMetadata(t *testing.T) {
} }
// Construct the full path of fs.json // Construct the full path of fs.json
fsPath := pathJoin(bucketMetaPrefix, bucketName, objectMetaPrefix, objectName, "fs.json") fsPath := pathJoin(bucketMetaPrefix, bucketName, objectName, "fs.json")
fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath) fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath)
rlk, err := fs.rwPool.Open(fsPath) rlk, err := fs.rwPool.Open(fsPath)

View File

@ -759,7 +759,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Wait for any competing PutObject() operation on bucket/object, since same namespace // Wait for any competing PutObject() operation on bucket/object, since same namespace
// would be acquired for `fs.json`. // would be acquired for `fs.json`.
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
metaFile, err := fs.rwPool.Create(fsMetaPath) metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
fs.rwPool.Close(fsMetaPathMultipart) fs.rwPool.Close(fsMetaPathMultipart)

View File

@ -24,7 +24,6 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"os/signal"
"path" "path"
"path/filepath" "path/filepath"
"sort" "sort"
@ -74,144 +73,15 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
} }
// Migrate FS object is a place holder code for all
// FS format migrations.
func migrateFSObject(fsPath, fsUUID string) (err error) {
// Writing message here is important for servers being upgraded.
log.Println("Please do not stop the server.")
ch := make(chan os.Signal)
defer signal.Stop(ch)
defer close(ch)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
go func() {
for {
_, ok := <-ch
if !ok {
break
}
log.Println("Please wait server is being upgraded..")
}
}()
return migrateFSFormatV1ToV2(fsPath, fsUUID)
}
// List all buckets at meta bucket prefix in `.minio.sys/buckets/` path.
// This is implemented to avoid a bug on windows with using readDir().
func fsReaddirMetaBuckets(fsPath string) ([]string, error) {
f, err := os.Open(preparePath(pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix)))
if err != nil {
if os.IsNotExist(err) {
return nil, errFileNotFound
} else if os.IsPermission(err) {
return nil, errFileAccessDenied
}
return nil, err
}
return f.Readdirnames(-1)
}
// List of all bucket metadata configs.
var bucketMetadataConfigs = []string{
bucketNotificationConfig,
bucketListenerConfig,
bucketPolicyConfig,
}
// Migrates bucket metadata configs, ignores all other files.
func migrateBucketMetadataConfigs(metaBucket, bucket, tmpBucket string) error {
for _, bucketMetaFile := range bucketMetadataConfigs {
fi, err := fsStat(pathJoin(metaBucket, tmpBucket, bucketMetaFile))
if err != nil {
// There are no such files or directories found,
// proceed to next bucket metadata config.
if os.IsNotExist(errorCause(err)) {
continue
}
return err
}
// Bucket metadata is a file, move it as an actual bucket config.
if fi.Mode().IsRegular() {
if err = fsRenameFile(pathJoin(metaBucket, tmpBucket, bucketMetaFile),
pathJoin(metaBucket, bucket, bucketMetaFile)); err != nil {
if errorCause(err) != errFileNotFound {
return err
}
}
}
// All other file types are ignored.
}
// Success.
return nil
}
// Attempts to migrate old object metadata files to newer format
//
// i.e
// -------------------------------------------------------
// .minio.sys/buckets/<bucket_name>/<object_path>/fs.json - V1
// -------------------------------------------------------
// .minio.sys/buckets/<bucket_name>/objects/<object_path>/fs.json - V2
// -------------------------------------------------------
//
func migrateFSFormatV1ToV2(fsPath, fsUUID string) (err error) {
metaBucket := pathJoin(fsPath, minioMetaBucket, bucketConfigPrefix)
var buckets []string
buckets, err = fsReaddirMetaBuckets(fsPath)
if err != nil && err != errFileNotFound {
return err
}
// Migrate all buckets present.
for _, bucket := range buckets {
// Temporary bucket of form .UUID-bucket.
tmpBucket := fmt.Sprintf(".%s-%s", fsUUID, bucket)
// Rename existing bucket as `.UUID-bucket`.
if err = fsRenameFile(pathJoin(metaBucket, bucket), pathJoin(metaBucket, tmpBucket)); err != nil {
return err
}
// Create a new bucket name with name as `bucket`.
if err = fsMkdir(pathJoin(metaBucket, bucket)); err != nil {
return err
}
// Migrate all the bucket metadata configs.
if err = migrateBucketMetadataConfigs(metaBucket, bucket, tmpBucket); err != nil {
return err
}
// Finally rename the temporary bucket to `bucket/objects` directory.
if err = fsRenameFile(pathJoin(metaBucket, tmpBucket),
pathJoin(metaBucket, bucket, objectMetaPrefix)); err != nil {
if errorCause(err) != errFileNotFound {
return err
}
}
}
log.Printf("Migrating bucket metadata format from \"%s\" to newer format \"%s\"... completed successfully.", fsFormatV1, fsFormatV2)
// If all goes well we return success.
return nil
}
// newFSObjectLayer - initialize new fs object layer. // newFSObjectLayer - initialize new fs object layer.
func newFSObjectLayer(fsPath string) (ObjectLayer, error) { func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
if fsPath == "" { if fsPath == "" {
return nil, errInvalidArgument return nil, errInvalidArgument
} }
var err error
// Disallow relative paths, figure out absolute paths. // Disallow relative paths, figure out absolute paths.
fsPath, err := filepath.Abs(fsPath) fsPath, err = filepath.Abs(fsPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -257,12 +127,6 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
return nil, err return nil, err
} }
// Once initialized hold read lock for the entire operation
// of filesystem backend.
if _, err = fs.rwPool.Open(pathJoin(fsPath, minioMetaBucket, fsFormatJSONFile)); err != nil {
return nil, err
}
// Initialize and load bucket policies. // Initialize and load bucket policies.
err = initBucketPolicies(fs) err = initBucketPolicies(fs)
if err != nil { if err != nil {
@ -281,9 +145,6 @@ func newFSObjectLayer(fsPath string) (ObjectLayer, error) {
// Should be called when process shuts down. // Should be called when process shuts down.
func (fs fsObjects) Shutdown() error { func (fs fsObjects) Shutdown() error {
// Close the format.json read lock.
fs.rwPool.Close(pathJoin(fs.fsPath, minioMetaBucket, fsFormatJSONFile))
// Cleanup and delete tmp uuid. // Cleanup and delete tmp uuid.
return fsRemoveAll(pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)) return fsRemoveAll(pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID))
} }
@ -363,7 +224,7 @@ func (fs fsObjects) ListBuckets() ([]BucketInfo, error) {
return nil, traceError(err) return nil, traceError(err)
} }
var bucketInfos []BucketInfo var bucketInfos []BucketInfo
entries, err := readDir(fs.fsPath) entries, err := readDir(preparePath(fs.fsPath))
if err != nil { if err != nil {
return nil, toObjectErr(traceError(errDiskNotFound)) return nil, toObjectErr(traceError(errDiskNotFound))
} }
@ -447,7 +308,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
// Check if this request is only metadata update. // Check if this request is only metadata update.
cpMetadataOnly := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) cpMetadataOnly := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
if cpMetadataOnly { if cpMetadataOnly {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, objectMetaPrefix, srcObject, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fsMetaJSONFile)
var wlk *lock.LockedFile var wlk *lock.LockedFile
wlk, err = fs.rwPool.Write(fsMetaPath) wlk, err = fs.rwPool.Write(fsMetaPath)
if err != nil { if err != nil {
@ -520,7 +381,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
} }
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
_, err = fs.rwPool.Open(fsMetaPath) _, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return toObjectErr(traceError(err), bucket, object) return toObjectErr(traceError(err), bucket, object)
@ -562,7 +423,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo. // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) { func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
fsMeta := fsMetaV1{} fsMeta := fsMetaV1{}
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
// Read `fs.json` to perhaps contend with // Read `fs.json` to perhaps contend with
// parallel Put() operations. // parallel Put() operations.
@ -669,7 +530,7 @@ func (fs fsObjects) PutObject(bucket string, object string, size int64, data io.
var wlk *lock.LockedFile var wlk *lock.LockedFile
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix) bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
fsMetaPath := pathJoin(bucketMetaDir, bucket, objectMetaPrefix, object, fsMetaJSONFile) fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fsMetaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath) wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object) return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
@ -796,7 +657,7 @@ func (fs fsObjects) DeleteObject(bucket, object string) error {
} }
minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket) minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket)
fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile) fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
rwlk, lerr := fs.rwPool.Write(fsMetaPath) rwlk, lerr := fs.rwPool.Write(fsMetaPath)
if lerr == nil { if lerr == nil {
@ -850,7 +711,7 @@ func (fs fsObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc {
// getObjectETag is a helper function, which returns only the md5sum // getObjectETag is a helper function, which returns only the md5sum
// of the file on the disk. // of the file on the disk.
func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) { func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, entry, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fsMetaJSONFile)
// Read `fs.json` to perhaps contend with // Read `fs.json` to perhaps contend with
// parallel Put() operations. // parallel Put() operations.

View File

@ -21,10 +21,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"testing" "testing"
"github.com/minio/minio/pkg/lock"
) )
// TestNewFS - tests initialization of all input disks // TestNewFS - tests initialization of all input disks
@ -88,516 +85,6 @@ func TestFSShutdown(t *testing.T) {
} }
} }
// Tests migrating FS format without .minio.sys/buckets.
func TestFSMigrateObjectWithoutObjects(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)
// Assign a new UUID.
uuid := mustGetUUID()
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}
fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
formatCfg := &formatConfigV1{
Version: "1",
Format: "fs",
FS: &fsFormat{
Version: "1",
},
}
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
if err = initFormatFS(disk, uuid); err != nil {
t.Fatal("Should not fail with unexpected", err)
}
formatCfg = &formatConfigV1{}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.ReadFrom(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
if formatCfg.FS.Version != fsFormatV2 {
t.Fatalf("Unexpected version detected expected \"%s\", got %s", fsFormatV2, formatCfg.FS.Version)
}
}
// Tests migrating FS format without .minio.sys/buckets.
func TestFSMigrateObjectWithErr(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)
// Assign a new UUID.
uuid := mustGetUUID()
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}
fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
formatCfg := &formatConfigV1{
Version: "1",
Format: "fs",
FS: &fsFormat{
Version: "10",
},
}
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
if err = initFormatFS(disk, uuid); err != nil {
if !strings.Contains(errorCause(err).Error(), "Unable to validate 'format.json', corrupted backend format") {
t.Fatal("Should not fail with unexpected", err)
}
}
fsFormatPath = pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
formatCfg = &formatConfigV1{
Version: "1",
Format: "garbage",
FS: &fsFormat{
Version: "1",
},
}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
if err = initFormatFS(disk, uuid); err != nil {
if errorCause(err).Error() !=
"Unable to validate 'format.json', Unable to recognize backend format, Disk is not in FS format. garbage" {
t.Fatal("Should not fail with unexpected", err)
}
}
}
// Tests migrating FS format with .minio.sys/buckets filled with
// objects such as policy.json/fs.json, notification.xml/fs.json
// listener.json/fs.json.
func TestFSMigrateObjectWithBucketConfigObjects(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)
// Assign a new UUID.
uuid := mustGetUUID()
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}
fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
formatCfg := &formatConfigV1{
Version: "1",
Format: "fs",
FS: &fsFormat{
Version: "1",
},
}
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
// Construct the full path of fs.json
fsPath1 := pathJoin(bucketMetaPrefix, "testvolume1", bucketPolicyConfig, fsMetaJSONFile)
fsPath1 = pathJoin(disk, minioMetaBucket, fsPath1)
fsMetaJSON := `{"version":"1.0.0","format":"fs","minio":{"release":"DEVELOPMENT.2017-03-27T02-26-33Z"},"meta":{"etag":"467886be95c8ecfd71a2900e3f461b4f"}`
if _, err = fsCreateFile(fsPath1, bytes.NewReader([]byte(fsMetaJSON)), nil, 0); err != nil {
t.Fatal(err)
}
// Construct the full path of fs.json
fsPath2 := pathJoin(bucketMetaPrefix, "testvolume2", bucketNotificationConfig, fsMetaJSONFile)
fsPath2 = pathJoin(disk, minioMetaBucket, fsPath2)
fsMetaJSON = `{"version":"1.0.0","format":"fs","minio":{"release":"DEVELOPMENT.2017-03-27T02-26-33Z"},"meta":{"etag":"467886be95c8ecfd71a2900eff461b4d"}`
if _, err = fsCreateFile(fsPath2, bytes.NewReader([]byte(fsMetaJSON)), nil, 0); err != nil {
t.Fatal(err)
}
// Construct the full path of fs.json
fsPath3 := pathJoin(bucketMetaPrefix, "testvolume3", bucketListenerConfig, fsMetaJSONFile)
fsPath3 = pathJoin(disk, minioMetaBucket, fsPath3)
fsMetaJSON = `{"version":"1.0.0","format":"fs","minio":{"release":"DEVELOPMENT.2017-03-27T02-26-33Z"},"meta":{"etag":"467886be95c8ecfd71a2900eff461b4d"}`
if _, err = fsCreateFile(fsPath3, bytes.NewReader([]byte(fsMetaJSON)), nil, 0); err != nil {
t.Fatal(err)
}
if err = initFormatFS(disk, mustGetUUID()); err != nil {
t.Fatal("Should not fail here", err)
}
fsPath1 = pathJoin(bucketMetaPrefix, "testvolume1", objectMetaPrefix, bucketPolicyConfig, fsMetaJSONFile)
fsPath1 = pathJoin(disk, minioMetaBucket, fsPath1)
fi, err := fsStatFile(fsPath1)
if err != nil {
t.Fatal("Path should exist and accessible after migration", err)
}
if fi.IsDir() {
t.Fatalf("Unexpected path %s should be a file", fsPath1)
}
fsPath2 = pathJoin(bucketMetaPrefix, "testvolume2", objectMetaPrefix, bucketNotificationConfig, fsMetaJSONFile)
fsPath2 = pathJoin(disk, minioMetaBucket, fsPath2)
fi, err = fsStatFile(fsPath2)
if err != nil {
t.Fatal("Path should exist and accessible after migration", err)
}
if fi.IsDir() {
t.Fatalf("Unexpected path %s should be a file", fsPath2)
}
fsPath3 = pathJoin(bucketMetaPrefix, "testvolume3", objectMetaPrefix, bucketListenerConfig, fsMetaJSONFile)
fsPath3 = pathJoin(disk, minioMetaBucket, fsPath3)
fi, err = fsStatFile(fsPath3)
if err != nil {
t.Fatal("Path should exist and accessible after migration", err)
}
if fi.IsDir() {
t.Fatalf("Unexpected path %s should be a file", fsPath3)
}
formatCfg = &formatConfigV1{}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.ReadFrom(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
if formatCfg.FS.Version != fsFormatV2 {
t.Fatalf("Unexpected version detected expected \"%s\", got %s", fsFormatV2, formatCfg.FS.Version)
}
}
// Tests migrating FS format with .minio.sys/buckets filled with
// object metadata.
func TestFSMigrateObjectWithObjects(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)
// Assign a new UUID.
uuid := mustGetUUID()
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}
fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
formatCfg := &formatConfigV1{
Version: "1",
Format: "fs",
FS: &fsFormat{
Version: "1",
},
}
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
// Construct the full path of fs.json
fsPath1 := pathJoin(bucketMetaPrefix, "testvolume1", "my-object1", fsMetaJSONFile)
fsPath1 = pathJoin(disk, minioMetaBucket, fsPath1)
fsMetaJSON := `{"version":"1.0.0","format":"fs","minio":{"release":"DEVELOPMENT.2017-03-27T02-26-33Z"},"meta":{"etag":"467886be95c8ecfd71a2900e3f461b4f"}`
if _, err = fsCreateFile(fsPath1, bytes.NewReader([]byte(fsMetaJSON)), nil, 0); err != nil {
t.Fatal(err)
}
// Construct the full path of fs.json
fsPath2 := pathJoin(bucketMetaPrefix, "testvolume2", "my-object2", fsMetaJSONFile)
fsPath2 = pathJoin(disk, minioMetaBucket, fsPath2)
fsMetaJSON = `{"version":"1.0.0","format":"fs","minio":{"release":"DEVELOPMENT.2017-03-27T02-26-33Z"},"meta":{"etag":"467886be95c8ecfd71a2900eff461b4d"}`
if _, err = fsCreateFile(fsPath2, bytes.NewReader([]byte(fsMetaJSON)), nil, 0); err != nil {
t.Fatal(err)
}
// Construct the full path of policy.json
ppath := pathJoin(bucketMetaPrefix, "testvolume2", bucketPolicyConfig)
ppath = pathJoin(disk, minioMetaBucket, ppath)
policyJSON := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket/*"],"Sid":""}]}`
if _, err = fsCreateFile(ppath, bytes.NewReader([]byte(policyJSON)), nil, 0); err != nil {
t.Fatal(err)
}
if err = initFormatFS(disk, mustGetUUID()); err != nil {
t.Fatal("Should not fail here", err)
}
fsPath2 = pathJoin(bucketMetaPrefix, "testvolume2", objectMetaPrefix, "my-object2", fsMetaJSONFile)
fsPath2 = pathJoin(disk, minioMetaBucket, fsPath2)
fi, err := fsStatFile(fsPath2)
if err != nil {
t.Fatal("Path should exist and accessible after migration", err)
}
if fi.IsDir() {
t.Fatalf("Unexpected path %s should be a file", fsPath2)
}
formatCfg = &formatConfigV1{}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.ReadFrom(lk)
lk.Close()
if err != nil {
t.Fatal("Should not fail here", err)
}
if formatCfg.FS.Version != fsFormatV2 {
t.Fatalf("Unexpected version detected expected \"%s\", got %s", fsFormatV2, formatCfg.FS.Version)
}
ppath = pathJoin(bucketMetaPrefix, "testvolume2", "acl.json")
ppath = pathJoin(disk, minioMetaBucket, ppath)
if _, err = fsCreateFile(ppath, bytes.NewReader([]byte("")), nil, 0); err != nil {
t.Fatal(err)
}
if err = initFormatFS(disk, mustGetUUID()); errorCause(err) != errCorruptedFormat {
t.Fatal("Should not fail here", err)
}
}
// TestFSCheckFormatFSErr - test loadFormatFS loading older format.
func TestFSCheckFormatFSErr(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)
// Assign a new UUID.
uuid := mustGetUUID()
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}
fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
formatCfg := &formatConfigV1{
Version: "1",
Format: "fs",
FS: &fsFormat{
Version: "1",
},
}
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal(err)
}
formatCfg = &formatConfigV1{}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.ReadFrom(lk)
lk.Close()
if err != nil {
t.Fatal(err)
}
if err = checkFormatFS(formatCfg, fsFormatVersion); errorCause(err) != errFSFormatOld {
t.Fatal("Should not fail with unexpected", err)
}
formatCfg = &formatConfigV1{
Version: "1",
Format: "fs",
FS: &fsFormat{
Version: "10",
},
}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal(err)
}
if err = checkFormatFS(formatCfg, fsFormatVersion); errorCause(err) != errCorruptedFormat {
t.Fatal("Should not fail with unexpected", err)
}
formatCfg = &formatConfigV1{
Version: "1",
Format: "garbage",
FS: &fsFormat{
Version: "1",
},
}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal(err)
}
if err = checkFormatFS(formatCfg, fsFormatVersion); err != nil {
if errorCause(err).Error() != "Unable to recognize backend format, Disk is not in FS format. garbage" {
t.Fatal("Should not fail with unexpected", err)
}
}
if err = checkFormatFS(nil, fsFormatVersion); errorCause(err) != errUnexpected {
t.Fatal("Should fail with errUnexpected, but found", err)
}
formatCfg = &formatConfigV1{
Version: "1",
Format: "fs",
FS: &fsFormat{
Version: "2",
},
}
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
_, err = formatCfg.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal(err)
}
// Should not fail.
if err = checkFormatFS(formatCfg, fsFormatVersion); err != nil {
t.Fatal(err)
}
}
// TestFSCheckFormatFS - test loadFormatFS with healty and faulty disks
func TestFSCheckFormatFS(t *testing.T) {
// Prepare for testing
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer removeAll(disk)
// Assign a new UUID.
uuid := mustGetUUID()
// Initialize meta volume, if volume already exists ignores it.
if err := initMetaVolumeFS(disk, uuid); err != nil {
t.Fatal(err)
}
fsFormatPath := pathJoin(disk, minioMetaBucket, fsFormatJSONFile)
lk, err := lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
format := newFSFormatV2()
_, err = format.WriteTo(lk)
lk.Close()
if err != nil {
t.Fatal(err)
}
// Loading corrupted format file
file, err := os.OpenFile(preparePath(fsFormatPath), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0666)
if err != nil {
t.Fatal("Should not fail here", err)
}
file.Write([]byte{'b'})
file.Close()
lk, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
t.Fatal(err)
}
format = &formatConfigV1{}
_, err = format.ReadFrom(lk)
lk.Close()
if err == nil {
t.Fatal("Should return an error here")
}
// Loading format file from disk not found.
removeAll(disk)
_, err = lock.LockedOpenFile(preparePath(fsFormatPath), os.O_RDONLY, 0600)
if err != nil && !os.IsNotExist(err) {
t.Fatal("Should return 'format.json' does not exist, but got", err)
}
}
// TestFSGetBucketInfo - test GetBucketInfo with healty and faulty disks // TestFSGetBucketInfo - test GetBucketInfo with healty and faulty disks
func TestFSGetBucketInfo(t *testing.T) { func TestFSGetBucketInfo(t *testing.T) {
// Prepare for testing // Prepare for testing
@ -633,7 +120,6 @@ func TestFSGetBucketInfo(t *testing.T) {
} }
} }
// Tests FS backend put object behavior.
func TestFSPutObject(t *testing.T) { func TestFSPutObject(t *testing.T) {
// Prepare for tests // Prepare for tests
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())

View File

@ -32,9 +32,6 @@ const (
// Buckets meta prefix. // Buckets meta prefix.
bucketMetaPrefix = "buckets" bucketMetaPrefix = "buckets"
// Objects meta prefix.
objectMetaPrefix = "objects"
// ETag (hex encoded md5sum) of empty string. // ETag (hex encoded md5sum) of empty string.
emptyETag = "d41d8cd98f00b204e9800998ecf8427e" emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
) )

View File

@ -32,8 +32,6 @@ func readDir(dirPath string) (entries []string, err error) {
// File is really not found. // File is really not found.
if os.IsNotExist(err) { if os.IsNotExist(err) {
return nil, errFileNotFound return nil, errFileNotFound
} else if os.IsPermission(err) {
return nil, errFileAccessDenied
} }
// File path cannot be verified since one of the parents is a file. // File path cannot be verified since one of the parents is a file.

View File

@ -293,7 +293,7 @@ func (s *posix) ListVols() (volsInfo []VolInfo, err error) {
return nil, err return nil, err
} }
volsInfo, err = listVols(s.diskPath) volsInfo, err = listVols(preparePath(s.diskPath))
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -277,7 +277,7 @@ func testMakeBucketWebHandler(obj ObjectLayer, instanceType string, t TestErrHan
{".", false}, {".", false},
{"ab", false}, {"ab", false},
{"minio", false}, {"minio", false},
{".minio.sys", false}, {minioMetaBucket, false},
{bucketName, true}, {bucketName, true},
} }

View File

@ -383,7 +383,7 @@ func healObject(storageDisks []StorageAPI, bucket string, object string, quorum
for index, disk := range outDatedDisks { for index, disk := range outDatedDisks {
// Before healing outdated disks, we need to remove xl.json // Before healing outdated disks, we need to remove xl.json
// and part files from "bucket/object/" so that // and part files from "bucket/object/" so that
// rename(".minio.sys", "tmp/tmpuuid/", "bucket", "object/") succeeds. // rename(minioMetaBucket, "tmp/tmpuuid/", "bucket", "object/") succeeds.
if disk == nil { if disk == nil {
// Not an outdated disk. // Not an outdated disk.
continue continue

View File

@ -121,7 +121,7 @@ func TestHealFormatXL(t *testing.T) {
} }
xl = obj.(*xlObjects) xl = obj.(*xlObjects)
for i := 0; i <= 15; i++ { for i := 0; i <= 15; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -142,7 +142,7 @@ func TestHealFormatXL(t *testing.T) {
} }
xl = obj.(*xlObjects) xl = obj.(*xlObjects)
for i := 0; i <= 15; i++ { for i := 0; i <= 15; i++ {
if err = xl.storageDisks[i].AppendFile(".minio.sys", "format.json", []byte("corrupted data")); err != nil { if err = xl.storageDisks[i].AppendFile(minioMetaBucket, formatConfigFile, []byte("corrupted data")); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -163,7 +163,7 @@ func TestHealFormatXL(t *testing.T) {
} }
xl = obj.(*xlObjects) xl = obj.(*xlObjects)
for i := 0; i <= 2; i++ { for i := 0; i <= 2; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -184,7 +184,7 @@ func TestHealFormatXL(t *testing.T) {
} }
xl = obj.(*xlObjects) xl = obj.(*xlObjects)
for i := 0; i <= 2; i++ { for i := 0; i <= 2; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }
@ -216,7 +216,7 @@ func TestHealFormatXL(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
for i := 0; i <= 2; i++ { for i := 0; i <= 2; i++ {
if err = xl.storageDisks[i].DeleteFile(".minio.sys", "format.json"); err != nil { if err = xl.storageDisks[i].DeleteFile(minioMetaBucket, formatConfigFile); err != nil {
t.Fatal(err) t.Fatal(err)
} }
} }

View File

@ -29,12 +29,6 @@ import (
// XL constants. // XL constants.
const ( const (
// Format config file carries backend format specific details.
formatConfigFile = "format.json"
// Format config tmp file carries backend format.
formatConfigFileTmp = "format.json.tmp"
// XL metadata file carries per object metadata. // XL metadata file carries per object metadata.
xlMetaJSONFile = "xl.json" xlMetaJSONFile = "xl.json"

View File

@ -81,7 +81,7 @@ An example here shows how the contention is handled with GetObject().
GetObject() holds a read lock on `fs.json`. GetObject() holds a read lock on `fs.json`.
```go ```go
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
rlk, err := fs.rwPool.Open(fsMetaPath) rlk, err := fs.rwPool.Open(fsMetaPath)
if err != nil { if err != nil {
return toObjectErr(traceError(err), bucket, object) return toObjectErr(traceError(err), bucket, object)
@ -98,7 +98,7 @@ GetObject() holds a read lock on `fs.json`.
A concurrent PutObject is requested on the same object, PutObject() attempts a write lock on `fs.json`. A concurrent PutObject is requested on the same object, PutObject() attempts a write lock on `fs.json`.
```go ```go
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, objectMetaPrefix, object, fsMetaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
wlk, err := fs.rwPool.Create(fsMetaPath) wlk, err := fs.rwPool.Create(fsMetaPath)
if err != nil { if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)

View File

@ -19,10 +19,16 @@
package lock package lock
import ( import (
"errors"
"os" "os"
"sync" "sync"
) )
var (
// ErrAlreadyLocked is returned if the underlying fd is already locked.
ErrAlreadyLocked = errors.New("file already locked")
)
// RLockedFile represents a read locked file, implements a special // RLockedFile represents a read locked file, implements a special
// closer which only closes the associated *os.File when the ref count. // closer which only closes the associated *os.File when the ref count.
// has reached zero, i.e when all the readers have given up their locks. // has reached zero, i.e when all the readers have given up their locks.

View File

@ -24,16 +24,12 @@ import (
"syscall" "syscall"
) )
// LockedOpenFile - initializes a new lock and protects // Internal function implements support for both
// the file from concurrent access across mount points. // blocking and non blocking lock type.
// This implementation doesn't support all the open func lockedOpenFile(path string, flag int, perm os.FileMode, lockType int) (*LockedFile, error) {
// flags and shouldn't be considered as replacement
// for os.OpenFile().
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
var lockType int
switch flag { switch flag {
case syscall.O_RDONLY: case syscall.O_RDONLY:
lockType = syscall.LOCK_SH lockType |= syscall.LOCK_SH
case syscall.O_WRONLY: case syscall.O_WRONLY:
fallthrough fallthrough
case syscall.O_RDWR: case syscall.O_RDWR:
@ -41,7 +37,7 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
case syscall.O_WRONLY | syscall.O_CREAT: case syscall.O_WRONLY | syscall.O_CREAT:
fallthrough fallthrough
case syscall.O_RDWR | syscall.O_CREAT: case syscall.O_RDWR | syscall.O_CREAT:
lockType = syscall.LOCK_EX lockType |= syscall.LOCK_EX
default: default:
return nil, fmt.Errorf("Unsupported flag (%d)", flag) return nil, fmt.Errorf("Unsupported flag (%d)", flag)
} }
@ -53,6 +49,9 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
if err = syscall.Flock(int(f.Fd()), lockType); err != nil { if err = syscall.Flock(int(f.Fd()), lockType); err != nil {
f.Close() f.Close()
if err == syscall.EWOULDBLOCK {
err = ErrAlreadyLocked
}
return nil, err return nil, err
} }
@ -73,3 +72,21 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
return &LockedFile{File: f}, nil return &LockedFile{File: f}, nil
} }
// TryLockedOpenFile - tries a new write lock, functionality
// it is similar to LockedOpenFile with with syscall.LOCK_EX
// mode but along with syscall.LOCK_NB such that the function
// doesn't wait forever but instead returns if it cannot
// acquire a write lock.
func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, syscall.LOCK_NB)
}
// LockedOpenFile - initializes a new lock and protects
// the file from concurrent access across mount points.
// This implementation doesn't support all the open
// flags and shouldn't be considered as replacement
// for os.OpenFile().
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, 0)
}

View File

@ -24,17 +24,8 @@ import (
"syscall" "syscall"
) )
// LockedOpenFile - initializes a new lock and protects // lockedOpenFile is an internal function.
// the file from concurrent access across mount points. func lockedOpenFile(path string, flag int, perm os.FileMode, rlockType int) (*LockedFile, error) {
// This implementation doesn't support all the open
// flags and shouldn't be considered as replacement
// for os.OpenFile().
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
var lockType int16 var lockType int16
switch flag { switch flag {
case syscall.O_RDONLY: case syscall.O_RDONLY:
@ -51,16 +42,24 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
return nil, fmt.Errorf("Unsupported flag (%d)", flag) return nil, fmt.Errorf("Unsupported flag (%d)", flag)
} }
lock.Type = lockType var lock = syscall.Flock_t{
lock.Whence = 0 Start: 0,
Len: 0,
Pid: 0,
Type: lockType,
Whence: 0,
}
f, err := os.OpenFile(path, flag, perm) f, err := os.OpenFile(path, flag, perm)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil { if err = syscall.FcntlFlock(f.Fd(), rlockType, &lock); err != nil {
f.Close() f.Close()
if err == syscall.EAGAIN {
err = ErrLocked
}
return nil, err return nil, err
} }
@ -81,3 +80,21 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
return &LockedFile{f}, nil return &LockedFile{f}, nil
} }
// TryLockedOpenFile - tries a new write lock, functionality
// it is similar to LockedOpenFile with with syscall.LOCK_EX
// mode but along with syscall.LOCK_NB such that the function
// doesn't wait forever but instead returns if it cannot
// acquire a write lock.
func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, syscall.F_SETLK)
}
// LockedOpenFile - initializes a new lock and protects
// the file from concurrent access across mount points.
// This implementation doesn't support all the open
// flags and shouldn't be considered as replacement
// for os.OpenFile().
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, syscall.F_SETLKW)
}

View File

@ -19,7 +19,6 @@
package lock package lock
import ( import (
"errors"
"fmt" "fmt"
"os" "os"
"syscall" "syscall"
@ -31,24 +30,25 @@ import (
var ( var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx") procLockFileEx = modkernel32.NewProc("LockFileEx")
errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.")
) )
const ( const (
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
lockFileExclusiveLock = 2
lockFileFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21 errLockViolation syscall.Errno = 0x21
) )
// LockedOpenFile - initializes a new lock and protects // lockedOpenFile is an internal function.
// the file from concurrent access. func lockedOpenFile(path string, flag int, perm os.FileMode, lockType uint32) (*LockedFile, error) {
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
f, err := open(path, flag, perm) f, err := open(path, flag, perm)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err = lockFile(syscall.Handle(f.Fd()), 0); err != nil { if err = lockFile(syscall.Handle(f.Fd()), lockType); err != nil {
f.Close() f.Close()
return nil, err return nil, err
} }
@ -71,6 +71,21 @@ func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error
return &LockedFile{File: f}, nil return &LockedFile{File: f}, nil
} }
// TryLockedOpenFile - tries a new write lock, functionality
// it is similar to LockedOpenFile with with syscall.LOCK_EX
// mode but along with syscall.LOCK_NB such that the function
// doesn't wait forever but instead returns if it cannot
// acquire a write lock.
func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, lockFileFailImmediately)
}
// LockedOpenFile - initializes a new lock and protects
// the file from concurrent access.
func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
return lockedOpenFile(path, flag, perm, 0)
}
// perm param is ignored, on windows file perms/NT acls // perm param is ignored, on windows file perms/NT acls
// are not octet combinations. Providing access to NT // are not octet combinations. Providing access to NT
// acls is out of scope here. // acls is out of scope here.
@ -121,7 +136,7 @@ func open(path string, flag int, perm os.FileMode) (*os.File, error) {
func lockFile(fd syscall.Handle, flags uint32) error { func lockFile(fd syscall.Handle, flags uint32) error {
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
var flag uint32 = 2 // Lockfile exlusive. var flag uint32 = lockFileExclusiveLock // Lockfile exlusive.
flag |= flags flag |= flags
if fd == syscall.InvalidHandle { if fd == syscall.InvalidHandle {
@ -131,8 +146,8 @@ func lockFile(fd syscall.Handle, flags uint32) error {
err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
if err == nil { if err == nil {
return nil return nil
} else if err.Error() == errLocked.Error() { } else if err.Error() == "The process cannot access the file because another process has locked a portion of the file." {
return errors.New("lock already acquired") return ErrAlreadyLocked
} else if err != errLockViolation { } else if err != errLockViolation {
return err return err
} }