Fix backend format for disk-cache - not to use FS format.json (#5732)

Krishna Srinivas 2018-03-29 14:38:26 -07:00 committed by Dee Koder
parent 328076f773
commit 804a4f9c15
12 changed files with 204 additions and 166 deletions

cmd/common-main.go

@@ -20,6 +20,7 @@ import (
 	"errors"
 	"os"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"time"
@@ -125,21 +126,22 @@ func handleCommonEnvVars() {
 	}
 	if drives := os.Getenv("MINIO_CACHE_DRIVES"); drives != "" {
-		driveList, err := parseCacheDrives(drives)
-		fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_DRIVES")
+		driveList, err := parseCacheDrives(strings.Split(drives, cacheEnvDelimiter))
+		fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_DRIVES %s.", drives)
 		globalCacheDrives = driveList
 		globalIsDiskCacheEnabled = true
 	}
 	if excludes := os.Getenv("MINIO_CACHE_EXCLUDE"); excludes != "" {
-		excludeList, err := parseCacheExcludes(excludes)
-		fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXCLUDE")
+		excludeList, err := parseCacheExcludes(strings.Split(excludes, cacheEnvDelimiter))
+		fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXCLUDE %s.", excludes)
 		globalCacheExcludes = excludeList
 	}
 	if expiryStr := os.Getenv("MINIO_CACHE_EXPIRY"); expiryStr != "" {
-		expiry, err := parseCacheExpiry(expiryStr)
-		fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXPIRY")
+		expiry, err := strconv.Atoi(expiryStr)
+		fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXPIRY %s.", expiryStr)
 		globalCacheExpiry = expiry
 	}
 	// In place update is true by default if the MINIO_UPDATE is not set
 	// or is not set to 'off', if MINIO_UPDATE is set to 'off' then
 	// in-place update is off.
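For context, a hypothetical shell setup that exercises these variables; the drive paths, exclude patterns, and 90-day expiry are illustrative, and the ";" separator matches the cacheEnvDelimiter constant introduced in this commit:

    export MINIO_CACHE_DRIVES="/mnt/cache1;/mnt/cache2"
    export MINIO_CACHE_EXCLUDE="*.pdf;mybucket/*"
    export MINIO_CACHE_EXPIRY="90"

With the new code each value is split on the delimiter first and then validated by parseCacheDrives/parseCacheExcludes, so a relative drive path or an exclude pattern starting with "/" aborts startup via fatalIf.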

cmd/config-current.go

@@ -113,14 +113,7 @@ func (s *serverConfig) SetCacheConfig(drives, exclude []string, expiry int) {
 // GetCacheConfig gets the current cache config
 func (s *serverConfig) GetCacheConfig() CacheConfig {
-	if s.Cache.Drives != nil {
-		return CacheConfig{
-			Drives:  s.Cache.Drives,
-			Exclude: s.Cache.Exclude,
-			Expiry:  s.Cache.Expiry,
-		}
-	}
-	return CacheConfig{}
+	return s.Cache
 }

 // Save config.
@@ -247,6 +240,7 @@ func newConfig() error {
 	if globalIsDiskCacheEnabled {
 		srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry)
 	}
+
 	// hold the mutex lock before a new config is assigned.
 	// Save the new config globally.
 	// unlock the mutex.
@@ -377,6 +371,7 @@ func loadConfig() error {
 	if globalIsDiskCacheEnabled {
 		srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry)
 	}
+
 	// hold the mutex lock before a new config is assigned.
 	globalServerConfigMu.Lock()
 	globalServerConfig = srvCfg

cmd/config-migrate.go

@@ -1752,7 +1752,7 @@ func migrateV21ToV22() error {
 	srvConfig := &serverConfigV22{
 		Notify: notifier{},
 	}
-	srvConfig.Version = serverConfigVersion
+	srvConfig.Version = "22"
 	srvConfig.Credential = cv21.Credential
 	srvConfig.Region = cv21.Region
 	if srvConfig.Region == "" {
@@ -1856,7 +1856,7 @@ func migrateV22ToV23() error {
 	srvConfig := &serverConfigV23{
 		Notify: notifier{},
 	}
-	srvConfig.Version = serverConfigVersion
+	srvConfig.Version = "23"
 	srvConfig.Credential = cv22.Credential
 	srvConfig.Region = cv22.Region
 	if srvConfig.Region == "" {

cmd/disk-cache-config.go

@@ -17,48 +17,58 @@
 package cmd

 import (
-	"errors"
-	"strconv"
-	"strings"
+	"encoding/json"
+	"fmt"
+	"path/filepath"
 )

 // CacheConfig represents cache config settings
 type CacheConfig struct {
-	Drives  []string
-	Expiry  int
-	Exclude []string
+	Drives  []string `json:"drives"`
+	Expiry  int      `json:"expiry"`
+	Exclude []string `json:"exclude"`
+}
+
+// UnmarshalJSON - implements JSON unmarshal interface for unmarshalling
+// json entries for CacheConfig.
+func (cfg *CacheConfig) UnmarshalJSON(data []byte) (err error) {
+	type Alias CacheConfig
+	var _cfg = &struct {
+		*Alias
+	}{
+		Alias: (*Alias)(cfg),
+	}
+	if err = json.Unmarshal(data, _cfg); err != nil {
+		return err
+	}
+	if _, err = parseCacheDrives(_cfg.Drives); err != nil {
+		return err
+	}
+	if _, err = parseCacheExcludes(_cfg.Exclude); err != nil {
+		return err
+	}
+	return nil
 }

 // Parses given cacheDrivesEnv and returns a list of cache drives.
-func parseCacheDrives(cacheDrivesEnv string) ([]string, error) {
-	cacheDrivesEnv = strings.ToLower(cacheDrivesEnv)
-	s := strings.Split(cacheDrivesEnv, ";")
-	c2 := make([]string, 0)
-	for _, d := range s {
-		if len(d) > 0 {
-			c2 = append(c2, d)
+func parseCacheDrives(drives []string) ([]string, error) {
+	for _, d := range drives {
+		if !filepath.IsAbs(d) {
+			return nil, fmt.Errorf("cache dir should be absolute path: %s", d)
 		}
 	}
-	return c2, nil
+	return drives, nil
 }

 // Parses given cacheExcludesEnv and returns a list of cache exclude patterns.
-func parseCacheExcludes(cacheExcludesEnv string) ([]string, error) {
-	s := strings.Split(cacheExcludesEnv, ";")
-	c2 := make([]string, 0)
-	for _, e := range s {
-		if len(e) > 0 {
-			if strings.HasPrefix(e, "/") {
-				return c2, errors.New("cache exclude patterns cannot start with / as prefix " + e)
-			}
-			c2 = append(c2, e)
+func parseCacheExcludes(excludes []string) ([]string, error) {
+	for _, e := range excludes {
+		if len(e) == 0 {
+			return nil, fmt.Errorf("cache exclude path (%s) cannot be empty", e)
+		}
+		if hasPrefix(e, slashSeparator) {
+			return nil, fmt.Errorf("cache exclude pattern (%s) cannot start with / as prefix", e)
 		}
 	}
-	return c2, nil
-}
-
-// Parses given cacheExpiryEnv and returns cache expiry in days.
-func parseCacheExpiry(cacheExpiryEnv string) (int, error) {
-	return strconv.Atoi(cacheExpiryEnv)
+	return excludes, nil
 }
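Since CacheConfig now implements UnmarshalJSON with validation, a malformed cache section is rejected as soon as config.json is loaded. A minimal sketch of a section that passes — drive paths must be absolute, exclude patterns must be non-empty and must not start with "/" (all values illustrative):

    "cache": {
        "drives": ["/mnt/cache1", "/mnt/cache2"],
        "expiry": 90,
        "exclude": ["*.pdf", "mybucket/*"]
    }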

cmd/disk-cache-config_test.go

@@ -18,9 +18,52 @@ package cmd

 import (
 	"reflect"
+	"runtime"
+	"strings"
 	"testing"
 )

+// Tests cache drive parsing.
+func TestParseCacheDrives(t *testing.T) {
+	testCases := []struct {
+		driveStr         string
+		expectedPatterns []string
+		success          bool
+	}{
+		// invalid input: relative paths are rejected
+		{"bucket1/*;*.png;images/trip/barcelona/*", []string{}, false},
+		{"bucket1", []string{}, false},
+	}
+	if runtime.GOOS == globalWindowsOSName {
+		testCases = append(testCases, struct {
+			driveStr         string
+			expectedPatterns []string
+			success          bool
+		}{"C:/home/drive1;C:/home/drive2;C:/home/drive3", []string{"C:/home/drive1", "C:/home/drive2", "C:/home/drive3"}, true})
+	} else {
+		testCases = append(testCases, struct {
+			driveStr         string
+			expectedPatterns []string
+			success          bool
+		}{"/home/drive1;/home/drive2;/home/drive3", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
+	}
+	for i, testCase := range testCases {
+		drives, err := parseCacheDrives(strings.Split(testCase.driveStr, cacheEnvDelimiter))
+		if err != nil && testCase.success {
+			t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
+		}
+		if err == nil && !testCase.success {
+			t.Errorf("Test %d: Expected failure but passed instead", i+1)
+		}
+		if err == nil {
+			if !reflect.DeepEqual(drives, testCase.expectedPatterns) {
+				t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.expectedPatterns, drives)
+			}
+		}
+	}
+}
+
 // Tests cache exclude parsing.
 func TestParseCacheExclude(t *testing.T) {
 	testCases := []struct {
@@ -28,8 +71,6 @@ func TestParseCacheExclude(t *testing.T) {
 		expectedPatterns []string
 		success          bool
 	}{
-		// Empty input.
-		{"", []string{}, true},
 		// valid input
 		{"/home/drive1;/home/drive2;/home/drive3", []string{}, false},
 		{"bucket1/*;*.png;images/trip/barcelona/*", []string{"bucket1/*", "*.png", "images/trip/barcelona/*"}, true},
@@ -37,15 +78,17 @@
 	}

 	for i, testCase := range testCases {
-		excludes, err := parseCacheExcludes(testCase.excludeStr)
+		excludes, err := parseCacheExcludes(strings.Split(testCase.excludeStr, cacheEnvDelimiter))
 		if err != nil && testCase.success {
 			t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
 		}
 		if err == nil && !testCase.success {
 			t.Errorf("Test %d: Expected failure but passed instead", i+1)
 		}
-		if !reflect.DeepEqual(excludes, testCase.expectedPatterns) {
-			t.Errorf("Expected %v, got %v", testCase.expectedPatterns, excludes)
+		if err == nil {
+			if !reflect.DeepEqual(excludes, testCase.expectedPatterns) {
+				t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.expectedPatterns, excludes)
+			}
 		}
 	}
 }
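These parser tests can be run in isolation from the repository root with the standard Go tooling:

    go test -v -run 'TestParseCache(Drives|Exclude)' ./cmd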

cmd/disk-cache-fs.go

@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/hex"
 	"encoding/json"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -37,6 +38,8 @@ const (
 	// cache.json object metadata for cached objects.
 	cacheMetaJSONFile = "cache.json"
 	cacheMetaFormat   = "cache"
+
+	cacheEnvDelimiter = ";"
 )

 // cacheFSObjects implements the cache backend operations.
@@ -59,9 +62,13 @@ type cacheFSObjects struct {
 // Inits the cache directory if it is not init'ed already.
 // Initializing implies creation of new FS Object layer.
 func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObjects, error) {
-	obj, err := newFSObjects(dir, cacheMetaJSONFile)
-	if err != nil {
-		return nil, err
+	// Assign a new UUID for FS minio mode. Each server instance
+	// gets its own UUID for temporary file transaction.
+	fsUUID := mustGetUUID()
+
+	// Initialize meta volume, if volume already exists ignores it.
+	if err := initMetaVolumeFS(dir, fsUUID); err != nil {
+		return nil, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
 	}

 	trashPath := pathJoin(dir, minioMetaBucket, cacheTrashDir)
@@ -72,9 +79,23 @@ func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObj
 	if expiry == 0 {
 		expiry = globalCacheExpiry
 	}
-	var cacheFS cacheFSObjects
-	fsObjects := obj.(*FSObjects)
-	cacheFS = cacheFSObjects{
+
+	// Initialize fs objects.
+	fsObjects := &FSObjects{
+		fsPath:       dir,
+		metaJSONFile: cacheMetaJSONFile,
+		fsUUID:       fsUUID,
+		rwPool: &fsIOPool{
+			readersMap: make(map[string]*lock.RLockedFile),
+		},
+		nsMutex:       newNSLock(false),
+		listPool:      newTreeWalkPool(globalLookupTimeout),
+		appendFileMap: make(map[string]*fsAppendFile),
+	}
+
+	go fsObjects.cleanupStaleMultipartUploads(globalMultipartCleanupInterval, globalMultipartExpiry, globalServiceDoneCh)
+
+	cacheFS := cacheFSObjects{
 		FSObjects: fsObjects,
 		dir:       dir,
 		expiry:    expiry,
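Stepping back from the diff: newCacheFSObjects now builds the FSObjects value directly instead of going through newFSObjects, which is what lets the cache layer point metaJSONFile at cache.json. A hypothetical call for a single drive (the path and expiry are illustrative; cacheMaxDiskUsagePct is the package's disk-usage cap):

    cfs, err := newCacheFSObjects("/mnt/cache1", 90, cacheMaxDiskUsagePct)
    if err != nil {
        return nil, err
    }
    // cfs wraps a dedicated FS layer whose object metadata lives in
    // cache.json (cacheMetaJSONFile) rather than the FS backend's fs.json.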

cmd/disk-cache.go

@@ -18,15 +18,12 @@ package cmd

 import (
 	"context"
-	"crypto/sha256"
 	"errors"
 	"fmt"
-	"hash/crc32"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"os"
-	"path"
 	"sort"
 	"strconv"
 	"strings"
@@ -115,7 +112,6 @@ func backendDownError(err error) bool {
 // until an online drive is found. If object is not found, fall back to the first online cache drive
 // closest to the hash index, so that object can be recached.
 func (c diskCache) getCachedFSLoc(ctx context.Context, bucket, object string) (*cacheFSObjects, error) {
-
 	index := c.hashIndex(bucket, object)
 	numDisks := len(c.cfs)
 	// save first online cache disk closest to the hint index
@@ -145,7 +141,6 @@ func (c diskCache) getCachedFSLoc(ctx context.Context, bucket, object string) (*
 // a hint. In the event that the cache drive at hash index is offline, treat the list of cache drives
 // as a circular buffer and walk through them starting at hash index until an online drive is found.
 func (c diskCache) getCacheFS(ctx context.Context, bucket, object string) (*cacheFSObjects, error) {
-
 	index := c.hashIndex(bucket, object)
 	numDisks := len(c.cfs)
 	for k := 0; k < numDisks; k++ {
@@ -162,8 +157,7 @@ func (c diskCache) getCacheFS(ctx context.Context, bucket, object string) (*cach
 // Compute a unique hash sum for bucket and object
 func (c diskCache) hashIndex(bucket, object string) int {
-	key := fmt.Sprintf("%x", sha256.Sum256([]byte(path.Join(bucket, object))))
-	return int(crc32.Checksum([]byte(key), crc32.IEEETable)) % len(c.cfs)
+	return crcHashMod(pathJoin(bucket, object), len(c.cfs))
 }

 // construct a metadata k-v map
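crcHashMod itself is not part of this diff; it is assumed to be the same helper the erasure-set code in this package uses, roughly the following (requires hash/crc32):

    func crcHashMod(key string, cardinality int) int {
        if cardinality <= 0 {
            return -1
        }
        keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
        return int(keyCrc % uint32(cardinality))
    }

This drops the intermediate SHA256 step: hashing the joined "bucket/object" key once with CRC32 and reducing it modulo the drive count is enough to spread objects across cache drives deterministically.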
@@ -496,7 +490,6 @@ func (c cacheObjects) listBuckets(ctx context.Context) (buckets []BucketInfo, er
 // Returns list of buckets from cache or the backend. If the backend is down, buckets
 // available on cache are served.
 func (c cacheObjects) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
-
 	listBucketsFn := c.ListBucketsFn
 	buckets, err = listBucketsFn(ctx)
 	if err != nil {
@@ -510,7 +503,6 @@ func (c cacheObjects) ListBuckets(ctx context.Context) (buckets []BucketInfo, er
 // Returns bucket info from cache if backend is down.
 func (c cacheObjects) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
-
 	getBucketInfoFn := c.GetBucketInfoFn
 	bucketInfo, err = getBucketInfoFn(ctx, bucket)
 	if backendDownError(err) {
@@ -525,7 +517,6 @@ func (c cacheObjects) GetBucketInfo(ctx context.Context, bucket string) (bucketI
 // Delete Object deletes from cache as well if backend operation succeeds
 func (c cacheObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
-
 	if err = c.DeleteObjectFn(ctx, bucket, object); err != nil {
 		return
 	}
@@ -619,7 +610,6 @@ func (c cacheObjects) PutObject(ctx context.Context, bucket, object string, r *h
 // NewMultipartUpload - Starts a new multipart upload operation to backend and cache.
 func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
-
 	newMultipartUploadFn := c.NewMultipartUploadFn
 	if c.isCacheExclude(bucket, object) || filterFromCache(metadata) {
@@ -643,7 +633,6 @@ func (c cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object str
 // PutObjectPart - uploads part to backend and cache simultaneously.
 func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *hash.Reader) (info PartInfo, err error) {
-
 	putObjectPartFn := c.PutObjectPartFn
 	dcache, err := c.cache.getCacheFS(ctx, bucket, object)
 	if err != nil {
@@ -713,7 +702,6 @@ func (c cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadI
 // AbortMultipartUpload - aborts multipart upload on backend and cache.
 func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
-
 	abortMultipartUploadFn := c.AbortMultipartUploadFn
 	if c.isCacheExclude(bucket, object) {
@@ -737,7 +725,6 @@ func (c cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object,
 // CompleteMultipartUpload - completes multipart upload operation on backend and cache.
 func (c cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart) (objInfo ObjectInfo, err error) {
-
 	completeMultipartUploadFn := c.CompleteMultipartUploadFn
 	if c.isCacheExclude(bucket, object) {
@@ -802,33 +789,32 @@ func (c cacheObjects) DeleteBucket(ctx context.Context, bucket string) (err erro
 // newCache initializes the cacheFSObjects for the "drives" specified in config.json
 // or the global env overrides.
-func newCache(c CacheConfig) (*diskCache, error) {
+func newCache(config CacheConfig) (*diskCache, error) {
 	var cfsObjects []*cacheFSObjects
-	formats, err := loadAndValidateCacheFormat(c.Drives)
+	formats, err := loadAndValidateCacheFormat(config.Drives)
 	if err != nil {
-		errorIf(err, "Cache drives validation error")
-	}
-	if len(formats) == 0 {
-		return nil, errors.New("Cache drives validation error")
+		return nil, err
 	}
-	for i, dir := range c.Drives {
+	for i, dir := range config.Drives {
 		// skip cacheFSObjects creation for cache drives missing a format.json
 		if formats[i] == nil {
 			cfsObjects = append(cfsObjects, nil)
 			continue
 		}
-		c, err := newCacheFSObjects(dir, c.Expiry, cacheMaxDiskUsagePct)
-		if err != nil {
-			return nil, err
-		}
 		if err := checkAtimeSupport(dir); err != nil {
 			return nil, errors.New("Atime support required for disk caching")
 		}
+		cache, err := newCacheFSObjects(dir, config.Expiry, cacheMaxDiskUsagePct)
+		if err != nil {
+			return nil, err
+		}
 		// Start the purging go-routine for entries that have expired
-		go c.purge()
+		go cache.purge()
 		// Start trash purge routine for deleted buckets.
-		go c.purgeTrash()
-		cfsObjects = append(cfsObjects, c)
+		go cache.purgeTrash()
+
+		cfsObjects = append(cfsObjects, cache)
 	}
 	return &diskCache{cfs: cfsObjects}, nil
 }
@@ -857,17 +843,16 @@ func checkAtimeSupport(dir string) (err error) {
 }

 // Returns cacheObjects for use by Server.
-func newServerCacheObjects(c CacheConfig) (CacheObjectLayer, error) {
+func newServerCacheObjects(config CacheConfig) (CacheObjectLayer, error) {
 	// list of disk caches for cache "drives" specified in config.json or MINIO_CACHE_DRIVES env var.
-	dcache, err := newCache(c)
+	dcache, err := newCache(config)
 	if err != nil {
 		return nil, err
 	}
-
 	return &cacheObjects{
 		cache:    dcache,
-		exclude:  c.Exclude,
+		exclude:  config.Exclude,
 		listPool: newTreeWalkPool(globalLookupTimeout),
 		GetObjectFn: func(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) error {
 			return newObjectLayerFn().GetObject(ctx, bucket, object, startOffset, length, writer, etag)

cmd/format-disk-cache.go

@@ -21,9 +21,7 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path/filepath"
 	"reflect"
-	"syscall"

 	errors2 "github.com/minio/minio/pkg/errors"
 )
@@ -36,6 +34,8 @@ const (
 	formatCacheVersionV1 = "1"
 	formatMetaVersion1   = "1"
+
+	formatCacheV1DistributionAlgo = "CRCMOD"
 )

 // Represents the current cache structure with list of
@@ -49,6 +49,9 @@ type formatCacheV1 struct {
 		// Disks field carries the input disk order generated the first
 		// time when fresh disks were supplied.
 		Disks []string `json:"disks"`
+		// Distribution algorithm represents the hashing algorithm
+		// to pick the right set index for an object.
+		DistributionAlgo string `json:"distributionAlgo"`
 	} `json:"cache"` // Cache field holds cache format.
 }
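Taken together with newFormatCacheV1 below, every cache drive now carries its own <drive>/.minio.sys/format.json. A sketch of its contents — the UUIDs are illustrative, and the top-level version/format field names are assumed to follow the existing formatMetaV1 embedding:

    {
      "version": "1",
      "format": "cache",
      "cache": {
        "version": "1",
        "this": "5b1c3cea-52d9-4d10-92f1-166c455e7e90",
        "disks": ["5b1c3cea-52d9-4d10-92f1-166c455e7e90", "9e924802-0536-4b74-a9b4-2d1e33cbb22d"],
        "distributionAlgo": "CRCMOD"
      }
    }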
@@ -71,6 +74,7 @@ func newFormatCacheV1(drives []string) []*formatCacheV1 {
 		format.Version = formatMetaVersion1
 		format.Format = formatCache
 		format.Cache.Version = formatCacheVersionV1
+		format.Cache.DistributionAlgo = formatCacheV1DistributionAlgo
 		format.Cache.This = mustGetUUID()
 		formats[i] = format
 		disks[i] = formats[i].Cache.This
@@ -116,28 +120,25 @@ func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
 // of format cache config
 func initFormatCache(drives []string) (formats []*formatCacheV1, err error) {
 	nformats := newFormatCacheV1(drives)
-	for i, drive := range drives {
-		// Disallow relative paths, figure out absolute paths.
-		cfsPath, err := filepath.Abs(drive)
-		if err != nil {
+	for _, drive := range drives {
+		_, err = os.Stat(drive)
+		if err == nil {
+			continue
+		}
+		if !os.IsNotExist(err) {
 			return nil, err
 		}
-
-		fi, err := os.Stat(cfsPath)
-		if err == nil {
-			if !fi.IsDir() {
-				return nil, syscall.ENOTDIR
-			}
-		}
-		if os.IsNotExist(err) {
-			// Disk not found create it.
-			err = os.MkdirAll(cfsPath, 0777)
-			if err != nil {
-				return nil, err
-			}
+		if err = os.Mkdir(drive, 0777); err != nil {
+			return nil, err
 		}
-
-		cacheFormatPath := pathJoin(drive, formatConfigFile)
+	}
+	for i, drive := range drives {
+		if err = os.Mkdir(pathJoin(drive, minioMetaBucket), 0777); err != nil {
+			if !os.IsExist(err) {
+				return nil, err
+			}
+		}
+		cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
 		// Fresh disk - create format.json for this cfs
 		if err = createFormatCache(cacheFormatPath, nformats[i]); err != nil {
 			return nil, err
@@ -146,32 +147,25 @@ func initFormatCache(drives []string) (formats []*formatCacheV1, err error) {
 	return nformats, nil
 }

-func loadFormatCache(drives []string) (formats []*formatCacheV1, err error) {
-	var errs []error
-	for _, drive := range drives {
-		cacheFormatPath := pathJoin(drive, formatConfigFile)
-		f, perr := os.Open(cacheFormatPath)
-		if perr != nil {
-			formats = append(formats, nil)
-			errs = append(errs, perr)
-			continue
+func loadFormatCache(drives []string) ([]*formatCacheV1, error) {
+	formats := make([]*formatCacheV1, len(drives))
+	for i, drive := range drives {
+		cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
+		f, err := os.Open(cacheFormatPath)
+		if err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			return nil, err
 		}
 		defer f.Close()
-		format, perr := formatMetaCacheV1(f)
-		if perr != nil {
-			// format could not be unmarshalled.
-			formats = append(formats, nil)
-			errs = append(errs, perr)
+		format, err := formatMetaCacheV1(f)
+		if err != nil {
 			continue
 		}
-		formats = append(formats, format)
-	}
-	for _, perr := range errs {
-		if perr != nil {
-			err = perr
-		}
+		formats[i] = format
 	}
-	return formats, err
+	return formats, nil
 }

 // unmarshalls the cache format.json into formatCacheV1
@@ -198,7 +192,6 @@ func checkFormatCacheValue(format *formatCacheV1) error {
 }

 func checkFormatCacheValues(formats []*formatCacheV1) (int, error) {
-
 	for i, formatCache := range formats {
 		if formatCache == nil {
 			continue
@@ -276,6 +269,15 @@ func findCacheDiskIndex(disk string, disks []string) int {
 // validate whether cache drives order has changed
 func validateCacheFormats(formats []*formatCacheV1) error {
+	count := 0
+	for _, format := range formats {
+		if format == nil {
+			count++
+		}
+	}
+	if count == len(formats) {
+		return errors.New("Cache format files missing on all drives")
+	}
 	if _, err := checkFormatCacheValues(formats); err != nil {
 		return err
 	}
@@ -290,23 +292,9 @@
 func cacheDrivesUnformatted(drives []string) bool {
 	count := 0
 	for _, drive := range drives {
-		cacheFormatPath := pathJoin(drive, formatConfigFile)
-
-		// // Disallow relative paths, figure out absolute paths.
-		cfsPath, err := filepath.Abs(cacheFormatPath)
-		if err != nil {
-			continue
-		}
-
-		fi, err := os.Stat(cfsPath)
-		if err == nil {
-			if !fi.IsDir() {
-				continue
-			}
-		}
-		if os.IsNotExist(err) {
+		cacheFormatPath := pathJoin(drive, minioMetaBucket, formatConfigFile)
+		if _, err := os.Stat(cacheFormatPath); os.IsNotExist(err) {
 			count++
-			continue
 		}
 	}
 	return count == len(drives)
@@ -322,7 +310,10 @@ func loadAndValidateCacheFormat(drives []string) (formats []*formatCacheV1, err
 		formats, err = loadFormatCache(drives)
 	}
 	if err != nil {
-		return formats, err
+		return nil, err
 	}
-	return formats, validateCacheFormats(formats)
+	if err = validateCacheFormats(formats); err != nil {
+		return nil, err
+	}
+	return formats, nil
 }

cmd/format-disk-cache_test.go

@@ -27,17 +27,13 @@ func TestDiskCacheFormat(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	_, err = initDiskCaches(fsDirs, t)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// cformat := newFormatCacheV1([]string{cacheDataDir + "/format.json"})
 	_, err = initFormatCache(fsDirs)
 	if err != nil {
 		t.Fatal(err)
 	}
+
 	// Do the basic sanity checks to check if initFormatCache() did its job.
-	cacheFormatPath := pathJoin(fsDirs[0], formatConfigFile)
+	cacheFormatPath := pathJoin(fsDirs[0], minioMetaBucket, formatConfigFile)
 	f, err := os.OpenFile(cacheFormatPath, os.O_RDWR, 0)
 	if err != nil {
 		t.Fatal(err)

cmd/fs-v1.go

@@ -95,8 +95,8 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
 }

-// newFSObjects - initialize new fs object layer.
-func newFSObjects(fsPath, metaJSONFile string) (ObjectLayer, error) {
+// NewFSObjectLayer - initialize new fs object layer.
+func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
 	if fsPath == "" {
 		return nil, errInvalidArgument
 	}
@@ -150,7 +150,7 @@ func newFSObjects(fsPath, metaJSONFile string) (ObjectLayer, error) {
 	// Initialize fs objects.
 	fs := &FSObjects{
 		fsPath:       fsPath,
-		metaJSONFile: metaJSONFile,
+		metaJSONFile: fsMetaJSONFile,
 		fsUUID:       fsUUID,
 		rwPool: &fsIOPool{
 			readersMap: make(map[string]*lock.RLockedFile),
@@ -183,11 +183,6 @@ func newFSObjects(fsPath, metaJSONFile string) (ObjectLayer, error) {
 	return fs, nil
 }

-// NewFSObjectLayer - initialize new fs object layer.
-func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
-	return newFSObjects(fsPath, fsMetaJSONFile)
-}
-
 // Shutdown - should be called when process shuts down.
 func (fs *FSObjects) Shutdown(ctx context.Context) error {
 	fs.fsFormatRlk.Close()
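With newFSObjects folded away, NewFSObjectLayer is the single public constructor for a standalone FS layer; a hypothetical caller (the export path is illustrative):

    objLayer, err := NewFSObjectLayer("/export")
    if err != nil {
        return err
    }
    // objLayer's metadata file is always fs.json (fsMetaJSONFile); only the
    // disk-cache constructor above overrides it with cache.json.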

cmd/posix_test.go

@@ -1282,9 +1282,9 @@ func TestPosixAppendFile(t *testing.T) {
 		expectedErr error
 	}{"level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001/level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002/level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003/object000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", err})

-	for _, testCase := range testCases {
+	for i, testCase := range testCases {
 		if err = posixStorage.AppendFile("success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr {
-			t.Errorf("Case: %s, expected: %s, got: %s", testCase, testCase.expectedErr, err)
+			t.Errorf("Case: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err)
 		}
 	}
@@ -1373,9 +1373,9 @@ func TestPosixPrepareFile(t *testing.T) {
 		expectedErr error
 	}{"level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001/level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002/level0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003/object000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", err})

-	for _, testCase := range testCases {
+	for i, testCase := range testCases {
 		if err = posixStorage.PrepareFile("success-vol", testCase.fileName, 16); err != testCase.expectedErr {
-			t.Errorf("Case: %s, expected: %s, got: %s", testCase, testCase.expectedErr, err)
+			t.Errorf("Case: %d, expected: %s, got: %s", i, testCase.expectedErr, err)
 		}
 	}

cmd/update-main.go

@@ -244,7 +244,7 @@ func getUserAgent(mode string) string {
 	if mode != "" {
 		uaAppend("; ", mode)
 	}
-	if globalIsDiskCacheEnabled {
+	if len(globalCacheDrives) > 0 {
 		uaAppend("; ", "feature-cache")
 	}
 	if globalWORMEnabled {