Add disk-based edge caching support (#5182)

This PR adds disk-based edge caching support to the Minio server.

Cache settings can be configured in config.json as a list of cache drives, a
cache expiry in days, and file patterns to exclude from the cache, or via the
environment variables MINIO_CACHE_DRIVES, MINIO_CACHE_EXCLUDE and MINIO_CACHE_EXPIRY.
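
For illustration, the environment-variable form might look like this (paths and
values below are hypothetical; per the parsers added in cmd/disk-cache-config.go,
drive paths and exclude patterns are ";"-separated and expiry is an integer
number of days):

MINIO_CACHE_DRIVES="/mnt/cache1;/mnt/cache2"
MINIO_CACHE_EXCLUDE="mybucket/*;*.tmp"
MINIO_CACHE_EXPIRY="90"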

The design assumes that atime support is enabled and that the list of cache
drives is fixed.
 - Objects are cached on both GET and PUT/POST operations.
 - Expiry is used as a hint to evict older entries from the cache; eviction also
   kicks in once 80% of cache capacity is filled.
 - When the object storage backend is down, GET, LIST and HEAD operations are
   served seamlessly from the cache.

Current Limitations
 - Bucket policies are not cached, so anonymous operations are not supported in
   offline mode.
 - Objects are distributed among the configured cache drives using deterministic
   hashing. If one or more drives go offline, or the cache drive configuration is
   altered, performance can degrade to a linear lookup (see the sketch below).
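
A minimal sketch of that drive-selection logic, assuming an index-then-probe
scheme (the actual implementation lives in cmd/disk-cache.go, whose diff is
suppressed below; hashIndex, cfs, IsOnline and errDiskNotFound are names from
this PR, while pickCacheFS is a hypothetical stand-in for getCacheFS):

// Sketch only: deterministic index plus probing forward to the next
// online drive, mirroring the behavior exercised by TestGetCacheFS.
func (c *diskCache) pickCacheFS(bucket, object string) (*cacheFSObjects, error) {
	idx := c.hashIndex(bucket, object) // deterministic hash of bucket/object
	n := len(c.cfs)
	for i := 0; i < n; i++ {
		cfs := c.cfs[(idx+i)%n]
		if cfs.IsOnline() {
			return cfs, nil
		}
	}
	return nil, errDiskNotFound
}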

Fixes #4026
poornas 2018-03-28 14:14:06 -07:00 committed by kannappanr
parent 2f9354b17e
commit a3e806ed61
60 changed files with 3544 additions and 82 deletions

1
.gitignore vendored

@@ -22,4 +22,3 @@ parts/
prime/
stage/
.sia_temp/
buildcoveragecoverage.txt

cmd/api-errors.go

@@ -169,10 +169,9 @@ const (
ErrOperationTimedOut
ErrPartsSizeUnequal
ErrInvalidRequest
// Minio storage class error codes
ErrInvalidStorageClass
ErrBackendDown
// Add new extended error codes here.
// Please open a https://github.com/minio/minio/issues before adding
// new error codes here.
@@ -831,6 +830,11 @@ var errorCodeResponse = map[APIErrorCode]APIError{
Description: "",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBackendDown: {
Code: "XMinioBackendDown",
Description: "Object storage backend is unreachable",
HTTPStatusCode: http.StatusServiceUnavailable,
},
// Add your error structure here.
}
@@ -975,6 +979,8 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) {
apiErr = ErrOverlappingFilterNotification
case *event.ErrUnsupportedConfiguration:
apiErr = ErrUnsupportedNotification
case BackendDown:
apiErr = ErrBackendDown
default:
apiErr = ErrInternalError
}

cmd/api-router.go

@@ -16,19 +16,32 @@
package cmd
import router "github.com/gorilla/mux"
import "net/http"
import (
"net/http"
router "github.com/gorilla/mux"
)
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(mux *router.Router) {
var err error
var cacheConfig = globalServerConfig.GetCacheConfig()
if len(cacheConfig.Drives) > 0 {
// initialize the new disk cache objects.
globalCacheObjectAPI, err = newServerCacheObjects(cacheConfig)
fatalIf(err, "Unable to initialize disk caching")
}
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
}
// API Router
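
The handler changes that follow all apply one pattern: resolve each operation
from the cache layer when disk caching is configured, otherwise fall through to
the plain object layer. In sketch form (names exactly as in the hunks below):

listObjects := objectAPI.ListObjects
if api.CacheAPI() != nil {
	// Disk caching configured: route through the cache-aware implementation.
	listObjects = api.CacheAPI().ListObjects
}
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)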

cmd/bucket-handlers-listobjects.go

@@ -86,11 +86,14 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
writeErrorResponse(w, s3Error, r.URL)
return
}
listObjectsV2 := objectAPI.ListObjectsV2
if api.CacheAPI() != nil {
listObjectsV2 = api.CacheAPI().ListObjectsV2
}
// Initiate a list objects operation based on the input params.
// On success it returns a ListObjectsInfo object to be
// marshalled into an S3 compatible XML response.
listObjectsV2Info, err := objectAPI.ListObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@@ -149,11 +152,14 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
writeErrorResponse(w, s3Error, r.URL)
return
}
listObjects := objectAPI.ListObjects
if api.CacheAPI() != nil {
listObjects = api.CacheAPI().ListObjects
}
// Initiate a list objects operation based on the input params.
// On success it returns a ListObjectsInfo object to be
// marshalled into an S3 compatible XML response.
listObjectsInfo, err := objectAPI.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return

cmd/bucket-handlers.go

@@ -136,8 +136,11 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
return
}
defer bucketLock.RUnlock()
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
getBucketInfo := objectAPI.GetBucketInfo
if api.CacheAPI() != nil {
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}
@@ -219,7 +222,11 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
listBuckets := objectAPI.ListBuckets
if api.CacheAPI() != nil {
listBuckets = api.CacheAPI().ListBuckets
}
// ListBuckets does not have any bucket action.
s3Error := checkRequestAuthType(r, "", "", globalMinioDefaultRegion)
if s3Error == ErrInvalidRegion {
@@ -231,7 +238,7 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R
return
}
// Invoke the list buckets.
bucketsInfo, err := objectAPI.ListBuckets(ctx)
bucketsInfo, err := listBuckets(ctx)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@@ -325,7 +332,11 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
}
return
}
dErr := objectAPI.DeleteObject(ctx, bucket, obj.ObjectName)
deleteObject := objectAPI.DeleteObject
if api.CacheAPI() != nil {
deleteObject = api.CacheAPI().DeleteObject
}
dErr := deleteObject(ctx, bucket, obj.ObjectName)
if dErr != nil {
dErrs[i] = dErr
}
@@ -683,8 +694,11 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re
writeErrorResponseHeadersOnly(w, s3Error)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
getBucketInfo := objectAPI.GetBucketInfo
if api.CacheAPI() != nil {
getBucketInfo = api.CacheAPI().GetBucketInfo
}
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponseHeadersOnly(w, toAPIErrorCode(err))
return
}
@@ -710,9 +724,12 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
vars := mux.Vars(r)
bucket := vars["bucket"]
deleteBucket := objectAPI.DeleteBucket
if api.CacheAPI() != nil {
deleteBucket = api.CacheAPI().DeleteBucket
}
// Attempt to delete bucket.
if err := objectAPI.DeleteBucket(ctx, bucket); err != nil {
if err := deleteBucket(ctx, bucket); err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
}

cmd/common-main.go

@@ -124,6 +124,22 @@ func handleCommonEnvVars() {
globalIsEnvDomainName = true
}
if drives := os.Getenv("MINIO_CACHE_DRIVES"); drives != "" {
driveList, err := parseCacheDrives(drives)
fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_DRIVES")
globalCacheDrives = driveList
globalIsDiskCacheEnabled = true
}
if excludes := os.Getenv("MINIO_CACHE_EXCLUDE"); excludes != "" {
excludeList, err := parseCacheExcludes(excludes)
fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXCLUDE")
globalCacheExcludes = excludeList
}
if expiryStr := os.Getenv("MINIO_CACHE_EXPIRY"); expiryStr != "" {
expiry, err := parseCacheExpiry(expiryStr)
fatalIf(err, "Invalid value set in environment variable MINIO_CACHE_EXPIRY")
globalCacheExpiry = expiry
}
// In-place update is true by default if MINIO_UPDATE is not set
// or is not set to 'off'; if MINIO_UPDATE is set to 'off' then
// in-place update is off.

cmd/config-current.go

@@ -39,9 +39,9 @@ import (
// 6. Make changes in config-current_test.go for any test change
// Config version
const serverConfigVersion = "22"
const serverConfigVersion = "23"
type serverConfig = serverConfigV22
type serverConfig = serverConfigV23
var (
// globalServerConfig server config.
@@ -104,6 +104,25 @@ func (s *serverConfig) GetBrowser() bool {
return bool(s.Browser)
}
// SetCacheConfig sets the current cache config
func (s *serverConfig) SetCacheConfig(drives, exclude []string, expiry int) {
s.Cache.Drives = drives
s.Cache.Exclude = exclude
s.Cache.Expiry = expiry
}
// GetCacheConfig gets the current cache config
func (s *serverConfig) GetCacheConfig() CacheConfig {
if s.Cache.Drives != nil {
return CacheConfig{
Drives: s.Cache.Drives,
Exclude: s.Cache.Exclude,
Expiry: s.Cache.Expiry,
}
}
return CacheConfig{}
}
// Save config.
func (s *serverConfig) Save() error {
// Save config file.
@@ -164,6 +183,11 @@ func newServerConfig() *serverConfig {
Standard: storageClass{},
RRS: storageClass{},
},
Cache: CacheConfig{
Drives: []string{},
Exclude: []string{},
Expiry: globalCacheExpiry,
},
Notify: notifier{},
}
@@ -187,6 +211,9 @@ func newServerConfig() *serverConfig {
srvCfg.Notify.Webhook = make(map[string]target.WebhookArgs)
srvCfg.Notify.Webhook["1"] = target.WebhookArgs{}
srvCfg.Cache.Drives = make([]string, 0)
srvCfg.Cache.Exclude = make([]string, 0)
srvCfg.Cache.Expiry = globalCacheExpiry
return srvCfg
}
@@ -217,6 +244,9 @@ func newConfig() error {
srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
}
if globalIsDiskCacheEnabled {
srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry)
}
// hold the mutex lock before a new config is assigned.
// Save the new config globally.
// unlock the mutex.
@@ -344,6 +374,9 @@ func loadConfig() error {
srvCfg.SetStorageClass(globalStandardStorageClass, globalRRStorageClass)
}
if globalIsDiskCacheEnabled {
srvCfg.SetCacheConfig(globalCacheDrives, globalCacheExcludes, globalCacheExpiry)
}
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
@@ -362,6 +395,12 @@ func loadConfig() error {
if !globalIsStorageClass {
globalStandardStorageClass, globalRRStorageClass = globalServerConfig.GetStorageClass()
}
if !globalIsDiskCacheEnabled {
cacheConf := globalServerConfig.GetCacheConfig()
globalCacheDrives = cacheConf.Drives
globalCacheExcludes = cacheConf.Exclude
globalCacheExpiry = cacheConf.Expiry
}
globalServerConfigMu.Unlock()
return nil

cmd/config-migrate.go

@@ -165,6 +165,12 @@ func migrateConfig() error {
if err = migrateV21ToV22(); err != nil {
return err
}
fallthrough
case "22":
if err = migrateV22ToV23(); err != nil {
return err
}
fallthrough
case serverConfigVersion:
// No migration needed. This always points to the current version.
err = nil
@@ -1831,3 +1837,112 @@ func migrateV21ToV22() error {
log.Printf(configMigrateMSGTemplate, configFile, cv21.Version, srvConfig.Version)
return nil
}
func migrateV22ToV23() error {
configFile := getConfigFile()
cv22 := &serverConfigV22{}
_, err := quick.Load(configFile, cv22)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return fmt.Errorf("Unable to load config version 22. %v", err)
}
if cv22.Version != "22" {
return nil
}
// Copy over fields from V22 into V23 config struct
srvConfig := &serverConfigV23{
Notify: notifier{},
}
srvConfig.Version = serverConfigVersion
srvConfig.Credential = cv22.Credential
srvConfig.Region = cv22.Region
if srvConfig.Region == "" {
// Region needs to be set for AWS Signature Version 4.
srvConfig.Region = globalMinioDefaultRegion
}
if len(cv22.Notify.AMQP) == 0 {
srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs)
srvConfig.Notify.AMQP["1"] = target.AMQPArgs{}
} else {
srvConfig.Notify.AMQP = cv22.Notify.AMQP
}
if len(cv22.Notify.Elasticsearch) == 0 {
srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs)
srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{
Format: event.NamespaceFormat,
}
} else {
srvConfig.Notify.Elasticsearch = cv22.Notify.Elasticsearch
}
if len(cv22.Notify.Redis) == 0 {
srvConfig.Notify.Redis = make(map[string]target.RedisArgs)
srvConfig.Notify.Redis["1"] = target.RedisArgs{
Format: event.NamespaceFormat,
}
} else {
srvConfig.Notify.Redis = cv22.Notify.Redis
}
if len(cv22.Notify.PostgreSQL) == 0 {
srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs)
srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{
Format: event.NamespaceFormat,
}
} else {
srvConfig.Notify.PostgreSQL = cv22.Notify.PostgreSQL
}
if len(cv22.Notify.Kafka) == 0 {
srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs)
srvConfig.Notify.Kafka["1"] = target.KafkaArgs{}
} else {
srvConfig.Notify.Kafka = cv22.Notify.Kafka
}
if len(cv22.Notify.NATS) == 0 {
srvConfig.Notify.NATS = make(map[string]target.NATSArgs)
srvConfig.Notify.NATS["1"] = target.NATSArgs{}
} else {
srvConfig.Notify.NATS = cv22.Notify.NATS
}
if len(cv22.Notify.Webhook) == 0 {
srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs)
srvConfig.Notify.Webhook["1"] = target.WebhookArgs{}
} else {
srvConfig.Notify.Webhook = cv22.Notify.Webhook
}
if len(cv22.Notify.MySQL) == 0 {
srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs)
srvConfig.Notify.MySQL["1"] = target.MySQLArgs{
Format: event.NamespaceFormat,
}
} else {
srvConfig.Notify.MySQL = cv22.Notify.MySQL
}
if len(cv22.Notify.MQTT) == 0 {
srvConfig.Notify.MQTT = make(map[string]target.MQTTArgs)
srvConfig.Notify.MQTT["1"] = target.MQTTArgs{}
} else {
srvConfig.Notify.MQTT = cv22.Notify.MQTT
}
// Load browser config from existing config in the file.
srvConfig.Browser = cv22.Browser
// Load domain config from existing config in the file.
srvConfig.Domain = cv22.Domain
// Init cache config. For future migrations, Cache config needs to be copied over from the previous version.
srvConfig.Cache.Drives = []string{}
srvConfig.Cache.Exclude = []string{}
srvConfig.Cache.Expiry = globalCacheExpiry
if err = quick.Save(configFile, srvConfig); err != nil {
return fmt.Errorf("Failed to migrate config from %s to %s. %v", cv22.Version, srvConfig.Version, err)
}
log.Printf(configMigrateMSGTemplate, configFile, cv22.Version, srvConfig.Version)
return nil
}

cmd/config-migrate_test.go

@@ -131,10 +131,13 @@ func TestServerConfigMigrateInexistentConfig(t *testing.T) {
if err := migrateV20ToV21(); err != nil {
t.Fatal("migrate v20 to v21 should succeed when no config file is found")
}
if err := migrateV21ToV22(); err != nil {
t.Fatal("migrate v21 to v22 should succeed when no config file is found")
}
}
// Test if a config migration from v2 to v21 is successfully done
func TestServerConfigMigrateV2toV21(t *testing.T) {
// Test if a config migration from v2 to v23 is successfully done
func TestServerConfigMigrateV2toV23(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
@@ -263,6 +266,12 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
if err := migrateV20ToV21(); err == nil {
t.Fatal("migrateConfigV20ToV21() should fail with a corrupted json")
}
if err := migrateV21ToV22(); err == nil {
t.Fatal("migrateConfigV21ToV22() should fail with a corrupted json")
}
if err := migrateV22ToV23(); err == nil {
t.Fatal("migrateConfigV22ToV23() should fail with a corrupted json")
}
}
// Test if all migrate code returns error with corrupted config files

cmd/config-versions.go

@@ -579,3 +579,23 @@ type serverConfigV22 struct {
// Notification queue configuration.
Notify notifier `json:"notify"`
}
// serverConfigV23 is just like version '22' with the addition of a cache field
type serverConfigV23 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser BrowserFlag `json:"browser"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageClassConfig `json:"storageclass"`
// Cache configuration
Cache CacheConfig `json:"cache"`
// Notification queue configuration.
Notify notifier `json:"notify"`
}

64
cmd/disk-cache-config.go Normal file

@@ -0,0 +1,64 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"strconv"
"strings"
)
// CacheConfig represents cache config settings
type CacheConfig struct {
Drives []string
Expiry int
Exclude []string
}
// Parses given cacheDrivesEnv and returns a list of cache drives.
func parseCacheDrives(cacheDrivesEnv string) ([]string, error) {
cacheDrivesEnv = strings.ToLower(cacheDrivesEnv)
s := strings.Split(cacheDrivesEnv, ";")
c2 := make([]string, 0)
for _, d := range s {
if len(d) > 0 {
c2 = append(c2, d)
}
}
return c2, nil
}
// Parses given cacheExcludesEnv and returns a list of cache exclude patterns.
func parseCacheExcludes(cacheExcludesEnv string) ([]string, error) {
s := strings.Split(cacheExcludesEnv, ";")
c2 := make([]string, 0)
for _, e := range s {
if len(e) > 0 {
if strings.HasPrefix(e, "/") {
return c2, errors.New("cache exclude patterns cannot start with / as prefix " + e)
}
c2 = append(c2, e)
}
}
return c2, nil
}
// Parses given cacheExpiryEnv and returns cache expiry in days.
func parseCacheExpiry(cacheExpiryEnv string) (int, error) {
return strconv.Atoi(cacheExpiryEnv)
}
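
A quick usage sketch for these parsers (inputs are hypothetical):

drives, err := parseCacheDrives("/mnt/cache1;/mnt/cache2")
// drives == []string{"/mnt/cache1", "/mnt/cache2"}, err == nil

excludes, err := parseCacheExcludes("mybucket/*;*.tmp")
// excludes == []string{"mybucket/*", "*.tmp"}; a pattern with a leading
// slash, such as "/mybucket/*", returns an error instead.

expiry, err := parseCacheExpiry("90")
// expiry == 90 (days), err == nil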

cmd/disk-cache-config_test.go

@@ -0,0 +1,51 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"reflect"
"testing"
)
// Tests cache exclude parsing.
func TestParseCacheExclude(t *testing.T) {
testCases := []struct {
excludeStr string
expectedPatterns []string
success bool
}{
// Empty input.
{"", []string{}, true},
// invalid input: exclude patterns cannot start with "/"
{"/home/drive1;/home/drive2;/home/drive3", []string{}, false},
// valid input
{"bucket1/*;*.png;images/trip/barcelona/*", []string{"bucket1/*", "*.png", "images/trip/barcelona/*"}, true},
{"bucket1", []string{"bucket1"}, true},
}
for i, testCase := range testCases {
excludes, err := parseCacheExcludes(testCase.excludeStr)
if err != nil && testCase.success {
t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
}
if err == nil && !testCase.success {
t.Errorf("Test %d: Expected failure but passed instead", i+1)
}
if !reflect.DeepEqual(excludes, testCase.expectedPatterns) {
t.Errorf("Expected %v, got %v", testCase.expectedPatterns, excludes)
}
}
}

506
cmd/disk-cache-fs.go Normal file

@@ -0,0 +1,506 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
"os"
"path"
"sync"
"time"
"github.com/minio/minio/pkg/disk"
errors2 "github.com/minio/minio/pkg/errors"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/lock"
)
const (
// cache.json object metadata for cached objects.
cacheMetaJSONFile = "cache.json"
cacheMetaFormat = "cache"
)
// cacheFSObjects implements the cache backend operations.
type cacheFSObjects struct {
*FSObjects
// caching drive path (from cache "drives" in config.json)
dir string
// expiry in days specified in config.json
expiry int
// max disk usage pct
maxDiskUsagePct int
// purge() listens on this channel to start the cache-purge process
purgeChan chan struct{}
// mark false if drive is offline
online bool
// mutex to protect updates to online variable
onlineMutex *sync.RWMutex
}
// Inits the cache directory if it is not initialized already.
// Initializing implies creation of a new FS object layer.
func newCacheFSObjects(dir string, expiry int, maxDiskUsagePct int) (*cacheFSObjects, error) {
obj, err := newFSObjects(dir, cacheMetaJSONFile)
if err != nil {
return nil, err
}
trashPath := pathJoin(dir, minioMetaBucket, cacheTrashDir)
if err := os.MkdirAll(trashPath, 0777); err != nil {
return nil, err
}
if expiry == 0 {
expiry = globalCacheExpiry
}
var cacheFS cacheFSObjects
fsObjects := obj.(*FSObjects)
cacheFS = cacheFSObjects{
FSObjects: fsObjects,
dir: dir,
expiry: expiry,
maxDiskUsagePct: maxDiskUsagePct,
purgeChan: make(chan struct{}),
online: true,
onlineMutex: &sync.RWMutex{},
}
return &cacheFS, nil
}
// Returns true if the disk usage is low.
// Disk usage is low if usage is < 80% of cacheMaxDiskUsagePct.
// E.g. for a 100GB disk, if maxUsage is configured as 70% then cacheMaxDiskUsagePct is 70GB,
// hence disk usage is low if the used space is less than 56GB (because 80% of 70GB is 56GB).
func (cfs *cacheFSObjects) diskUsageLow() bool {
minUsage := cfs.maxDiskUsagePct * 80 / 100
di, err := disk.GetInfo(cfs.dir)
if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir)
return false
}
usedPercent := (di.Total - di.Free) * 100 / di.Total
return int(usedPercent) < minUsage
}
// Returns true if the disk usage is high.
// Disk usage is high if disk used is > cacheMaxDiskUsagePct.
func (cfs *cacheFSObjects) diskUsageHigh() bool {
di, err := disk.GetInfo(cfs.dir)
if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir)
return true
}
usedPercent := (di.Total - di.Free) * 100 / di.Total
return int(usedPercent) > cfs.maxDiskUsagePct
}
// Returns true if size bytes can be allocated without exceeding
// the max disk space usable for caching.
func (cfs *cacheFSObjects) diskAvailable(size int64) bool {
di, err := disk.GetInfo(cfs.dir)
if err != nil {
errorIf(err, "Error getting disk information on %s", cfs.dir)
return false
}
usedPercent := (di.Total - (di.Free - uint64(size))) * 100 / di.Total
return int(usedPercent) < cfs.maxDiskUsagePct
}
// purges all content marked as trash from the cache.
func (cfs *cacheFSObjects) purgeTrash() {
ticker := time.NewTicker(time.Minute * cacheCleanupInterval)
for {
select {
case <-globalServiceDoneCh:
// Stop the timer.
ticker.Stop()
return
case <-ticker.C:
trashPath := path.Join(cfs.fsPath, minioMetaBucket, cacheTrashDir)
entries, err := readDir(trashPath)
if err != nil {
return
}
for _, entry := range entries {
fi, err := fsStatVolume(pathJoin(trashPath, entry))
if err != nil {
continue
}
dir := path.Join(trashPath, fi.Name())
// Delete all expired cache content.
fsRemoveAll(dir)
}
}
}
}
// Purge cache entries that have not been accessed within the expiry duration.
func (cfs *cacheFSObjects) purge() {
delimiter := slashSeparator
maxKeys := 1000
ctx := context.Background()
for {
olderThan := cfs.expiry
for !cfs.diskUsageLow() {
// delete unaccessed objects older than expiry duration
expiry := UTCNow().AddDate(0, 0, -1*olderThan)
olderThan /= 2
if olderThan < 1 {
break
}
deletedCount := 0
buckets, err := cfs.ListBuckets(ctx)
if err != nil {
errorIf(err, "Unable to list buckets.")
}
// Reset cache online status if drive was offline earlier.
if !cfs.IsOnline() {
cfs.setOnline(true)
}
for _, bucket := range buckets {
var continuationToken string
var marker string
for {
objects, err := cfs.ListObjects(ctx, bucket.Name, marker, continuationToken, delimiter, maxKeys)
if err != nil {
break
}
if !objects.IsTruncated {
break
}
marker = objects.NextMarker
for _, object := range objects.Objects {
// purge objects that qualify because of cache-control directives or
// past cache expiry duration.
if !filterFromCache(object.UserDefined) ||
!isStaleCache(object) ||
object.AccTime.After(expiry) {
continue
}
if err = cfs.DeleteObject(ctx, bucket.Name, object.Name); err != nil {
errorIf(err, "Unable to remove cache entry in dir %s/%s", bucket.Name, object.Name)
continue
}
deletedCount++
}
}
}
if deletedCount == 0 {
// to avoid a busy loop
time.Sleep(time.Minute * 30)
}
}
<-cfs.purgeChan
}
}
// sets cache drive status
func (cfs *cacheFSObjects) setOnline(status bool) {
cfs.onlineMutex.Lock()
cfs.online = status
cfs.onlineMutex.Unlock()
}
// returns true if cache drive is online
func (cfs *cacheFSObjects) IsOnline() bool {
cfs.onlineMutex.RLock()
defer cfs.onlineMutex.RUnlock()
return cfs.online
}
// Caches the object to disk
func (cfs *cacheFSObjects) Put(ctx context.Context, bucket, object string, data *hash.Reader, metadata map[string]string) error {
if cfs.diskUsageHigh() {
select {
case cfs.purgeChan <- struct{}{}:
default:
}
return errDiskFull
}
if !cfs.diskAvailable(data.Size()) {
return errDiskFull
}
if _, err := cfs.GetBucketInfo(ctx, bucket); err != nil {
pErr := cfs.MakeBucketWithLocation(ctx, bucket, "")
if pErr != nil {
return pErr
}
}
_, err := cfs.PutObject(ctx, bucket, object, data, metadata)
// if err is due to disk being offline, mark cache drive as offline
if errors2.IsErr(err, baseErrs...) {
cfs.setOnline(false)
}
return err
}
// Returns the handle for the cached object
func (cfs *cacheFSObjects) Get(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
return cfs.GetObject(ctx, bucket, object, startOffset, length, writer, etag)
}
// Deletes the cached object
func (cfs *cacheFSObjects) Delete(ctx context.Context, bucket, object string) (err error) {
return cfs.DeleteObject(ctx, bucket, object)
}
// convenience function to check if object is cached on this cacheFSObjects
func (cfs *cacheFSObjects) Exists(ctx context.Context, bucket, object string) bool {
_, err := cfs.GetObjectInfo(ctx, bucket, object)
return err == nil
}
// Identical to fs PutObject operation except that it uses ETag in metadata
// headers.
func (cfs *cacheFSObjects) PutObject(ctx context.Context, bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
fs := cfs.FSObjects
// Lock the object.
objectLock := fs.nsMutex.NewNSLock(bucket, object)
if err := objectLock.GetLock(globalObjectTimeout); err != nil {
return objInfo, err
}
defer objectLock.Unlock()
// No metadata is set, allocate a new one.
meta := make(map[string]string)
for k, v := range metadata {
meta[k] = v
}
var err error
// Validate if bucket name is valid and exists.
if _, err = fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
}
fsMeta := newFSMetaV1()
fsMeta.Meta = meta
// This is a special case with size as '0' and object ends
// with a slash separator, we treat it like a valid operation
// and return success.
if isObjectDir(object, data.Size()) {
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors2.Trace(errFileAccessDenied), bucket, object)
}
if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0777); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
var fi os.FileInfo
if fi, err = fsStatDir(pathJoin(fs.fsPath, bucket, object)); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
if err = checkPutObjectArgs(bucket, object, fs, data.Size()); err != nil {
return ObjectInfo{}, err
}
// Check if an object is present as one of the parent dir.
if fs.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(errors2.Trace(errFileAccessDenied), bucket, object)
}
// Validate input data size and it can never be less than zero.
if data.Size() < 0 {
return ObjectInfo{}, errors2.Trace(errInvalidArgument)
}
var wlk *lock.LockedFile
if bucket != minioMetaBucket {
bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(errors2.Trace(err), bucket, object)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
defer func() {
// Remove meta file when PutObject encounters any error
if retErr != nil {
tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
fsRemoveMeta(bucketMetaDir, fsMetaPath, tmpDir)
}
}()
}
// Uploaded object will first be written to the temporary location which will eventually
// be renamed to the actual location. It is first written to the temporary location
// so that cleaning it up will be easy if the server goes down.
tempObj := mustGetUUID()
// Allocate a buffer to Read() from request body
bufSize := int64(readSizeV1)
if size := data.Size(); size > 0 && bufSize > size {
bufSize = size
}
buf := make([]byte, int(bufSize))
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
bytesWritten, err := fsCreateFile(fsTmpObjPath, data, buf, data.Size())
if err != nil {
fsRemoveFile(fsTmpObjPath)
errorIf(err, "Failed to create object %s/%s", bucket, object)
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
if fsMeta.Meta["etag"] == "" {
fsMeta.Meta["etag"] = hex.EncodeToString(data.MD5Current())
}
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < data.Size() {
fsRemoveFile(fsTmpObjPath)
return ObjectInfo{}, errors2.Trace(IncompleteBody{})
}
// Delete the temporary object in the case of a
// failure. If PutObject succeeds, then there would be
// nothing to delete.
defer fsRemoveFile(fsTmpObjPath)
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
if bucket != minioMetaBucket {
// Write FS metadata after a successful namespace operation.
if _, err = fsMeta.WriteTo(wlk); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
}
// Stat the file to fetch timestamp, size.
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
// Success.
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
// Implements S3 compatible initiate multipart API. Operation here is identical
// to fs backend implementation - with the exception that cache FS uses the uploadID
// generated on the backend
func (cfs *cacheFSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, meta map[string]string, uploadID string) (string, error) {
if cfs.diskUsageHigh() {
select {
case cfs.purgeChan <- struct{}{}:
default:
}
return "", errDiskFull
}
if _, err := cfs.GetBucketInfo(ctx, bucket); err != nil {
pErr := cfs.MakeBucketWithLocation(ctx, bucket, "")
if pErr != nil {
return "", pErr
}
}
fs := cfs.FSObjects
if err := checkNewMultipartArgs(bucket, object, fs); err != nil {
return "", toObjectErr(err, bucket)
}
if _, err := fs.statBucketDir(bucket); err != nil {
return "", toObjectErr(err, bucket)
}
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
err := mkdirAll(uploadIDDir, 0755)
if err != nil {
return "", errors2.Trace(err)
}
// Initialize fs.json values.
fsMeta := newFSMetaV1()
fsMeta.Meta = meta
fsMetaBytes, err := json.Marshal(fsMeta)
if err != nil {
return "", errors2.Trace(err)
}
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
return "", errors2.Trace(err)
}
return uploadID, nil
}
// moveBucketToTrash clears cacheFSObjects of bucket contents and moves it to trash folder.
func (cfs *cacheFSObjects) moveBucketToTrash(ctx context.Context, bucket string) (err error) {
fs := cfs.FSObjects
bucketLock := fs.nsMutex.NewNSLock(bucket, "")
if err = bucketLock.GetLock(globalObjectTimeout); err != nil {
return err
}
defer bucketLock.Unlock()
bucketDir, err := fs.getBucketDir(bucket)
if err != nil {
return toObjectErr(err, bucket)
}
trashPath := pathJoin(cfs.fsPath, minioMetaBucket, cacheTrashDir)
expiredDir := path.Join(trashPath, bucket)
// Attempt to move regular bucket to expired directory.
if err = fsRenameDir(bucketDir, expiredDir); err != nil {
return toObjectErr(err, bucket)
}
// Cleanup all the bucket metadata.
ominioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
nminioMetadataBucketDir := pathJoin(trashPath, mustGetUUID())
_ = fsRenameDir(ominioMetadataBucketDir, nminioMetadataBucketDir)
return nil
}
// Renames a given directory to the destination path, failing if the
// destination is not empty; handles long paths for windows automatically.
func fsRenameDir(dirPath, newPath string) (err error) {
if dirPath == "" || newPath == "" {
return errors2.Trace(errInvalidArgument)
}
if err = checkPathLength(dirPath); err != nil {
return errors2.Trace(err)
}
if err = checkPathLength(newPath); err != nil {
return errors2.Trace(err)
}
if err = os.Rename(dirPath, newPath); err != nil {
if os.IsNotExist(err) {
return errors2.Trace(errVolumeNotFound)
} else if isSysErrNotEmpty(err) {
return errors2.Trace(errVolumeNotEmpty)
}
return errors2.Trace(err)
}
return nil
}

1021
cmd/disk-cache.go Normal file

File diff suppressed because it is too large.

282
cmd/disk-cache_test.go Normal file

@@ -0,0 +1,282 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"context"
"os"
"reflect"
"testing"
"time"
"github.com/minio/minio/pkg/hash"
)
// Initialize cache FS objects.
func initCacheFSObjects(disk string, t *testing.T) (*cacheFSObjects, error) {
newTestConfig(globalMinioDefaultRegion)
var err error
obj, err := newCacheFSObjects(disk, globalCacheExpiry, 100)
if err != nil {
t.Fatal(err)
}
return obj, nil
}
// inits diskCache struct for nDisks
func initDiskCaches(drives []string, t *testing.T) (*diskCache, error) {
var cfs []*cacheFSObjects
for _, d := range drives {
obj, err := initCacheFSObjects(d, t)
if err != nil {
return nil, err
}
cfs = append(cfs, obj)
}
return &diskCache{cfs: cfs}, nil
}
// test whether a drive being offline causes
// getCacheFS to fetch the next online drive
func TestGetCacheFS(t *testing.T) {
for n := 1; n < 10; n++ {
fsDirs, err := getRandomDisks(n)
if err != nil {
t.Fatal(err)
}
d, err := initDiskCaches(fsDirs, t)
if err != nil {
t.Fatal(err)
}
bucketName := "testbucket"
objectName := "testobject"
ctx := context.Background()
// find cache drive where object would be hashed
index := d.hashIndex(bucketName, objectName)
// turn off drive by setting online status to false
d.cfs[index].online = false
cfs, err := d.getCacheFS(ctx, bucketName, objectName)
if n == 1 && err == errDiskNotFound {
continue
}
if err != nil {
t.Fatal(err)
}
i := -1
for j, f := range d.cfs {
if f == cfs {
i = j
break
}
}
if i != (index+1)%n {
t.Fatalf("expected next cache location to be picked")
}
}
}
// test wildcard patterns for excluding entries from cache
func TestCacheExclusion(t *testing.T) {
rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatal(err)
}
cconfig := CacheConfig{Expiry: 30, Drives: fsDirs}
cobjects, err := newServerCacheObjects(cconfig)
if err != nil {
t.Fatal(err)
}
cobj := cobjects.(*cacheObjects)
globalServiceDoneCh <- struct{}{}
testCases := []struct {
bucketName string
objectName string
excludePattern string
expectedResult bool
}{
{"testbucket", "testobjectmatch", "testbucket/testobj*", true},
{"testbucket", "testobjectnomatch", "testbucet/testobject*", false},
{"testbucket", "testobject/pref1/obj1", "*/*", true},
{"testbucket", "testobject/pref1/obj1", "*/pref1/*", true},
{"testbucket", "testobject/pref1/obj1", "testobject/*", false},
{"photos", "image1.jpg", "*.jpg", true},
{"photos", "europe/paris/seine.jpg", "seine.jpg", false},
{"photos", "europe/paris/seine.jpg", "*/seine.jpg", true},
{"phil", "z/likes/coffee", "*/likes/*", true},
{"failbucket", "no/slash/prefixes", "/failbucket/no/", false},
{"failbucket", "no/slash/prefixes", "/failbucket/no/*", false},
}
for i, testCase := range testCases {
cobj.exclude = []string{testCase.excludePattern}
if cobj.isCacheExclude(testCase.bucketName, testCase.objectName) != testCase.expectedResult {
t.Fatal("Cache exclusion test failed for case ", i)
}
}
}
// Test diskCache.
func TestDiskCache(t *testing.T) {
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatal(err)
}
d, err := initDiskCaches(fsDirs, t)
if err != nil {
t.Fatal(err)
}
cache := d.cfs[0]
ctx := context.Background()
bucketName := "testbucket"
objectName := "testobject"
content := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
etag := "061208c10af71a30c6dcd6cf5d89f0fe"
contentType := "application/zip"
size := len(content)
httpMeta := make(map[string]string)
httpMeta["etag"] = etag
httpMeta["content-type"] = contentType
objInfo := ObjectInfo{}
objInfo.Bucket = bucketName
objInfo.Name = objectName
objInfo.Size = int64(size)
objInfo.ContentType = contentType
objInfo.ETag = etag
objInfo.UserDefined = httpMeta
byteReader := bytes.NewReader([]byte(content))
hashReader, err := hash.NewReader(byteReader, int64(size), "", "")
if err != nil {
t.Fatal(err)
}
err = cache.Put(ctx, bucketName, objectName, hashReader, httpMeta)
if err != nil {
t.Fatal(err)
}
cachedObjInfo, err := cache.GetObjectInfo(ctx, bucketName, objectName)
if err != nil {
t.Fatal(err)
}
if !cache.Exists(ctx, bucketName, objectName) {
t.Fatal("Expected object to exist on cache")
}
if cachedObjInfo.ETag != objInfo.ETag {
t.Fatal("Expected ETag to match")
}
if cachedObjInfo.Size != objInfo.Size {
t.Fatal("Size mismatch")
}
if cachedObjInfo.ContentType != objInfo.ContentType {
t.Fatal("Cached content-type does not match")
}
writer := bytes.NewBuffer(nil)
err = cache.Get(ctx, bucketName, objectName, 0, int64(size), writer, "")
if err != nil {
t.Fatal(err)
}
if ccontent := writer.Bytes(); !bytes.Equal([]byte(content), ccontent) {
t.Errorf("wrong cached file content")
}
err = cache.Delete(ctx, bucketName, objectName)
if err != nil {
t.Errorf("object missing from cache")
}
online := cache.IsOnline()
if !online {
t.Errorf("expected cache drive to be online")
}
}
func TestIsCacheExcludeDirective(t *testing.T) {
testCases := []struct {
cacheControlOpt string
expectedResult bool
}{
{"no-cache", true},
{"no-store", true},
{"must-revalidate", true},
{"no-transform", false},
{"max-age", false},
}
for i, testCase := range testCases {
if isCacheExcludeDirective(testCase.cacheControlOpt) != testCase.expectedResult {
t.Errorf("Cache exclude directive test failed for case %d", i)
}
}
}
func TestGetCacheControlOpts(t *testing.T) {
testCases := []struct {
cacheControlHeaderVal string
expiryHeaderVal string
expectedCacheControl cacheControl
expectedErr bool
}{
{"", "", cacheControl{}, false},
{"max-age=2592000, public", "", cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}, exclude: false}, false},
{"max-age=2592000, no-store", "", cacheControl{maxAge: 2592000, sMaxAge: 0, minFresh: 0, expiry: time.Time{}, exclude: true}, false},
{"must-revalidate, max-age=600", "", cacheControl{maxAge: 600, sMaxAge: 0, minFresh: 0, expiry: time.Time{}, exclude: true}, false},
{"s-maxAge=2500, max-age=600", "", cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}, exclude: false}, false},
{"s-maxAge=2500, max-age=600", "Wed, 21 Oct 2015 07:28:00 GMT", cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Date(2015, time.October, 21, 07, 28, 00, 00, time.UTC), exclude: false}, false},
{"s-maxAge=2500, max-age=600s", "", cacheControl{maxAge: 600, sMaxAge: 2500, minFresh: 0, expiry: time.Time{}, exclude: false}, true},
}
var m map[string]string
for i, testCase := range testCases {
m = make(map[string]string)
m["cache-control"] = testCase.cacheControlHeaderVal
if testCase.expiryHeaderVal != "" {
m["expires"] = testCase.expiryHeaderVal
}
c, err := getCacheControlOpts(m)
if testCase.expectedErr && err == nil {
t.Errorf("expected err for case %d", i)
}
if !testCase.expectedErr && !reflect.DeepEqual(c, testCase.expectedCacheControl) {
t.Errorf("expected %v got %v for case %d", testCase.expectedCacheControl, c, i)
}
}
}
func TestFilterFromCache(t *testing.T) {
testCases := []struct {
metadata map[string]string
expectedResult bool
}{
{map[string]string{"content-type": "application/json"}, false},
{map[string]string{"cache-control": "private,no-store"}, true},
{map[string]string{"cache-control": "no-cache,must-revalidate"}, true},
{map[string]string{"cache-control": "no-transform"}, false},
{map[string]string{"cache-control": "max-age=3600"}, false},
}
for i, testCase := range testCases {
if filterFromCache(testCase.metadata) != testCase.expectedResult {
t.Errorf("Cache exclude directive test failed for case %d", i)
}
}
}

328
cmd/format-disk-cache.go Normal file

@@ -0,0 +1,328 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"syscall"
errors2 "github.com/minio/minio/pkg/errors"
)
const (
// Represents Cache format json holding details on all other cache drives in use.
formatCache = "cache"
// formatCacheV1.Cache.Version
formatCacheVersionV1 = "1"
formatMetaVersion1 = "1"
)
// Represents the current cache structure with list of
// disks comprising the disk cache
// formatCacheV1 - structure holds format config version '1'.
type formatCacheV1 struct {
formatMetaV1
Cache struct {
Version string `json:"version"` // Version of 'cache' format.
This string `json:"this"` // This field carries assigned disk uuid.
// Disks field carries the input disk order generated the first
// time when fresh disks were supplied.
Disks []string `json:"disks"`
} `json:"cache"` // Cache field holds cache format.
}
// Used to detect the version of "cache" format.
type formatCacheVersionDetect struct {
Cache struct {
Version string `json:"version"`
} `json:"cache"`
}
// Return a slice of format, to be used to format uninitialized disks.
func newFormatCacheV1(drives []string) []*formatCacheV1 {
diskCount := len(drives)
var disks = make([]string, diskCount)
var formats = make([]*formatCacheV1, diskCount)
for i := 0; i < diskCount; i++ {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = mustGetUUID()
formats[i] = format
disks[i] = formats[i].Cache.This
}
for i := 0; i < diskCount; i++ {
format := formats[i]
format.Cache.Disks = disks
}
return formats
}
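
// For two hypothetical drives, the format.json written by this slice to the
// first drive would look roughly like this (UUIDs invented for illustration;
// key names follow the json tags on formatCacheV1 above):
//
//	{
//	  "version": "1",
//	  "format": "cache",
//	  "cache": {
//	    "version": "1",
//	    "this": "uuid-of-drive-1",
//	    "disks": ["uuid-of-drive-1", "uuid-of-drive-2"]
//	  }
//	}
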
// Returns format.Cache.Version
func formatCacheGetVersion(r io.ReadSeeker) (string, error) {
format := &formatCacheVersionDetect{}
if err := jsonLoad(r, format); err != nil {
return "", err
}
return format.Cache.Version, nil
}
// Creates a new cache format.json if unformatted.
func createFormatCache(fsFormatPath string, format *formatCacheV1) error {
// open file using READ & WRITE permission
var file, err = os.OpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return errors2.Trace(err)
}
// Close the locked file upon return.
defer file.Close()
fi, err := file.Stat()
if err != nil {
return errors2.Trace(err)
}
if fi.Size() != 0 {
// format.json already got created because of another minio process's createFormatCache()
return nil
}
return jsonSave(file, format)
}
// This function creates a cache format file on disk and returns a slice
// of format cache config
func initFormatCache(drives []string) (formats []*formatCacheV1, err error) {
nformats := newFormatCacheV1(drives)
for i, drive := range drives {
// Disallow relative paths, figure out absolute paths.
cfsPath, err := filepath.Abs(drive)
if err != nil {
return nil, err
}
fi, err := os.Stat(cfsPath)
if err == nil {
if !fi.IsDir() {
return nil, syscall.ENOTDIR
}
}
if os.IsNotExist(err) {
// Disk not found, create it.
err = os.MkdirAll(cfsPath, 0777)
if err != nil {
return nil, err
}
}
cacheFormatPath := pathJoin(drive, formatConfigFile)
// Fresh disk - create format.json for this cfs
if err = createFormatCache(cacheFormatPath, nformats[i]); err != nil {
return nil, err
}
}
return nformats, nil
}
func loadFormatCache(drives []string) (formats []*formatCacheV1, err error) {
var errs []error
for _, drive := range drives {
cacheFormatPath := pathJoin(drive, formatConfigFile)
f, perr := os.Open(cacheFormatPath)
if perr != nil {
formats = append(formats, nil)
errs = append(errs, perr)
continue
}
defer f.Close()
format, perr := formatMetaCacheV1(f)
if perr != nil {
// format could not be unmarshalled.
formats = append(formats, nil)
errs = append(errs, perr)
continue
}
formats = append(formats, format)
}
for _, perr := range errs {
if perr != nil {
err = perr
}
}
return formats, err
}
// unmarshals the cache format.json into formatCacheV1
func formatMetaCacheV1(r io.ReadSeeker) (*formatCacheV1, error) {
format := &formatCacheV1{}
if err := jsonLoad(r, format); err != nil {
return nil, err
}
return format, nil
}
func checkFormatCacheValue(format *formatCacheV1) error {
// Validate format version and format type.
if format.Version != formatMetaVersion1 {
return fmt.Errorf("Unsupported version of cache format [%s] found", format.Version)
}
if format.Format != formatCache {
return fmt.Errorf("Unsupported cache format [%s] found", format.Format)
}
if format.Cache.Version != formatCacheVersionV1 {
return fmt.Errorf("Unsupported Cache backend format found [%s]", format.Cache.Version)
}
return nil
}
func checkFormatCacheValues(formats []*formatCacheV1) (int, error) {
for i, formatCache := range formats {
if formatCache == nil {
continue
}
if err := checkFormatCacheValue(formatCache); err != nil {
return i, err
}
if len(formats) != len(formatCache.Cache.Disks) {
return i, fmt.Errorf("Expected number of cache drives %d, got %d",
len(formatCache.Cache.Disks), len(formats))
}
}
return -1, nil
}
// checkCacheDiskConsistency - checks if "This" disk uuid on each disk is consistent with all "Disks" slices
// across disks.
func checkCacheDiskConsistency(formats []*formatCacheV1) error {
var disks = make([]string, len(formats))
// Collect currently available disk uuids.
for index, format := range formats {
if format == nil {
disks[index] = ""
continue
}
disks[index] = format.Cache.This
}
for i, format := range formats {
if format == nil {
continue
}
j := findCacheDiskIndex(disks[i], format.Cache.Disks)
if j == -1 {
return fmt.Errorf("UUID on positions %d:%d do not match, expected %s", i, j, disks[i])
}
if i != j {
return fmt.Errorf("UUID on positions %d:%d do not match, expected %s got %s", i, j, disks[i], format.Cache.Disks[j])
}
}
return nil
}
// checkCacheDisksSliceConsistency - validate cache Disks order if they are consistent.
func checkCacheDisksSliceConsistency(formats []*formatCacheV1) error {
var sentinelDisks []string
// Extract first valid Disks slice.
for _, format := range formats {
if format == nil {
continue
}
sentinelDisks = format.Cache.Disks
break
}
for _, format := range formats {
if format == nil {
continue
}
currentDisks := format.Cache.Disks
if !reflect.DeepEqual(sentinelDisks, currentDisks) {
return errors.New("inconsistent cache drives found")
}
}
return nil
}
// findCacheDiskIndex returns position of cache disk in JBOD.
func findCacheDiskIndex(disk string, disks []string) int {
for index, uuid := range disks {
if uuid == disk {
return index
}
}
return -1
}
// validate whether cache drives order has changed
func validateCacheFormats(formats []*formatCacheV1) error {
if _, err := checkFormatCacheValues(formats); err != nil {
return err
}
if err := checkCacheDisksSliceConsistency(formats); err != nil {
return err
}
return checkCacheDiskConsistency(formats)
}
// returns true if all of the listed cache drives are
// fresh (unformatted) disks
func cacheDrivesUnformatted(drives []string) bool {
count := 0
for _, drive := range drives {
cacheFormatPath := pathJoin(drive, formatConfigFile)
// Disallow relative paths, figure out absolute paths.
cfsPath, err := filepath.Abs(cacheFormatPath)
if err != nil {
continue
}
fi, err := os.Stat(cfsPath)
if err == nil {
if !fi.IsDir() {
continue
}
}
if os.IsNotExist(err) {
count++
continue
}
}
return count == len(drives)
}
// Creates format.json on each cache drive if it is a fresh disk, or loads the format from disk.
// Then validates the format for all drives in the cache to ensure the order
// of cache drives has not changed.
func loadAndValidateCacheFormat(drives []string) (formats []*formatCacheV1, err error) {
if cacheDrivesUnformatted(drives) {
formats, err = initFormatCache(drives)
} else {
formats, err = loadFormatCache(drives)
}
if err != nil {
return formats, err
}
return formats, validateCacheFormats(formats)
}
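
As a usage sketch, a caller (presumably in cmd/disk-cache.go, whose diff is
suppressed above) could wire this in at startup along these lines; cacheConfig
and the fatalIf call mirror registerAPIRouter earlier in this diff:

// Sketch: format fresh drives or load existing formats, then verify
// that the recorded drive order still matches the configuration.
if _, err := loadAndValidateCacheFormat(cacheConfig.Drives); err != nil {
	fatalIf(err, "Unable to initialize disk caching")
}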

cmd/format-disk-cache_test.go

@@ -0,0 +1,322 @@
/*
* Minio Cloud Storage, (C) 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"testing"
)
// TestDiskCacheFormat - tests initFormatCache, loadAndValidateCacheFormat, formatCacheGetVersion.
func TestDiskCacheFormat(t *testing.T) {
fsDirs, err := getRandomDisks(1)
if err != nil {
t.Fatal(err)
}
_, err = initDiskCaches(fsDirs, t)
if err != nil {
t.Fatal(err)
}
// cformat := newFormatCacheV1([]string{cacheDataDir + "/format.json"})
_, err = initFormatCache(fsDirs)
if err != nil {
t.Fatal(err)
}
// Do the basic sanity checks to check if initFormatCache() did its job.
cacheFormatPath := pathJoin(fsDirs[0], formatConfigFile)
f, err := os.OpenFile(cacheFormatPath, os.O_RDWR, 0)
if err != nil {
t.Fatal(err)
}
defer f.Close()
version, err := formatCacheGetVersion(f)
if err != nil {
t.Fatal(err)
}
if version != formatCacheVersionV1 {
t.Fatalf(`expected: %s, got: %s`, formatCacheVersionV1, version)
}
// Corrupt the format.json file and test the functions.
// formatCacheGetVersion and loadAndValidateCacheFormat should return errors.
if err = f.Truncate(0); err != nil {
t.Fatal(err)
}
if _, err = f.WriteString("b"); err != nil {
t.Fatal(err)
}
if _, err = loadAndValidateCacheFormat(fsDirs); err == nil {
t.Fatal("expected to fail")
}
// With an unknown formatMetaV1.Version, loadAndValidateCacheFormat should return an error.
if err = f.Truncate(0); err != nil {
t.Fatal(err)
}
// Here we set formatMetaV1.Version to "2"
if _, err = f.WriteString(`{"version":"2","format":"cache","cache":{"version":"1"}}`); err != nil {
t.Fatal(err)
}
if _, err = loadAndValidateCacheFormat(fsDirs); err == nil {
t.Fatal("expected to fail")
}
}
// generates a valid format.json for Cache backend.
func genFormatCacheValid() []*formatCacheV1 {
disks := make([]string, 8)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
return formatConfigs
}
// generates an invalid format.json version for Cache backend.
func genFormatCacheInvalidVersion() []*formatCacheV1 {
disks := make([]string, 8)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
// Corrupt version numbers.
formatConfigs[0].Version = "2"
formatConfigs[3].Version = "-1"
return formatConfigs
}
// generates an invalid format.json format tag for Cache backend.
func genFormatCacheInvalidFormat() []*formatCacheV1 {
disks := make([]string, 8)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
// Corrupt format.
formatConfigs[0].Format = "cach"
formatConfigs[3].Format = "cach"
return formatConfigs
}
// generates an invalid format.json cache version for Cache backend.
func genFormatCacheInvalidCacheVersion() []*formatCacheV1 {
disks := make([]string, 8)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
// Corrupt version numbers.
formatConfigs[0].Cache.Version = "10"
formatConfigs[3].Cache.Version = "-1"
return formatConfigs
}
// generates an invalid format.json Disks count for Cache backend.
func genFormatCacheInvalidDisksCount() []*formatCacheV1 {
disks := make([]string, 7)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
return formatConfigs
}
// generates an invalid format.json Disks for Cache backend.
func genFormatCacheInvalidDisks() []*formatCacheV1 {
disks := make([]string, 8)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
for index := range disks {
disks[index] = mustGetUUID()
}
// Corrupt Disks entries on disk 6 and disk 8.
formatConfigs[5].Cache.Disks = disks
formatConfigs[7].Cache.Disks = disks
return formatConfigs
}
// generates an invalid format.json This disk UUID for Cache backend.
func genFormatCacheInvalidThis() []*formatCacheV1 {
disks := make([]string, 8)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
// Make disk 5 and disk 8 have inconsistent disk uuid's.
formatConfigs[4].Cache.This = mustGetUUID()
formatConfigs[7].Cache.This = mustGetUUID()
return formatConfigs
}
// generates an invalid format.json with Disks in wrong order for Cache backend.
func genFormatCacheInvalidDisksOrder() []*formatCacheV1 {
disks := make([]string, 8)
formatConfigs := make([]*formatCacheV1, 8)
for index := range disks {
disks[index] = mustGetUUID()
}
for index := range disks {
format := &formatCacheV1{}
format.Version = formatMetaVersion1
format.Format = formatCache
format.Cache.Version = formatCacheVersionV1
format.Cache.This = disks[index]
format.Cache.Disks = disks
formatConfigs[index] = format
}
// Reorder disks for the failure case.
var disks1 = make([]string, 8)
copy(disks1, disks)
disks1[1], disks1[2] = disks[2], disks[1]
formatConfigs[2].Cache.Disks = disks1
return formatConfigs
}
// Wrapper for calling FormatCache tests - validates
// - valid format
// - unrecognized version number
// - unrecognized format tag
// - unrecognized cache version
// - wrong number of Disks entries
// - invalid This uuid
// - invalid Disks order
func TestFormatCache(t *testing.T) {
formatInputCases := [][]*formatCacheV1{
genFormatCacheValid(),
genFormatCacheInvalidVersion(),
genFormatCacheInvalidFormat(),
genFormatCacheInvalidCacheVersion(),
genFormatCacheInvalidDisksCount(),
genFormatCacheInvalidDisks(),
genFormatCacheInvalidThis(),
genFormatCacheInvalidDisksOrder(),
}
testCases := []struct {
formatConfigs []*formatCacheV1
shouldPass bool
}{
{
formatConfigs: formatInputCases[0],
shouldPass: true,
},
{
formatConfigs: formatInputCases[1],
shouldPass: false,
},
{
formatConfigs: formatInputCases[2],
shouldPass: false,
},
{
formatConfigs: formatInputCases[3],
shouldPass: false,
},
{
formatConfigs: formatInputCases[4],
shouldPass: false,
},
{
formatConfigs: formatInputCases[5],
shouldPass: false,
},
{
formatConfigs: formatInputCases[6],
shouldPass: false,
},
{
formatConfigs: formatInputCases[7],
shouldPass: false,
},
}
for i, testCase := range testCases {
err := validateCacheFormats(testCase.formatConfigs)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass but failed with %s", i+1, err)
}
if err == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail but passed instead", i+1)
}
}
}
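The table-driven cases above exercise `validateCacheFormats`, whose body is not part of this diff. As a reading aid, here is a minimal sketch of the checks implied by the corrupted inputs generated above; the helper name `checkCacheFormats`, the error messages, and the `fmt` import are assumptions, not the actual implementation:

```go
// checkCacheFormats is a hypothetical stand-in for validateCacheFormats,
// reconstructed from the failure cases exercised by TestFormatCache.
func checkCacheFormats(formats []*formatCacheV1) error {
	for i, format := range formats {
		if format.Version != formatMetaVersion1 {
			return fmt.Errorf("disk %d: unrecognized version %s", i+1, format.Version)
		}
		if format.Format != formatCache {
			return fmt.Errorf("disk %d: unrecognized format tag %s", i+1, format.Format)
		}
		if format.Cache.Version != formatCacheVersionV1 {
			return fmt.Errorf("disk %d: unrecognized cache version %s", i+1, format.Cache.Version)
		}
		if len(format.Cache.Disks) != len(formats) {
			return fmt.Errorf("disk %d: expected %d Disks entries, found %d",
				i+1, len(formats), len(format.Cache.Disks))
		}
	}
	// Every disk must agree on the Disks list, in the same order, and each
	// disk's own UUID must sit at its index in that list.
	reference := formats[0].Cache.Disks
	for i, format := range formats {
		for j := range reference {
			if format.Cache.Disks[j] != reference[j] {
				return fmt.Errorf("disk %d: inconsistent Disks entry at position %d", i+1, j)
			}
		}
		if format.Cache.This != reference[i] {
			return fmt.Errorf("disk %d: unexpected This UUID", i+1)
		}
	}
	return nil
}
```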


@ -90,7 +90,7 @@ func (fs *FSObjects) backgroundAppend(bucket, object, uploadID string) {
sort.Strings(entries)
for _, entry := range entries {
if entry == fsMetaJSONFile {
if entry == fs.metaJSONFile {
continue
}
partNumber, etag, err := fs.decodePartFile(entry)
@ -150,7 +150,7 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
// is the creation time of the uploadID, hence we will use that.
var uploads []MultipartInfo
for _, uploadID := range uploadIDs {
metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fsMetaJSONFile)
metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile)
fi, err := fsStatFile(metaFilePath)
if err != nil {
return result, toObjectErr(err, bucket, object)
@ -229,7 +229,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
return "", errors.Trace(err)
}
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fsMetaJSONFile), fsMetaBytes, 0644); err != nil {
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0644); err != nil {
return "", errors.Trace(err)
}
@ -291,7 +291,7 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fsMetaJSONFile))
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return pi, errors.Trace(InvalidUploadID{UploadID: uploadID})
@ -371,7 +371,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
}
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
_, err := fsStatFile(pathJoin(uploadIDDir, fsMetaJSONFile))
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return result, errors.Trace(InvalidUploadID{UploadID: uploadID})
@ -386,7 +386,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
partsMap := make(map[int]string)
for _, entry := range entries {
if entry == fsMetaJSONFile {
if entry == fs.metaJSONFile {
continue
}
partNumber, etag1, derr := fs.decodePartFile(entry)
@ -451,7 +451,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
result.Parts[i].Size = stat.Size()
}
fsMetaBytes, err := ioutil.ReadFile(pathJoin(uploadIDDir, fsMetaJSONFile))
fsMetaBytes, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
return result, errors.Trace(err)
}
@ -482,7 +482,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fsMetaJSONFile))
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return oi, errors.Trace(InvalidUploadID{UploadID: uploadID})
@ -601,7 +601,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
return oi, err
}
defer destLock.Unlock()
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
@ -609,7 +609,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
defer metaFile.Close()
// Read saved fs metadata for ongoing multipart.
fsMetaBuf, err := ioutil.ReadFile(pathJoin(uploadIDDir, fsMetaJSONFile))
fsMetaBuf, err := ioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
return oi, toObjectErr(errors.Trace(err), bucket, object)
}
@ -673,7 +673,7 @@ func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, u
uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID)
// Just check if the uploadID exists to avoid copy if it doesn't.
_, err := fsStatFile(pathJoin(uploadIDDir, fsMetaJSONFile))
_, err := fsStatFile(pathJoin(uploadIDDir, fs.metaJSONFile))
if err != nil {
if errors.Cause(err) == errFileNotFound || errors.Cause(err) == errFileAccessDenied {
return errors.Trace(InvalidUploadID{UploadID: uploadID})


@ -42,7 +42,8 @@ import (
type FSObjects struct {
// Path to be exported over S3 API.
fsPath string
// meta json filename, varies by fs / cache backend.
metaJSONFile string
// Unique value to be used for all
// temporary transactions.
fsUUID string
@ -94,8 +95,8 @@ func initMetaVolumeFS(fsPath, fsUUID string) error {
}
// NewFSObjectLayer - initialize new fs object layer.
func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
// newFSObjects - initialize new fs object layer.
func newFSObjects(fsPath, metaJSONFile string) (ObjectLayer, error) {
if fsPath == "" {
return nil, errInvalidArgument
}
@ -149,6 +150,7 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
// Initialize fs objects.
fs := &FSObjects{
fsPath: fsPath,
metaJSONFile: metaJSONFile,
fsUUID: fsUUID,
rwPool: &fsIOPool{
readersMap: make(map[string]*lock.RLockedFile),
@ -181,6 +183,11 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
return fs, nil
}
// NewFSObjectLayer - initialize new fs object layer.
func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
return newFSObjects(fsPath, fsMetaJSONFile)
}
// Shutdown - should be called when process shuts down.
func (fs *FSObjects) Shutdown(ctx context.Context) error {
fs.fsFormatRlk.Close()
@ -392,7 +399,7 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
// Close any writer which was initialized.
defer srcInfo.Writer.Close()
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fsMetaJSONFile)
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fs.metaJSONFile)
wlk, err := fs.rwPool.Write(fsMetaPath)
if err != nil {
return oi, toObjectErr(errors.Trace(err), srcBucket, srcObject)
@ -487,7 +494,7 @@ func (fs *FSObjects) getObject(bucket, object string, offset int64, length int64
}
if bucket != minioMetaBucket {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
if lock {
_, err = fs.rwPool.Open(fsMetaPath)
if err != nil && err != errFileNotFound {
@ -554,10 +561,10 @@ func (fs *FSObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e erro
return oi, toObjectErr(errFileNotFound, bucket, object)
}
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
// Read `fs.json` to perhaps contend with
// parallel Put() operations.
rlk, err := fs.rwPool.Open(fsMetaPath)
if err == nil {
// Read from fs metadata only if it exists.
@ -646,8 +653,9 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string
// putObject - wrapper for PutObject
func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader, metadata map[string]string) (objInfo ObjectInfo, retErr error) {
// No metadata is set, allocate a new one.
if metadata == nil {
metadata = make(map[string]string)
meta := make(map[string]string)
for k, v := range metadata {
meta[k] = v
}
var err error
@ -657,7 +665,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
}
fsMeta := newFSMetaV1()
fsMeta.Meta = metadata
fsMeta.Meta = meta
// This is a special case with size as '0' and object ends
// with a slash separator, we treat it like a valid operation
@ -694,7 +702,8 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
var wlk *lock.LockedFile
if bucket != minioMetaBucket {
bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fsMetaJSONFile)
fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
wlk, err = fs.rwPool.Create(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object)
@ -729,7 +738,7 @@ func (fs *FSObjects) putObject(bucket string, object string, data *hash.Reader,
return ObjectInfo{}, toObjectErr(err, bucket, object)
}
metadata["etag"] = hex.EncodeToString(data.MD5Current())
fsMeta.Meta["etag"] = hex.EncodeToString(data.MD5Current())
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
@ -791,7 +800,7 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er
}
minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket)
fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
if bucket != minioMetaBucket {
rwlk, lerr := fs.rwPool.Write(fsMetaPath)
if lerr == nil {
@ -839,7 +848,7 @@ func (fs *FSObjects) listDirFactory(isLeaf isLeafFunc) listDirFunc {
// getObjectETag is a helper function, which returns only the md5sum
// of the file on the disk.
func (fs *FSObjects) getObjectETag(bucket, entry string, lock bool) (string, error) {
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fsMetaJSONFile)
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, entry, fs.metaJSONFile)
var reader io.Reader
var fi os.FileInfo


@ -281,6 +281,11 @@ func ErrorRespToObjectError(err error, params ...string) error {
object = params[1]
}
if isNetworkOrHostDown(err) {
e.Cause = BackendDown{}
return e
}
minioErr, ok := err.(minio.ErrorResponse)
if !ok {
// We don't interpret non Minio errors. As minio errors will


@ -17,6 +17,7 @@
package cmd
import (
"context"
"fmt"
"strings"
)
@ -24,7 +25,11 @@ import (
// Prints the formatted startup message.
func printGatewayStartupMessage(apiEndPoints []string, backendType string) {
strippedAPIEndpoints := stripStandardPorts(apiEndPoints)
// If cache layer is enabled, print cache capacity.
cacheObjectAPI := newCacheObjectsFn()
if cacheObjectAPI != nil {
printCacheStorageInfo(cacheObjectAPI.StorageInfo(context.Background()))
}
// Prints credential.
printGatewayCommonMsg(strippedAPIEndpoints)


@ -72,6 +72,11 @@ ENVIRONMENT VARIABLES:
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
UPDATE:
MINIO_UPDATE: To turn off in-place upgrades, set this value to "off".
@ -89,6 +94,14 @@ EXAMPLES:
$ export MINIO_SECRET_KEY=azureaccountkey
$ {{.HelpName}} https://azure.example.com
3. Start minio gateway server for Azure Blob Storage backend with edge caching enabled.
$ export MINIO_ACCESS_KEY=azureaccountname
$ export MINIO_SECRET_KEY=azureaccountkey
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{


@ -63,6 +63,11 @@ ENVIRONMENT VARIABLES:
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
UPDATE:
MINIO_UPDATE: To turn off in-place upgrades, set this value to "off".
@ -74,6 +79,14 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=accountID
$ export MINIO_SECRET_KEY=applicationKey
$ {{.HelpName}}
2. Start minio gateway server for B2 backend with edge caching enabled.
$ export MINIO_ACCESS_KEY=accountID
$ export MINIO_SECRET_KEY=applicationKey
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{
Name: b2Backend,


@ -109,6 +109,11 @@ ENVIRONMENT VARIABLES:
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
UPDATE:
MINIO_UPDATE: To turn off in-place upgrades, set this value to "off".
@ -125,6 +130,15 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ {{.HelpName}} mygcsprojectid
2. Start minio gateway server for GCS backend with edge caching enabled.
$ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}} mygcsprojectid
`
minio.RegisterGatewayCommand(cli.Command{


@ -74,6 +74,11 @@ ENVIRONMENT VARIABLES:
DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests. Set this value to Minio host domain name.
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
EXAMPLES:
1. Start minio gateway server for Manta Object Storage backend.
$ export MINIO_ACCESS_KEY=manta_account_name
@ -90,6 +95,14 @@ EXAMPLES:
$ export MINIO_SECRET_KEY=manta_key_id
$ export MANTA_KEY_MATERIAL=~/.ssh/custom_rsa
$ {{.HelpName}}
4. Start minio gateway server for Manta Object Storage backend with edge caching enabled.
$ export MINIO_ACCESS_KEY=manta_account_name
$ export MINIO_SECRET_KEY=manta_key_id
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{


@ -50,6 +50,11 @@ ENVIRONMENT VARIABLES:
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
UPDATE:
MINIO_UPDATE: To turn off in-place upgrades, set this value to "off".
@ -61,6 +66,14 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ {{.HelpName}} /shared/nasvol
2. Start minio gateway server for NAS with edge caching enabled.
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}} /shared/nasvol
`
minio.RegisterGatewayCommand(cli.Command{


@ -71,6 +71,11 @@ ENVIRONMENT VARIABLES:
DOMAIN:
MINIO_DOMAIN: To enable virtual-host-style requests. Set this value to Minio host domain name.
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
EXAMPLES:
1. Start minio gateway server for Aliyun OSS backend.
$ export MINIO_ACCESS_KEY=accesskey
@ -81,6 +86,14 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
$ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
$ {{.HelpName}} https://oss.example.com
3. Start minio gateway server for Aliyun OSS backend with edge caching enabled.
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{


@ -56,6 +56,11 @@ ENVIRONMENT VARIABLES:
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
UPDATE:
MINIO_UPDATE: To turn off in-place upgrades, set this value to "off".
@ -72,6 +77,14 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=Q3AM3UQ867SPQQA43P2F
$ export MINIO_SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG
$ {{.HelpName}} https://play.minio.io:9000
3. Start minio gateway server for AWS S3 backend with edge caching enabled.
$ export MINIO_ACCESS_KEY=accesskey
$ export MINIO_SECRET_KEY=secretkey
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{


@ -73,6 +73,11 @@ ENVIRONMENT VARIABLES: (Default values in parenthesis)
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
UPDATE:
MINIO_UPDATE: To turn off in-place upgrades, set this value to "off".
@ -85,6 +90,12 @@ ENVIRONMENT VARIABLES: (Default values in parenthesis)
EXAMPLES:
1. Start minio gateway server for Sia backend.
$ {{.HelpName}}
2. Start minio gateway server for Sia backend with edge caching enabled.
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}}
`
minio.RegisterGatewayCommand(cli.Command{


@ -174,7 +174,16 @@ var (
globalWORMEnabled bool
// Is Disk Caching set up
globalIsDiskCacheEnabled bool
// Disk cache drives
globalCacheDrives []string
// Disk cache excludes
globalCacheExcludes []string
// Disk cache expiry
globalCacheExpiry = 90
// Add new variable global values here.
)
// global colors.


@ -61,6 +61,7 @@ var supportedHeaders = []string{
"content-encoding",
"content-disposition",
amzStorageClass,
"expires",
// Add more supported headers here.
}


@ -45,6 +45,9 @@ var globalObjLayerMutex *sync.RWMutex
// Global object layer, only accessed by newObjectLayerFn().
var globalObjectAPI ObjectLayer
// Global cacheObjects, only accessed by newCacheObjectsFn().
var globalCacheObjectAPI CacheObjectLayer
func init() {
// Initialize this once per server initialization.
globalObjLayerMutex = &sync.RWMutex{}


@ -108,6 +108,8 @@ type ObjectInfo struct {
Writer io.WriteCloser `json:"-"`
Reader *hash.Reader `json:"-"`
metadataOnly bool
// Date and time when the object was last accessed.
AccTime time.Time
}
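The new `AccTime` field pairs with the vendored `github.com/djherbis/atime` package added later in this commit. A minimal sketch of how a cache layer could populate it, assuming the imports `os`, `time`, and `github.com/djherbis/atime`; the `accessTime` helper is hypothetical:

```go
// accessTime returns the last access time of a cached file, or the
// zero time when the file cannot be stat'ed.
func accessTime(cachedPath string) time.Time {
	fi, err := os.Stat(cachedPath)
	if err != nil {
		return time.Time{}
	}
	// atime.Get reads the platform-specific access time from FileInfo.
	return atime.Get(fi)
}
```

The vendored package's `atime.Stat(name)` performs the same stat-then-Get in a single call.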
// ListPartsInfo - represents list of all parts.


@ -391,6 +391,13 @@ func (e UnsupportedMetadata) Error() string {
return "Unsupported headers in Metadata"
}
// BackendDown is returned for network errors or if the gateway's backend is down.
type BackendDown struct{}
func (e BackendDown) Error() string {
return "Backend down"
}
// isErrIncompleteBody - Check if error type is IncompleteBody.
func isErrIncompleteBody(err error) bool {
err = errors.Cause(err)


@ -232,10 +232,13 @@ func isETagEqual(left, right string) bool {
// deleteObject is a convenient wrapper to delete an object, this
// is a common function to be called from object handlers and
// web handlers.
func deleteObject(ctx context.Context, obj ObjectLayer, bucket, object string, r *http.Request) (err error) {
func deleteObject(ctx context.Context, obj ObjectLayer, cache CacheObjectLayer, bucket, object string, r *http.Request) (err error) {
deleteObject := obj.DeleteObject
if cache != nil {
deleteObject = cache.DeleteObject
}
// Proceed to delete the object.
if err = obj.DeleteObject(ctx, bucket, object); err != nil {
if err = deleteObject(ctx, bucket, object); err != nil {
return err
}


@ -100,7 +100,12 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
return
}
objInfo, err := objectAPI.GetObjectInfo(ctx, bucket, object)
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
objInfo, err := getObjectInfo(ctx, bucket, object)
if err != nil {
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
@ -170,8 +175,13 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
setHeadGetRespHeaders(w, r.URL.Query())
httpWriter := ioutil.WriteOnClose(writer)
// Reads the object at startOffset and writes to httpWriter.
if err = objectAPI.GetObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
getObject := objectAPI.GetObject
if api.CacheAPI() != nil && !hasSSECustomerHeader(r.Header) {
getObject = api.CacheAPI().GetObject
}
// Reads the object at startOffset and writes to httpWriter.
if err = getObject(ctx, bucket, object, startOffset, length, httpWriter, objInfo.ETag); err != nil {
errorIf(err, "Unable to write to client.")
if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -227,7 +237,12 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
return
}
objInfo, err := objectAPI.GetObjectInfo(ctx, bucket, object)
getObjectInfo := objectAPI.GetObjectInfo
if api.CacheAPI() != nil {
getObjectInfo = api.CacheAPI().GetObjectInfo
}
objInfo, err := getObjectInfo(ctx, bucket, object)
if err != nil {
apiErr := toAPIErrorCode(err)
if apiErr == ErrNoSuchKey {
@ -319,7 +334,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
vars := mux.Vars(r)
dstBucket := vars["bucket"]
dstObject := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@ -645,6 +659,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
sha256hex = ""
reader io.Reader
s3Err APIErrorCode
putObject = objectAPI.PutObject
)
reader = r.Body
switch rAuthType {
@ -713,7 +728,11 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
}
}
objInfo, err := objectAPI.PutObject(ctx, bucket, object, hashReader, metadata)
if api.CacheAPI() != nil && !hasSSECustomerHeader(r.Header) {
putObject = api.CacheAPI().PutObject
}
// Create the object.
objInfo, err := putObject(ctx, bucket, object, hashReader, metadata)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -763,7 +782,6 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
if s3Error := checkRequestAuthType(r, bucket, "s3:PutObject", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
@ -820,7 +838,11 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
metadata[k] = v
}
uploadID, err := objectAPI.NewMultipartUpload(ctx, bucket, object, metadata)
newMultipartUpload := objectAPI.NewMultipartUpload
if api.CacheAPI() != nil {
newMultipartUpload = api.CacheAPI().NewMultipartUpload
}
uploadID, err := newMultipartUpload(ctx, bucket, object, metadata)
if err != nil {
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -1036,7 +1058,6 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
@ -1208,7 +1229,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
}
}
partInfo, err := objectAPI.PutObjectPart(ctx, bucket, object, uploadID, partID, hashReader)
putObjectPart := objectAPI.PutObjectPart
if api.CacheAPI() != nil {
putObjectPart = api.CacheAPI().PutObjectPart
}
partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, hashReader)
if err != nil {
// Verify if the underlying error is signature mismatch.
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
@ -1234,7 +1259,10 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
writeErrorResponse(w, ErrServerNotInitialized, r.URL)
return
}
abortMultipartUpload := objectAPI.AbortMultipartUpload
if api.CacheAPI() != nil {
abortMultipartUpload = api.CacheAPI().AbortMultipartUpload
}
if s3Error := checkRequestAuthType(r, bucket, "s3:AbortMultipartUpload", globalServerConfig.GetRegion()); s3Error != ErrNone {
writeErrorResponse(w, s3Error, r.URL)
return
@ -1249,7 +1277,7 @@ func (api objectAPIHandlers) AbortMultipartUploadHandler(w http.ResponseWriter,
}
uploadID, _, _, _ := getObjectResources(r.URL.Query())
if err := objectAPI.AbortMultipartUpload(ctx, bucket, object, uploadID); err != nil {
if err := abortMultipartUpload(ctx, bucket, object, uploadID); err != nil {
errorIf(err, "AbortMultipartUpload failed")
writeErrorResponse(w, toAPIErrorCode(err), r.URL)
return
@ -1353,7 +1381,11 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
completeParts = append(completeParts, part)
}
objInfo, err := objectAPI.CompleteMultipartUpload(ctx, bucket, object, uploadID, completeParts)
completeMultiPartUpload := objectAPI.CompleteMultipartUpload
if api.CacheAPI() != nil {
completeMultiPartUpload = api.CacheAPI().CompleteMultipartUpload
}
objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, completeParts)
if err != nil {
err = errors.Cause(err)
switch oErr := err.(type) {
@ -1434,7 +1466,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.
// Ignore delete object errors while replying to client, since we are
// supposed to reply only 204. Additionally log the error for
// investigation.
if err := deleteObject(ctx, objectAPI, bucket, object, r); err != nil {
if err := deleteObject(ctx, objectAPI, api.CacheAPI(), bucket, object, r); err != nil {
errorIf(err, "Unable to delete an object %s", pathJoin(bucket, object))
}
writeSuccessNoContent(w)


@ -29,6 +29,10 @@ func newObjectLayerFn() (layer ObjectLayer) {
return
}
func newCacheObjectsFn() CacheObjectLayer {
return globalCacheObjectAPI
}
// Composed function registering routers for only distributed XL setup.
func registerDistXLRouters(mux *router.Router, endpoints EndpointList) error {
// Register storage rpc router only if its a distributed setup.


@ -67,6 +67,11 @@ ENVIRONMENT VARIABLES:
BROWSER:
MINIO_BROWSER: To disable web browser access, set this value to "off".
CACHE:
MINIO_CACHE_DRIVES: List of cache drives delimited by ";"
MINIO_CACHE_EXCLUDE: List of cache exclusion patterns delimited by ";"
MINIO_CACHE_EXPIRY: Cache expiry duration in days
REGION:
MINIO_REGION: To set custom region. By default all regions are accepted.
@ -108,6 +113,12 @@ EXAMPLES:
$ export MINIO_ACCESS_KEY=minio
$ export MINIO_SECRET_KEY=miniostorage
$ {{.HelpName}} http://node{1...8}.example.com/mnt/export/{1...8}
7. Start minio server with edge caching enabled.
$ export MINIO_CACHE_DRIVES="/home/drive1;/home/drive2;/home/drive3;/home/drive4"
$ export MINIO_CACHE_EXCLUDE="bucket1/*;*.png"
$ export MINIO_CACHE_EXPIRY=40
$ {{.HelpName}} /home/shared
`,
}


@ -47,7 +47,11 @@ func getFormatStr(strLen int, padding int) string {
func printStartupMessage(apiEndPoints []string) {
strippedAPIEndpoints := stripStandardPorts(apiEndPoints)
// If cache layer is enabled, print cache capacity.
cacheObjectAPI := newCacheObjectsFn()
if cacheObjectAPI != nil {
printCacheStorageInfo(cacheObjectAPI.StorageInfo(context.Background()))
}
// Object layer is initialized then print StorageInfo.
objAPI := newObjectLayerFn()
if objAPI != nil {
@ -184,6 +188,13 @@ func printStorageInfo(storageInfo StorageInfo) {
log.Println()
}
func printCacheStorageInfo(storageInfo StorageInfo) {
msg := fmt.Sprintf("%s %s Free, %s Total", colorBlue("Cache Capacity:"),
humanize.IBytes(uint64(storageInfo.Free)),
humanize.IBytes(uint64(storageInfo.Total)))
log.Println(msg)
}
// Prints certificate expiry date warning
func getCertificateChainMsg(certs []*x509.Certificate) string {
msg := colorBlue("\nCertificate expiry info:\n")


@ -545,6 +545,14 @@ func resetGlobalHealState() {
}
}
}
func resetGlobalCacheEnvs() {
globalIsDiskCacheEnabled = false
}
// sets globalCacheObjectAPI to `nil`.
func resetGlobalCacheObjectAPI() {
globalCacheObjectAPI = nil
}
// Resets all the globals modified in tests.
// Resetting ensures that changes made to globals by one test don't affect others.
@ -567,6 +575,10 @@ func resetTestGlobals() {
resetGlobalStorageEnvs()
// Reset global heal state
resetGlobalHealState()
// Reset global disk cache flags
resetGlobalCacheEnvs()
// Set globalCacheObjectAPI to nil
resetGlobalCacheObjectAPI()
}
// Configure the server for the test run.
@ -2199,13 +2211,17 @@ func registerAPIFunctions(muxRouter *router.Router, objLayer ObjectLayer, apiFun
bucketRouter := apiRouter.PathPrefix("/{bucket}").Subrouter()
// All object storage operations are registered as HTTP handlers on `objectAPIHandlers`.
// When the handlers get a HTTP request they use the underlyting ObjectLayer to perform operations.
// When the handlers get a HTTP request they use the underlying ObjectLayer to perform operations.
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
// When cache is enabled, Put and Get operations are passed
// to the underlying cache layer, which manages both the object
// layer operation and the disk caching operation.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
}
// Register ListBuckets handler.


@ -334,3 +334,34 @@ func newContext(r *http.Request, api string) context.Context {
return logger.SetContext(context.Background(), &logger.ReqInfo{r.RemoteAddr, r.Header.Get("user-agent"), "", api, bucket, object, nil})
}
// isNetworkOrHostDown - if there was a network error or if the host is down.
func isNetworkOrHostDown(err error) bool {
if err == nil {
return false
}
switch err.(type) {
case *net.DNSError, *net.OpError, net.UnknownNetworkError:
return true
case *url.Error:
// For a URL error, where it replies back "connection closed"
if strings.Contains(err.Error(), "Connection closed by foreign host") {
return true
}
return true
default:
if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
// If error is - tlsHandshakeTimeoutError.
return true
} else if strings.Contains(err.Error(), "i/o timeout") {
// If error is - tcp timeoutError.
return true
} else if strings.Contains(err.Error(), "connection timed out") {
// If err is a net.Dial timeout.
return true
} else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
return true
}
}
return false
}


@ -155,7 +155,11 @@ func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs,
return toJSONError(errAuthentication)
}
err := objectAPI.DeleteBucket(context.Background(), args.BucketName)
deleteBucket := objectAPI.DeleteBucket
if web.CacheAPI() != nil {
deleteBucket = web.CacheAPI().DeleteBucket
}
err := deleteBucket(context.Background(), args.BucketName)
if err != nil {
return toJSONError(err, args.BucketName)
}
@ -184,11 +188,15 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
if objectAPI == nil {
return toJSONError(errServerNotInitialized)
}
listBuckets := objectAPI.ListBuckets
if web.CacheAPI() != nil {
listBuckets = web.CacheAPI().ListBuckets
}
authErr := webRequestAuthenticate(r)
if authErr != nil {
return toJSONError(authErr)
}
buckets, err := objectAPI.ListBuckets(context.Background())
buckets, err := listBuckets(context.Background())
if err != nil {
return toJSONError(err)
}
@ -237,6 +245,10 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
if objectAPI == nil {
return toJSONError(errServerNotInitialized)
}
listObjects := objectAPI.ListObjects
if web.CacheAPI() != nil {
listObjects = web.CacheAPI().ListObjects
}
prefix := args.Prefix + "test" // To test if GetObject/PutObject with the specified prefix is allowed.
readable := isBucketActionAllowed("s3:GetObject", args.BucketName, prefix, objectAPI)
writable := isBucketActionAllowed("s3:PutObject", args.BucketName, prefix, objectAPI)
@ -257,7 +269,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r
default:
return errAuthentication
}
lo, err := objectAPI.ListObjects(context.Background(), args.BucketName, args.Prefix, args.Marker, slashSeparator, 1000)
lo, err := listObjects(context.Background(), args.BucketName, args.Prefix, args.Marker, slashSeparator, 1000)
if err != nil {
return &json2.Error{Message: err.Error()}
}
@ -301,6 +313,10 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs,
if objectAPI == nil {
return toJSONError(errServerNotInitialized)
}
listObjects := objectAPI.ListObjects
if web.CacheAPI() != nil {
listObjects = web.CacheAPI().ListObjects
}
if !isHTTPRequestValid(r) {
return toJSONError(errAuthentication)
}
@ -314,7 +330,7 @@ next:
for _, objectName := range args.Objects {
// If not a directory, remove the object.
if !hasSuffix(objectName, slashSeparator) && objectName != "" {
if err = deleteObject(nil, objectAPI, args.BucketName, objectName, r); err != nil {
if err = deleteObject(nil, objectAPI, web.CacheAPI(), args.BucketName, objectName, r); err != nil {
break next
}
continue
@ -324,13 +340,13 @@ next:
marker := ""
for {
var lo ListObjectsInfo
lo, err = objectAPI.ListObjects(context.Background(), args.BucketName, objectName, marker, "", 1000)
lo, err = listObjects(context.Background(), args.BucketName, objectName, marker, "", 1000)
if err != nil {
break next
}
marker = lo.NextMarker
for _, obj := range lo.Objects {
err = deleteObject(nil, objectAPI, args.BucketName, obj.Name, r)
err = deleteObject(nil, objectAPI, web.CacheAPI(), args.BucketName, obj.Name, r)
if err != nil {
break next
}
@ -529,6 +545,10 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
return
}
putObject := objectAPI.PutObject
if web.CacheAPI() != nil {
putObject = web.CacheAPI().PutObject
}
vars := mux.Vars(r)
bucket := vars["bucket"]
object := vars["object"]
@ -563,7 +583,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) {
return
}
objInfo, err := objectAPI.PutObject(context.Background(), bucket, object, hashReader, metadata)
objInfo, err := putObject(context.Background(), bucket, object, hashReader, metadata)
if err != nil {
writeWebErrorResponse(w, err)
return
@ -596,10 +616,14 @@ func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) {
return
}
getObject := objectAPI.GetObject
if web.CacheAPI() != nil {
getObject = web.CacheAPI().GetObject
}
// Add content disposition.
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", path.Base(object)))
if err := objectAPI.GetObject(context.Background(), bucket, object, 0, -1, w, ""); err != nil {
if err := getObject(context.Background(), bucket, object, 0, -1, w, ""); err != nil {
/// No need to print error, response writer already written to.
return
}
@ -621,7 +645,14 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
writeWebErrorResponse(w, errServerNotInitialized)
return
}
getObject := objectAPI.GetObject
if web.CacheAPI() != nil {
getObject = web.CacheAPI().GetObject
}
listObjects := objectAPI.ListObjects
if web.CacheAPI() != nil {
listObjects = web.CacheAPI().ListObjects
}
// Auth is done after reading the body to accommodate for anonymous requests
// when bucket policy is enabled.
var args DownloadZipArgs
@ -644,11 +675,14 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
archive := zip.NewWriter(w)
defer archive.Close()
getObjectInfo := objectAPI.GetObjectInfo
if web.CacheAPI() != nil {
getObjectInfo = web.CacheAPI().GetObjectInfo
}
for _, object := range args.Objects {
// Writes compressed object file to the response.
zipit := func(objectName string) error {
info, err := objectAPI.GetObjectInfo(context.Background(), args.BucketName, objectName)
info, err := getObjectInfo(context.Background(), args.BucketName, objectName)
if err != nil {
return err
}
@ -663,7 +697,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
writeWebErrorResponse(w, errUnexpected)
return err
}
return objectAPI.GetObject(context.Background(), args.BucketName, objectName, 0, info.Size, writer, "")
return getObject(context.Background(), args.BucketName, objectName, 0, info.Size, writer, "")
}
if !hasSuffix(object, slashSeparator) {
@ -679,7 +713,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) {
// date to the response writer.
marker := ""
for {
lo, err := objectAPI.ListObjects(context.Background(), args.BucketName, pathJoin(args.Prefix, object), marker, "", 1000)
lo, err := listObjects(context.Background(), args.BucketName, pathJoin(args.Prefix, object), marker, "", 1000)
if err != nil {
return
}


@ -32,6 +32,7 @@ import (
// webAPI container for Web API.
type webAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
}
// indexHandler - Handler to serve index.html
@ -63,6 +64,7 @@ func registerWebRouter(mux *router.Router) error {
// Initialize Web.
web := &webAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCacheObjectsFn,
}
// Initialize a new json2 codec.


@ -92,6 +92,13 @@ minio server /data
By default, parity for objects with standard storage class is set to `N/2`, and parity for objects with reduced redundancy storage class objects is set to `2`. Read more about storage class support in Minio server [here](https://github.com/minio/minio/blob/master/docs/erasure/storage-class/README.md).
### Cache
|Field|Type|Description|
|:---|:---|:---|
|``drives``| _[]string_ | List of cache drives |
|``exclude`` | _[]string_ | List of wildcard patterns for prefixes to exclude from cache |
|``expiry`` | _int_ | Cache expiry duration in days |
#### Notify
|Field|Type|Description|
|:---|:---|:---|


@ -11,6 +11,11 @@
"standard": "",
"rrs": ""
},
"cache": {
"drives": [],
"expiry": 90,
"exclude": []
},
"notify": {
"amqp": {
"1": {


@ -0,0 +1,55 @@
## Disk based caching
Disk caching can be turned on by updating the "cache" config
settings for minio server. By default, this is at `${HOME}/.minio`.
"cache" takes the drives location, duration to expiry (in days) and any
wildcard patterns to exclude certain content from cache as
configuration settings.
```
"cache": {
"drives": ["/path/drive1", "/path/drive2", "/path/drive3"],
"expiry": 30,
"exclude": ["*.png","bucket1/a/b","bucket2/*"]
},
```
The cache settings can also be set by the environment variables
below. When set, environment variables override any cache settings in config.json.
```
export MINIO_CACHE_DRIVES="/drive1;/drive2;/drive3"
export MINIO_CACHE_EXPIRY=90
export MINIO_CACHE_EXCLUDE="pattern1;pattern2;pattern3"
```
- Cache size is 80% of drive capacity. Disk caching requires
Atime support to be enabled on the cache drive.
- Expiration of entries takes the user-provided expiry as a hint,
and defaults to 90 days if not provided.
- A garbage collection sweep of expired entries happens whenever
disk usage exceeds 80% of drive capacity, and continues until
sufficient disk space has been freed.
- An object is cached only when the drive has free space for at
least 100 times the size of the object (see the sketch below).
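A rough sketch of the two thresholds above; the 80% watermark and the 100x free-space requirement come from this document, while the function and constant names are assumptions:

```go
const (
	cacheGCWatermark  = 0.80 // GC sweeps run while usage exceeds this fraction
	minFreeMultiplier = 100  // free space must cover 100x the object size
)

// shouldCacheObject reports whether an object of objSize bytes may be
// admitted to a cache drive with the given used/total capacity in bytes.
func shouldCacheObject(used, total, objSize int64) bool {
	if float64(used)/float64(total) > cacheGCWatermark {
		return false // over the watermark; evict before admitting more
	}
	return total-used >= minFreeMultiplier*objSize
}
```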
### Behavior
Disk caching happens on both GET and PUT operations.
- GET caches new objects when no entry is found in the cache,
and otherwise serves the object from the cache.
- PUT/POST caches all successfully uploaded objects, replacing
any existing cached entry for the same object.
When an object is deleted, it is automatically cleared from the cache.
NOTE: Expiration happens automatically based on the configured
interval as explained above. Frequently accessed objects stay in
the cache significantly longer, since every cache hit refreshes
the entry's access time.
The following caveats apply in offline mode:
- GET, LIST and HEAD operations will be served from the disk cache.
- PUT operations are disallowed when gateway backend is offline.
- Anonymous operations are not supported as of now.
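Putting the behavior rules together, the GET path can be pictured as below. This is a simplified sketch under the assumptions stated in the comments, not the actual cache layer code:

```go
// cachedGetObject serves a GET from the cache when an entry exists and
// otherwise falls back to the storage backend, matching the behavior
// described above. The ObjectLayer wiring is an assumption.
func cachedGetObject(ctx context.Context, cache, backend ObjectLayer,
	bucket, object string, w io.Writer) error {
	if err := cache.GetObject(ctx, bucket, object, 0, -1, w, ""); err == nil {
		return nil // served from cache
	}
	// Cache miss: fetch from the backend. A real implementation would
	// also tee the stream into the cache for subsequent requests; when
	// the backend is offline and the object is cached, the first call
	// above already satisfied the request.
	return backend.GetObject(ctx, bucket, object, 0, -1, w, "")
}
```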

vendor/github.com/djherbis/atime/LICENSE generated vendored Normal file

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Dustin H
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/djherbis/atime/README.md generated vendored Normal file

@ -0,0 +1,42 @@
atime
==========
[![GoDoc](https://godoc.org/github.com/djherbis/atime?status.svg)](https://godoc.org/github.com/djherbis/atime)
[![Release](https://img.shields.io/github/release/djherbis/atime.svg)](https://github.com/djherbis/atime/releases/latest)
[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt)
[![Build Status](https://travis-ci.org/djherbis/atime.svg?branch=master)](https://travis-ci.org/djherbis/atime)
[![Coverage Status](https://coveralls.io/repos/djherbis/atime/badge.svg?branch=master)](https://coveralls.io/r/djherbis/atime?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/djherbis/atime)](https://goreportcard.com/report/github.com/djherbis/atime)
[![Sourcegraph](https://sourcegraph.com/github.com/djherbis/atime/-/badge.svg)](https://sourcegraph.com/github.com/djherbis/atime?badge)
Usage
------------
File Access Times for #golang
Looking for ctime or btime? Checkout https://github.com/djherbis/times
Go has a hidden atime function for most platforms, this repo makes it accessible.
```go
package main
import (
"log"
"github.com/djherbis/atime"
)
func main() {
at, err := atime.Stat("myfile")
if err != nil {
log.Fatal(err.Error())
}
log.Println(at)
}
```
Installation
------------
```sh
go get github.com/djherbis/atime
```

vendor/github.com/djherbis/atime/atime_darwin.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_darwin.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)
}

vendor/github.com/djherbis/atime/atime_dragonfly.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_dragonfly.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
}

vendor/github.com/djherbis/atime/atime_freebsd.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_freebsd.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)
}

vendor/github.com/djherbis/atime/atime_linux.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_linux.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
}

vendor/github.com/djherbis/atime/atime_nacl.go generated vendored Normal file

@ -0,0 +1,22 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_nacl.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(sec, nsec int64) time.Time {
return time.Unix(sec, nsec)
}
func atime(fi os.FileInfo) time.Time {
st := fi.Sys().(*syscall.Stat_t)
return timespecToTime(st.Atime, st.AtimeNsec)
}

vendor/github.com/djherbis/atime/atime_netbsd.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_netbsd.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)
}

vendor/github.com/djherbis/atime/atime_openbsd.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_openbsd.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
}

vendor/github.com/djherbis/atime/atime_plan9.go generated vendored Normal file

@ -0,0 +1,16 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_plan9.go
package atime
import (
"os"
"syscall"
"time"
)
func atime(fi os.FileInfo) time.Time {
return time.Unix(int64(fi.Sys().(*syscall.Dir).Atime), 0)
}

vendor/github.com/djherbis/atime/atime_solaris.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_solaris.go
package atime
import (
"os"
"syscall"
"time"
)
func timespecToTime(ts syscall.Timespec) time.Time {
return time.Unix(int64(ts.Sec), int64(ts.Nsec))
}
func atime(fi os.FileInfo) time.Time {
return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
}

vendor/github.com/djherbis/atime/atime_windows.go generated vendored Normal file

@ -0,0 +1,17 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// http://golang.org/src/os/stat_windows.go
package atime
import (
"os"
"syscall"
"time"
)
func atime(fi os.FileInfo) time.Time {
return time.Unix(0, fi.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
}

vendor/github.com/djherbis/atime/stat.go generated vendored Normal file

@ -0,0 +1,21 @@
// Package atime provides a platform-independent way to get atimes for files.
package atime
import (
"os"
"time"
)
// Get returns the Last Access Time for the given FileInfo
func Get(fi os.FileInfo) time.Time {
return atime(fi)
}
// Stat returns the Last Access Time for the given filename
func Stat(name string) (time.Time, error) {
fi, err := os.Stat(name)
if err != nil {
return time.Time{}, err
}
return atime(fi), nil
}

vendor/vendor.json vendored

@ -107,6 +107,12 @@
"revision": "01aeca54ebda6e0fbfafd0a524d234159c05ec20",
"revisionTime": "2016-07-05T13:30:06-07:00"
},
{
"checksumSHA1": "QF48SiRNX1YDARpi0rJtgAizF5w=",
"path": "github.com/djherbis/atime",
"revision": "89517e96e10b93292169a79fd4523807bdc5d5fa",
"revisionTime": "2017-02-15T08:49:34Z"
},
{
"checksumSHA1": "rhLUtXvcmouYuBwOq9X/nYKzvNg=",
"path": "github.com/dustin/go-humanize",