package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
	"sync"
	"time"

	"github.com/djherbis/atime"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
	"github.com/minio/minio/pkg/wildcard"
)

const (
	// cacheBlkSize - 1 MiB block size used for cache drive I/O.
	cacheBlkSize = int64(1 * 1024 * 1024)
)

// CacheStorageInfo - represents total, free capacity of
// underlying cache storage.
type CacheStorageInfo struct {
	Total uint64 // Total cache disk space.
	Free  uint64 // Free cache available space.
}

// CacheObjectLayer implements primitives for cache object API layer.
type CacheObjectLayer interface {
	// Object operations.
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObject(ctx context.Context, bucket, object string) error
	DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)

	// Storage operations.
	StorageInfo(ctx context.Context) CacheStorageInfo
}
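
// cacheObjects (defined below) is the concrete CacheObjectLayer returned by
// newServerCacheObjects when disk caching is enabled.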

// Abstracts disk caching - used by the S3 layer
type cacheObjects struct {
	// slice of cache drives
	cache []*diskCache
	// file path patterns to exclude from cache
	exclude []string
	// to manage cache namespace locks
	nsMutex *nsLockMap

	// if true migration is in progress from v1 to v2
	migrating bool
	// mutex to protect migration bool
	migMutex sync.Mutex

	// Object functions pointing to the corresponding functions of backend implementation.
	GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObjectInfoFn  func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObjectFn   func(ctx context.Context, bucket, object string) error
	DeleteObjectsFn  func(ctx context.Context, bucket string, objects []string) ([]error, error)
	PutObjectFn      func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
}

// delete takes an exclusive namespace lock and removes the cached object.
func (c *cacheObjects) delete(ctx context.Context, dcache *diskCache, bucket, object string) (err error) {
	cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
	if err := cLock.GetLock(globalObjectTimeout); err != nil {
		return err
	}
	defer cLock.Unlock()
	return dcache.Delete(ctx, bucket, object)
}

// put takes an exclusive namespace lock and writes the object to the cache drive.
func (c *cacheObjects) put(ctx context.Context, dcache *diskCache, bucket, object string, data io.Reader, size int64, opts ObjectOptions) error {
	cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
	if err := cLock.GetLock(globalObjectTimeout); err != nil {
		return err
	}
	defer cLock.Unlock()
	return dcache.Put(ctx, bucket, object, data, size, opts)
}

// get takes a shared (read) namespace lock and streams the object from the cache drive.
func (c *cacheObjects) get(ctx context.Context, dcache *diskCache, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, err error) {
	cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
	if err := cLock.GetRLock(globalObjectTimeout); err != nil {
		return nil, err
	}

	defer cLock.RUnlock()
	return dcache.Get(ctx, bucket, object, rs, h, opts)
}

// stat takes a shared (read) namespace lock and returns cached object metadata.
func (c *cacheObjects) stat(ctx context.Context, dcache *diskCache, bucket, object string) (oi ObjectInfo, err error) {
	cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
	if err := cLock.GetRLock(globalObjectTimeout); err != nil {
		return oi, err
	}

	defer cLock.RUnlock()
	return dcache.Stat(ctx, bucket, object)
}
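
// Note: delete and put above acquire exclusive namespace locks, while get and
// stat acquire shared locks, so concurrent reads of a cached object never block
// one another but are serialized against writers on the same bucket/object.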

// DeleteObject clears cache entry if backend delete operation succeeds
func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) {
	if err = c.DeleteObjectFn(ctx, bucket, object); err != nil {
		return
	}
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return
	}

	dcache, cerr := c.getCacheLoc(ctx, bucket, object)
	if cerr != nil {
		return
	}
	if dcache.Exists(ctx, bucket, object) {
		c.delete(ctx, dcache, bucket, object)
	}
	return
}

// DeleteObjects batch deletes objects in slice, and clears any cached entries
func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
	errs := make([]error, len(objects))
	for idx, object := range objects {
		errs[idx] = c.DeleteObject(ctx, bucket, object)
	}
	return errs, nil
}

// construct a metadata k-v map
func getMetadata(objInfo ObjectInfo) map[string]string {
	metadata := make(map[string]string)
	metadata["etag"] = objInfo.ETag
	metadata["content-type"] = objInfo.ContentType
	if objInfo.ContentEncoding != "" {
		metadata["content-encoding"] = objInfo.ContentEncoding
	}
	if objInfo.Expires != timeSentinel {
		metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
	}
	for k, v := range objInfo.UserDefined {
		metadata[k] = v
	}
	return metadata
}

// GetObjectNInfo serves reads from the cache when possible, validating the
// cached copy against the backend and filling the cache on a miss.
func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	var cc cacheControl

	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}

	cacheReader, cacheErr := c.get(ctx, dcache, bucket, object, rs, h, opts)
	if cacheErr == nil {
		cc = cacheControlOpts(cacheReader.ObjInfo)
		if !cc.isEmpty() && !cc.isStale(cacheReader.ObjInfo.ModTime) {
			return cacheReader, nil
		}
	}

	objInfo, err := c.GetObjectInfoFn(ctx, bucket, object, opts)
	if backendDownError(err) && cacheErr == nil {
		return cacheReader, nil
	} else if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			if cacheErr == nil {
				cacheReader.Close()
				// Delete cached entry if backend object
				// was deleted.
				dcache.Delete(ctx, bucket, object)
			}
		}
		return nil, err
	}

	if !objInfo.IsCacheable() {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}

	if cacheErr == nil {
		// if ETag matches for stale cache entry, serve from cache
		if cacheReader.ObjInfo.ETag == objInfo.ETag {
			// Update metadata in case server-side copy might have changed object metadata
			dcache.updateMetadataIfChanged(ctx, bucket, object, objInfo, cacheReader.ObjInfo)
			return cacheReader, nil
		}
		cacheReader.Close()
		// Object is stale, so delete from cache
		c.delete(ctx, dcache, bucket, object)
	}

	// Since we got here, we are serving the request from backend,
	// and also adding the object to the cache.
	if !dcache.diskUsageLow() {
		// signal the purge routine without blocking if a purge is already queued
		select {
		case dcache.purgeChan <- struct{}{}:
		default:
		}
	}
	if !dcache.diskAvailable(objInfo.Size) {
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}

	if rs != nil {
		go func() {
			// fill cache in the background for range GET requests
			bReader, bErr := c.GetObjectNInfoFn(ctx, bucket, object, nil, h, lockType, opts)
			if bErr != nil {
				return
			}
			defer bReader.Close()
			oi, err := c.stat(ctx, dcache, bucket, object)
			// avoid cache overwrite if another background routine filled cache
			if err != nil || oi.ETag != bReader.ObjInfo.ETag {
				c.put(ctx, dcache, bucket, object, bReader, bReader.ObjInfo.Size, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)})
			}
		}()
		return c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
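
	// Note: the background fill above re-reads the whole object (rs == nil is
	// passed to the backend) so that partial range data is never written to
	// the cache.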

	bkReader, bkErr := c.GetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	if bkErr != nil {
		return nil, bkErr
	}
	// Initialize pipe.
	pipeReader, pipeWriter := io.Pipe()
	teeReader := io.TeeReader(bkReader, pipeWriter)
	go func() {
		putErr := c.put(ctx, dcache, bucket, object, io.LimitReader(pipeReader, bkReader.ObjInfo.Size), bkReader.ObjInfo.Size, ObjectOptions{UserDefined: getMetadata(bkReader.ObjInfo)})
		// close the write end of the pipe, so the error gets
		// propagated to getObjReader
		pipeWriter.CloseWithError(putErr)
	}()
	cleanupBackend := func() { bkReader.Close() }
	cleanupPipe := func() { pipeReader.Close() }
	return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe)
}
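
// Note: io.TeeReader reports write-side errors as read errors, so a failure
// while filling the cache mid-stream also surfaces as an error on the client's
// download; the cleanup functions passed to NewGetObjectReaderFromReader close
// both the backend reader and the pipe once the response is drained.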

// Returns ObjectInfo from cache if available.
func (c *cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	getObjectInfoFn := c.GetObjectInfoFn

	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}

	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	var cc cacheControl
	// if cache control setting is valid, avoid HEAD operation to backend
	cachedObjInfo, cerr := c.stat(ctx, dcache, bucket, object)
	if cerr == nil {
		cc = cacheControlOpts(cachedObjInfo)
		if !cc.isEmpty() && !cc.isStale(cachedObjInfo.ModTime) {
			return cachedObjInfo, nil
		}
	}
	objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
	if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
			c.delete(ctx, dcache, bucket, object)
			return ObjectInfo{}, err
		}
		if !backendDownError(err) {
			return ObjectInfo{}, err
		}
		if cerr == nil {
			return cachedObjInfo, nil
		}
		return ObjectInfo{}, BackendDown{}
	}

	// when backend is up, do a sanity check on cached object
	if cerr != nil {
		return objInfo, nil
	}
	if cachedObjInfo.ETag != objInfo.ETag {
		// Delete the cached entry if the backend object was replaced.
		c.delete(ctx, dcache, bucket, object)
	}
	return objInfo, nil
}

// StorageInfo - returns underlying storage statistics.
func (c *cacheObjects) StorageInfo(ctx context.Context) (cInfo CacheStorageInfo) {
	var total, free uint64
	for _, cache := range c.cache {
		if cache == nil {
			continue
		}
		info, err := getDiskInfo(cache.dir)
		logger.GetReqInfo(ctx).AppendTags("cachePath", cache.dir)
		logger.LogIf(ctx, err)
		total += info.Total
		free += info.Free
	}
	return CacheStorageInfo{
		Total: total,
		Free:  free,
	}
}

// skipCache returns true if cache migration is in progress
func (c *cacheObjects) skipCache() bool {
	c.migMutex.Lock()
	defer c.migMutex.Unlock()
	return c.migrating
}

// Returns true if object should be excluded from cache
func (c *cacheObjects) isCacheExclude(bucket, object string) bool {
	// exclude directories from cache
	if strings.HasSuffix(object, SlashSeparator) {
		return true
	}
	for _, pattern := range c.exclude {
		matchStr := fmt.Sprintf("%s/%s", bucket, object)
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}
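
// For example, with exclude patterns ["*.tmp", "logs/*"], the object "app.log"
// in bucket "logs" yields matchStr "logs/app.log", which matches "logs/*" and
// is therefore never cached, while "photos/cat.png" matches neither pattern
// and remains cacheable.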

// choose a cache deterministically based on hash of bucket,object. The hash index is treated as
// a hint. In the event that the cache drive at hash index is offline, treat the list of cache drives
// as a circular buffer and walk through them starting at hash index until an online drive is found.
func (c *cacheObjects) getCacheLoc(ctx context.Context, bucket, object string) (*diskCache, error) {
	index := c.hashIndex(bucket, object)
	numDisks := len(c.cache)
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cache[i] == nil {
			continue
		}
		if c.cache[i].IsOnline() {
			return c.cache[i], nil
		}
	}
	return nil, errDiskNotFound
}

// get cache disk where object is currently cached for a GET operation. If object does not exist at that location,
// treat the list of cache drives as a circular buffer and walk through them starting at hash index
// until an online drive is found. If the object is not found, fall back to the first online cache drive
// closest to the hash index, so that the object can be re-cached.
func (c *cacheObjects) getCacheToLoc(ctx context.Context, bucket, object string) (*diskCache, error) {
	index := c.hashIndex(bucket, object)

	numDisks := len(c.cache)
	// save first online cache disk closest to the hint index
	var firstOnlineDisk *diskCache
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cache[i] == nil {
			continue
		}
		if c.cache[i].IsOnline() {
			if firstOnlineDisk == nil {
				firstOnlineDisk = c.cache[i]
			}
			if c.cache[i].Exists(ctx, bucket, object) {
				return c.cache[i], nil
			}
		}
	}

	if firstOnlineDisk != nil {
		return firstOnlineDisk, nil
	}
	return nil, errDiskNotFound
}

// Compute a unique hash sum for bucket and object
func (c *cacheObjects) hashIndex(bucket, object string) int {
	return crcHashMod(pathJoin(bucket, object), len(c.cache))
}
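
// Because hashIndex is a pure function of "bucket/object" and the drive count,
// a given object always starts its circular walk at the same drive; the chosen
// drive only shifts when that drive is offline or the set of configured cache
// drives changes.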

// newCache initializes the cacheFSObjects for the "drives" specified in config.json
// or the global env overrides.
func newCache(config CacheConfig) ([]*diskCache, bool, error) {
	var caches []*diskCache
	ctx := logger.SetReqInfo(context.Background(), &logger.ReqInfo{})
	formats, migrating, err := loadAndValidateCacheFormat(ctx, config.Drives)
	if err != nil {
		return nil, false, err
	}
	for i, dir := range config.Drives {
		// skip diskCache creation for cache drives missing a format.json
		if formats[i] == nil {
			caches = append(caches, nil)
			continue
		}
		if err := checkAtimeSupport(dir); err != nil {
			return nil, false, errors.New("Atime support required for disk caching")
		}

		cache, err := newdiskCache(dir, config.Expiry, config.MaxUse)
		if err != nil {
			return nil, false, err
		}
		// Start the purging go-routine for entries that have expired if no migration in progress
		if !migrating {
			go cache.purge()
		}

		caches = append(caches, cache)
	}
	return caches, migrating, nil
}

// Return error if Atime is disabled on the O/S
func checkAtimeSupport(dir string) (err error) {
	file, err := ioutil.TempFile(dir, "prefix")
	if err != nil {
		return
	}
	defer os.Remove(file.Name())
	finfo1, err := os.Stat(file.Name())
	if err != nil {
		return
	}
	// add a sleep to ensure atime change is detected
	time.Sleep(10 * time.Millisecond)

	if _, err = io.Copy(ioutil.Discard, file); err != nil {
		return
	}

	finfo2, err := os.Stat(file.Name())
	if err != nil {
		return
	}
	if atime.Get(finfo2).Equal(atime.Get(finfo1)) {
		return errors.New("Atime not supported")
	}
	return
}
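
// Note: cache drives mounted with the "noatime" option never update access
// times, so this check fails there by design; the purge routine relies on
// atime to decide which cached objects are least recently used.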

// migrateCacheFromV1toV2 migrates all cache drives from format V1 to V2 in parallel.
func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {
	logger.StartupMessage(colorBlue("Cache migration initiated ...."))

	var wg = &sync.WaitGroup{}
	errs := make([]error, len(c.cache))
	for i, dc := range c.cache {
		if dc == nil {
			continue
		}
		wg.Add(1)
		// start migration from V1 to V2
		go func(ctx context.Context, dc *diskCache, errs []error, idx int) {
			defer wg.Done()
			if err := migrateOldCache(ctx, dc); err != nil {
				errs[idx] = err
				logger.LogIf(ctx, err)
				return
			}
			// start purge routine after migration completes.
			go dc.purge()
		}(ctx, dc, errs, i)
	}
	wg.Wait()

	errCnt := 0
	for _, err := range errs {
		if err != nil {
			errCnt++
		}
	}
	if errCnt > 0 {
		return
	}
	// update migration status
	c.migMutex.Lock()
	defer c.migMutex.Unlock()
	c.migrating = false
	logger.StartupMessage(colorBlue("Cache migration completed successfully."))
}

// PutObject - caches the uploaded object for single Put operations
func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	putObjectFn := c.PutObjectFn
	data := r.rawReader
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	size := r.Size()
	if c.skipCache() {
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	// fetch from backend if there is no space on cache drive
	if !dcache.diskAvailable(size) {
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	if opts.ServerSideEncryption != nil {
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	// fetch from backend if cache exclude pattern or cache-control
	// directive set to exclude
	if c.isCacheExclude(bucket, object) {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	// Initialize pipe to stream data to backend
	pipeReader, pipeWriter := io.Pipe()
	hashReader, err := hash.NewReader(pipeReader, size, "", "", data.ActualSize(), globalCLIContext.StrictS3Compat)
	if err != nil {
		return ObjectInfo{}, err
	}
	// Initialize pipe to stream data to cache
	rPipe, wPipe := io.Pipe()

	oinfoCh := make(chan ObjectInfo)
	errCh := make(chan error)
	go func() {
		oinfo, perr := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader, nil, nil), opts)
		if perr != nil {
			pipeWriter.CloseWithError(perr)
			wPipe.CloseWithError(perr)
			close(oinfoCh)
			errCh <- perr
			return
		}
		close(errCh)
		oinfoCh <- oinfo
	}()
	// get a namespace lock on cache until cache is filled.
	cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
	if err := cLock.GetLock(globalObjectTimeout); err != nil {
		return ObjectInfo{}, err
	}
	defer cLock.Unlock()
	go func() {
		// use a local error here to avoid racing with err in the calling goroutine
		if perr := dcache.Put(ctx, bucket, object, rPipe, data.Size(), opts); perr != nil {
			wPipe.CloseWithError(perr)
			return
		}
	}()

	mwriter := io.MultiWriter(pipeWriter, wPipe)
	_, err = io.Copy(mwriter, data)
	if err != nil {
		err = <-errCh
		return objInfo, err
	}
	pipeWriter.Close()
	wPipe.Close()
	objInfo = <-oinfoCh
	dcache.updateETag(ctx, bucket, object, objInfo.ETag)

	return objInfo, err
}
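
// Note on the flow above: io.MultiWriter splits the incoming stream so the
// backend (via hashReader/pipeWriter) and the cache drive (via wPipe) are
// written in tandem; errCh and oinfoCh then hand the backend result back to
// the caller once the copy completes.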

// Returns cacheObjects for use by Server.
func newServerCacheObjects(ctx context.Context, config CacheConfig) (CacheObjectLayer, error) {
	// list of disk caches for cache "drives" specified in config.json or MINIO_CACHE_DRIVES env var.
	cache, migrateSw, err := newCache(config)
	if err != nil {
		return nil, err
	}

	c := &cacheObjects{
		cache:     cache,
		exclude:   config.Exclude,
		nsMutex:   newNSLock(false),
		migrating: migrateSw,
		migMutex:  sync.Mutex{},
		GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
		},
		GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
			return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
		},
		DeleteObjectFn: func(ctx context.Context, bucket, object string) error {
			return newObjectLayerFn().DeleteObject(ctx, bucket, object)
		},
		DeleteObjectsFn: func(ctx context.Context, bucket string, objects []string) ([]error, error) {
			errs := make([]error, len(objects))
			for idx, object := range objects {
				errs[idx] = newObjectLayerFn().DeleteObject(ctx, bucket, object)
			}
			return errs, nil
		},
		PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
		},
	}
	if migrateSw {
		go c.migrateCacheFromV1toV2(ctx)
	}
	return c, nil
}