// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	objectlock "github.com/minio/minio/internal/bucket/object/lock"
	"github.com/minio/minio/internal/color"
	"github.com/minio/minio/internal/config/cache"
	"github.com/minio/minio/internal/disk"
	"github.com/minio/minio/internal/hash"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/minio/internal/sync/errgroup"
	xnet "github.com/minio/pkg/net"
	"github.com/minio/pkg/wildcard"
)

const (
	cacheBlkSize          = 1 << 20
	cacheGCInterval       = time.Minute * 30
	writeBackStatusHeader = ReservedMetadataPrefixLower + "write-back-status"
	writeBackRetryHeader  = ReservedMetadataPrefixLower + "write-back-retry"
)

type cacheCommitStatus string

const (
	// CommitPending - cache writeback with backend is pending.
	CommitPending cacheCommitStatus = "pending"

	// CommitComplete - cache writeback completed ok.
	CommitComplete cacheCommitStatus = "complete"

	// CommitFailed - cache writeback needs a retry.
	CommitFailed cacheCommitStatus = "failed"
)

const (
	// CommitWriteBack allows staging and write back of cached content for single object uploads
	CommitWriteBack string = "writeback"
	// CommitWriteThrough allows caching multipart uploads to disk synchronously
	CommitWriteThrough string = "writethrough"
)

// String returns string representation of status
func (s cacheCommitStatus) String() string {
	return string(s)
}

// CacheStorageInfo - represents total, free capacity of
// underlying cache storage.
type CacheStorageInfo struct {
	Total uint64 // Total cache disk space.
	Free  uint64 // Free cache available space.
}

// CacheObjectLayer implements primitives for cache object API layer.
type CacheObjectLayer interface {
	// Object operations.
	GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
	DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error)
	PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
	// Multipart operations.
	NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error)
	PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
	CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error)

	// Storage operations.
	StorageInfo(ctx context.Context) CacheStorageInfo
	CacheStats() *CacheStats
}

// Abstracts disk caching - used by the S3 layer
type cacheObjects struct {
	// slice of cache drives
	cache []*diskCache
	// file path patterns to exclude from cache
	exclude []string
	// number of accesses after which to cache an object
	after int
	// commit objects in async manner
	commitWriteback    bool
	commitWritethrough bool

	// if true migration is in progress from v1 to v2
	migrating bool
	// retry queue for writeback cache mode to reattempt upload to backend
	wbRetryCh chan ObjectInfo
	// Cache stats
	cacheStats *CacheStats

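	// Inner*Fn hold the backend ObjectLayer operations that this cache layer
	// wraps; they are wired up in newServerCacheObjects.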
	InnerGetObjectNInfoFn          func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
	InnerGetObjectInfoFn           func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerDeleteObjectFn            func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerPutObjectFn               func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerCopyObjectFn              func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerNewMultipartUploadFn      func(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error)
	InnerPutObjectPartFn           func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error)
	InnerAbortMultipartUploadFn    func(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
	InnerCompleteMultipartUploadFn func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
	InnerCopyObjectPartFn          func(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error)
}
2020-05-18 21:33:43 -04:00
|
|
|
func (c *cacheObjects) incHitsToMeta(ctx context.Context, dcache *diskCache, bucket, object string, size int64, eTag string, rs *HTTPRangeSpec) error {
|
2020-09-10 14:37:22 -04:00
|
|
|
metadata := map[string]string{"etag": eTag}
|
2021-12-08 17:52:31 -05:00
|
|
|
return dcache.SaveMetadata(ctx, bucket, object, metadata, size, rs, "", true, false)
|
2019-05-22 17:54:15 -04:00
|
|
|
}
|
|
|
|
|
2020-02-03 22:40:01 -05:00
|
|
|
// Backend metadata could have changed through server side copy - reset cache metadata if that is the case
|
2020-05-18 21:33:43 -04:00
|
|
|
func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *diskCache, bucket, object string, bkObjectInfo, cacheObjInfo ObjectInfo, rs *HTTPRangeSpec) error {
|
2020-09-10 14:37:22 -04:00
|
|
|
bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
|
|
|
|
cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
|
2020-02-03 22:40:01 -05:00
|
|
|
for k, v := range bkObjectInfo.UserDefined {
|
2020-05-28 17:36:38 -04:00
|
|
|
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
|
2020-02-03 22:40:01 -05:00
|
|
|
// Do not need to send any internal metadata
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
bkMeta[http.CanonicalHeaderKey(k)] = v
|
2018-03-28 17:14:06 -04:00
|
|
|
}
|
2020-02-03 22:40:01 -05:00
|
|
|
for k, v := range cacheObjInfo.UserDefined {
|
2020-05-28 17:36:38 -04:00
|
|
|
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
|
2020-02-03 22:40:01 -05:00
|
|
|
// Do not need to send any internal metadata
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
cacheMeta[http.CanonicalHeaderKey(k)] = v
|
2019-05-21 23:00:27 -04:00
|
|
|
}
|
2019-08-09 20:09:08 -04:00
|
|
|
|
2020-02-03 22:40:01 -05:00
|
|
|
if !isMetadataSame(bkMeta, cacheMeta) ||
|
|
|
|
bkObjectInfo.ETag != cacheObjInfo.ETag ||
|
|
|
|
bkObjectInfo.ContentType != cacheObjInfo.ContentType ||
|
|
|
|
!bkObjectInfo.Expires.Equal(cacheObjInfo.Expires) {
|
2021-12-08 17:52:31 -05:00
|
|
|
return dcache.SaveMetadata(ctx, bucket, object, getMetadata(bkObjectInfo), bkObjectInfo.Size, nil, "", false, false)
|
2019-12-08 16:58:04 -05:00
|
|
|
}
|
2020-05-18 21:33:43 -04:00
|
|
|
return c.incHitsToMeta(ctx, dcache, bucket, object, cacheObjInfo.Size, cacheObjInfo.ETag, rs)
|
2019-12-08 16:58:04 -05:00
|
|
|
}

// DeleteObject clears cache entry if backend delete operation succeeds
func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if objInfo, err = c.InnerDeleteObjectFn(ctx, bucket, object, opts); err != nil {
		return
	}
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return
	}

	dcache, cerr := c.getCacheLoc(bucket, object)
	if cerr != nil {
		return objInfo, cerr
	}
	dcache.Delete(ctx, bucket, object)
	return
}

// DeleteObjects batch deletes objects in slice, and clears any cached entries
func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	errs := make([]error, len(objects))
	objInfos := make([]ObjectInfo, len(objects))
	for idx, object := range objects {
		opts.VersionID = object.VersionID
		objInfos[idx], errs[idx] = c.DeleteObject(ctx, bucket, object.ObjectName, opts)
	}
	deletedObjects := make([]DeletedObject, len(objInfos))
	for idx := range errs {
		if errs[idx] != nil {
			continue
		}
		if objInfos[idx].DeleteMarker {
			deletedObjects[idx] = DeletedObject{
				DeleteMarker:          objInfos[idx].DeleteMarker,
				DeleteMarkerVersionID: objInfos[idx].VersionID,
			}
			continue
		}
		deletedObjects[idx] = DeletedObject{
			ObjectName: objInfos[idx].Name,
			VersionID:  objInfos[idx].VersionID,
		}
	}
	return deletedObjects, errs
}

// construct a metadata k-v map
func getMetadata(objInfo ObjectInfo) map[string]string {
	metadata := make(map[string]string, len(objInfo.UserDefined)+4)
	metadata["etag"] = objInfo.ETag
	metadata["content-type"] = objInfo.ContentType
	if objInfo.ContentEncoding != "" {
		metadata["content-encoding"] = objInfo.ContentEncoding
	}
	if !objInfo.Expires.Equal(timeSentinel) {
		metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
	}
	metadata["last-modified"] = objInfo.ModTime.Format(http.TimeFormat)
	for k, v := range objInfo.UserDefined {
		metadata[k] = v
	}
	return metadata
}

// marks cache hit
func (c *cacheObjects) incCacheStats(size int64) {
	c.cacheStats.incHit()
	c.cacheStats.incBytesServed(size)
}

func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	var cc *cacheControl
	var cacheObjSize int64
	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}

	cacheReader, numCacheHits, cacheErr := dcache.Get(ctx, bucket, object, rs, h, opts)
	if cacheErr == nil {
		cacheObjSize = cacheReader.ObjInfo.Size
		if rs != nil {
			if _, len, err := rs.GetOffsetLength(cacheObjSize); err == nil {
				cacheObjSize = len
			}
		}
		cc = cacheControlOpts(cacheReader.ObjInfo)
		if cc != nil && (!cc.isStale(cacheReader.ObjInfo.ModTime) ||
			cc.onlyIfCached) {
			// This is a cache hit, mark it so
			bytesServed := cacheReader.ObjInfo.Size
			if rs != nil {
				if _, len, err := rs.GetOffsetLength(bytesServed); err == nil {
					bytesServed = len
				}
			}
			c.cacheStats.incHit()
			c.cacheStats.incBytesServed(bytesServed)
			c.incHitsToMeta(ctx, dcache, bucket, object, cacheReader.ObjInfo.Size, cacheReader.ObjInfo.ETag, rs)
			return cacheReader, nil
		}
		if cc != nil && cc.noStore {
			cacheReader.Close()
			c.cacheStats.incMiss()
			bReader, err := c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
			bReader.ObjInfo.CacheLookupStatus = CacheHit
			bReader.ObjInfo.CacheStatus = CacheMiss
			return bReader, err
		}
		// serve cached content without ETag verification if writeback commit is not yet complete
		if writebackInProgress(cacheReader.ObjInfo.UserDefined) {
			return cacheReader, nil
		}
	}

	objInfo, err := c.InnerGetObjectInfoFn(ctx, bucket, object, opts)
	if backendDownError(err) && cacheErr == nil {
		c.incCacheStats(cacheObjSize)
		return cacheReader, nil
	} else if err != nil {
		if cacheErr == nil {
			cacheReader.Close()
		}
		if _, ok := err.(ObjectNotFound); ok {
			if cacheErr == nil {
				// Delete cached entry if backend object
				// was deleted.
				dcache.Delete(ctx, bucket, object)
			}
		}
		c.cacheStats.incMiss()
		return nil, err
	}

	if !objInfo.IsCacheable() {
		if cacheErr == nil {
			cacheReader.Close()
		}
		c.cacheStats.incMiss()
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		if cacheErr == nil {
			cacheReader.Close()
		}
		c.cacheStats.incMiss()
		return c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)
	}
	if cacheErr == nil {
		// if ETag matches for stale cache entry, serve from cache
		if cacheReader.ObjInfo.ETag == objInfo.ETag {
			// Update metadata in case server-side copy might have changed object metadata
			c.updateMetadataIfChanged(ctx, dcache, bucket, object, objInfo, cacheReader.ObjInfo, rs)
			c.incCacheStats(cacheObjSize)
			return cacheReader, nil
		}
		cacheReader.Close()
		// Object is stale, so delete from cache
		dcache.Delete(ctx, bucket, object)
	}

	// Reaching here implies cache miss
	c.cacheStats.incMiss()

	bkReader, bkErr := c.InnerGetObjectNInfoFn(ctx, bucket, object, rs, h, lockType, opts)

	if bkErr != nil {
		return bkReader, bkErr
	}
	// If object has less hits than configured cache after, just increment the hit counter
	// but do not cache it.
	if numCacheHits < c.after {
		c.incHitsToMeta(ctx, dcache, bucket, object, objInfo.Size, objInfo.ETag, rs)
		return bkReader, bkErr
	}

	// Record if cache has a hit that was invalidated by ETag verification
	if cacheErr == nil {
		bkReader.ObjInfo.CacheLookupStatus = CacheHit
	}

	// Check if we can add it without exceeding total cache size.
	if !dcache.diskSpaceAvailable(objInfo.Size) {
		return bkReader, bkErr
	}

	if rs != nil && !dcache.enableRange {
		go func() {
			// if range caching is disabled, download entire object.
			rs = nil
			// fill cache in the background for range GET requests
			bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, rs, h, lockType, opts)
			if bErr != nil {
				return
			}
			defer bReader.Close()
			oi, _, _, err := dcache.statRange(GlobalContext, bucket, object, rs)
			// avoid cache overwrite if another background routine filled cache
			if err != nil || oi.ETag != bReader.ObjInfo.ETag {
				// use a new context to avoid locker prematurely timing out operation when the GetObjectNInfo returns.
				dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, rs, ObjectOptions{
					UserDefined: getMetadata(bReader.ObjInfo),
				}, false, false)
				return
			}
		}()
		return bkReader, bkErr
	}

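	// Cache the object while streaming it to the client: the backend read is
	// tee'd into a pipe whose read end a goroutine writes into the disk cache,
	// so the client download and the cache fill happen concurrently. Any cache
	// write error is propagated back through the pipe via CloseWithError.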
	// Initialize pipe.
	pr, pw := io.Pipe()
	var wg sync.WaitGroup
	teeReader := io.TeeReader(bkReader, pw)
	userDefined := getMetadata(bkReader.ObjInfo)
	wg.Add(1)
	go func() {
		_, putErr := dcache.Put(ctx, bucket, object,
			io.LimitReader(pr, bkReader.ObjInfo.Size),
			bkReader.ObjInfo.Size, rs, ObjectOptions{
				UserDefined: userDefined,
			}, false, false)
		// close the read end of the pipe, so the error gets
		// propagated to teeReader
		pr.CloseWithError(putErr)
		wg.Done()
	}()
	cleanupBackend := func() {
		pw.CloseWithError(bkReader.Close())
		wg.Wait()
	}
	return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts, cleanupBackend)
}

// Returns ObjectInfo from cache if available.
func (c *cacheObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	getObjectInfoFn := c.InnerGetObjectInfoFn

	if c.isCacheExclude(bucket, object) || c.skipCache() {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}

	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		return getObjectInfoFn(ctx, bucket, object, opts)
	}
	var cc *cacheControl
	// if cache control setting is valid, avoid HEAD operation to backend
	cachedObjInfo, _, cerr := dcache.Stat(ctx, bucket, object)
	if cerr == nil {
		cc = cacheControlOpts(cachedObjInfo)
		if cc == nil || (cc != nil && !cc.isStale(cachedObjInfo.ModTime)) {
			// This is a cache hit, mark it so
			c.cacheStats.incHit()
			return cachedObjInfo, nil
		}
		// serve cache metadata without ETag verification if writeback commit is not yet complete
		if writebackInProgress(cachedObjInfo.UserDefined) {
			return cachedObjInfo, nil
		}
	}

	objInfo, err := getObjectInfoFn(ctx, bucket, object, opts)
	if err != nil {
		if _, ok := err.(ObjectNotFound); ok {
			// Delete the cached entry if backend object was deleted.
			dcache.Delete(ctx, bucket, object)
			c.cacheStats.incMiss()
			return ObjectInfo{}, err
		}
		if !backendDownError(err) {
			c.cacheStats.incMiss()
			return ObjectInfo{}, err
		}
		if cerr == nil {
			// This is a cache hit, mark it so
			c.cacheStats.incHit()
			return cachedObjInfo, nil
		}
		c.cacheStats.incMiss()
		if xnet.IsNetworkOrHostDown(err, false) {
			return ObjectInfo{}, BackendDown{Err: err.Error()}
		}
		return ObjectInfo{}, err
	}
	// Reaching here implies cache miss
	c.cacheStats.incMiss()
	// when backend is up, do a sanity check on cached object
	if cerr != nil {
		return objInfo, nil
	}
	if cachedObjInfo.ETag != objInfo.ETag {
		// Delete the cached entry if the backend object was replaced.
		dcache.Delete(ctx, bucket, object)
	}
	return objInfo, nil
}

// CopyObject reverts to backend after evicting any stale cache entries
func (c *cacheObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	copyObjectFn := c.InnerCopyObjectFn
	if c.isCacheExclude(srcBucket, srcObject) || c.skipCache() {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	if srcBucket != dstBucket || srcObject != dstObject {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	// fetch diskCache if object is currently cached or nearest available cache drive
	dcache, err := c.getCacheToLoc(ctx, srcBucket, srcObject)
	if err != nil {
		return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
	}
	// if currently cached, evict old entry and revert to backend.
	if cachedObjInfo, _, cerr := dcache.Stat(ctx, srcBucket, srcObject); cerr == nil {
		cc := cacheControlOpts(cachedObjInfo)
		if cc == nil || !cc.isStale(cachedObjInfo.ModTime) {
			dcache.Delete(ctx, srcBucket, srcObject)
		}
	}
	return copyObjectFn(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
}

// StorageInfo - returns underlying storage statistics.
func (c *cacheObjects) StorageInfo(ctx context.Context) (cInfo CacheStorageInfo) {
	var total, free uint64
	for _, cache := range c.cache {
		if cache == nil {
			continue
		}
		info, err := getDiskInfo(cache.dir)
		logger.GetReqInfo(ctx).AppendTags("cachePath", cache.dir)
		logger.LogIf(ctx, err)
		total += info.Total
		free += info.Free
	}
	return CacheStorageInfo{
		Total: total,
		Free:  free,
	}
}

// CacheStats - returns underlying storage statistics.
func (c *cacheObjects) CacheStats() (cs *CacheStats) {
	return c.cacheStats
}

// skipCache() returns true if cache migration is in progress
func (c *cacheObjects) skipCache() bool {
	return c.migrating
}

// Returns true if object should be excluded from cache
func (c *cacheObjects) isCacheExclude(bucket, object string) bool {
	// exclude directories from cache
	if strings.HasSuffix(object, SlashSeparator) {
		return true
	}
	for _, pattern := range c.exclude {
		matchStr := fmt.Sprintf("%s/%s", bucket, object)
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}

// choose a cache deterministically based on hash of bucket,object. The hash index is treated as
// a hint. In the event that the cache drive at hash index is offline, treat the list of cache drives
// as a circular buffer and walk through them starting at hash index until an online drive is found.
func (c *cacheObjects) getCacheLoc(bucket, object string) (*diskCache, error) {
	index := c.hashIndex(bucket, object)
	numDisks := len(c.cache)
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cache[i] == nil {
			continue
		}
		if c.cache[i].IsOnline() {
			return c.cache[i], nil
		}
	}
	return nil, errDiskNotFound
}

// get cache disk where object is currently cached for a GET operation. If object does not exist at that location,
// treat the list of cache drives as a circular buffer and walk through them starting at hash index
// until an online drive is found. If object is not found, fall back to the first online cache drive
// closest to the hash index, so that object can be re-cached.
func (c *cacheObjects) getCacheToLoc(ctx context.Context, bucket, object string) (*diskCache, error) {
	index := c.hashIndex(bucket, object)

	numDisks := len(c.cache)
	// save first online cache disk closest to the hint index
	var firstOnlineDisk *diskCache
	for k := 0; k < numDisks; k++ {
		i := (index + k) % numDisks
		if c.cache[i] == nil {
			continue
		}
		if c.cache[i].IsOnline() {
			if firstOnlineDisk == nil {
				firstOnlineDisk = c.cache[i]
			}
			if c.cache[i].Exists(ctx, bucket, object) {
				return c.cache[i], nil
			}
		}
	}

	if firstOnlineDisk != nil {
		return firstOnlineDisk, nil
	}
	return nil, errDiskNotFound
}

// Compute a unique hash sum for bucket and object
func (c *cacheObjects) hashIndex(bucket, object string) int {
	return crcHashMod(pathJoin(bucket, object), len(c.cache))
}

// newCache initializes the cacheFSObjects for the "drives" specified in config.json
// or the global env overrides.
func newCache(config cache.Config) ([]*diskCache, bool, error) {
	var caches []*diskCache
	ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{})
	formats, migrating, err := loadAndValidateCacheFormat(ctx, config.Drives)
	if err != nil {
		return nil, false, err
	}
	var warningMsg string
	for i, dir := range config.Drives {
		// skip diskCache creation for cache drives missing a format.json
		if formats[i] == nil {
			caches = append(caches, nil)
			continue
		}
		if !globalIsCICD && len(warningMsg) == 0 {
			rootDsk, err := disk.IsRootDisk(dir, "/")
			if err != nil {
				warningMsg = fmt.Sprintf("Invalid cache dir %s err : %s", dir, err.Error())
			}
			if rootDsk {
				warningMsg = fmt.Sprintf("cache dir cannot be part of root drive: %s", dir)
			}
		}

		if err := checkAtimeSupport(dir); err != nil {
			return nil, false, fmt.Errorf("Atime support required for drive caching, atime check failed with %w", err)
		}

		cache, err := newDiskCache(ctx, dir, config)
		if err != nil {
			return nil, false, err
		}
		caches = append(caches, cache)
	}
	if warningMsg != "" {
		logger.Info(color.Yellow(fmt.Sprintf("WARNING: Usage of root drive for drive caching is deprecated: %s", warningMsg)))
	}
	return caches, migrating, nil
}

func (c *cacheObjects) migrateCacheFromV1toV2(ctx context.Context) {
	logger.Info(color.Blue("Cache migration initiated ...."))

	g := errgroup.WithNErrs(len(c.cache))
	for index, dc := range c.cache {
		if dc == nil {
			continue
		}
		index := index
		g.Go(func() error {
			// start migration from V1 to V2
			return migrateOldCache(ctx, c.cache[index])
		}, index)
	}

	errCnt := 0
	for _, err := range g.Wait() {
		if err != nil {
			errCnt++
			logger.LogIf(ctx, err)
			continue
		}
	}

	if errCnt > 0 {
		return
	}

	// update migration status
	c.migrating = false
	logger.Info(color.Blue("Cache migration completed successfully."))
}

// PutObject - caches the uploaded object for single Put operations
func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	putObjectFn := c.InnerPutObjectFn
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	size := r.Size()
	if c.skipCache() {
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	// fetch from backend if there is no space on cache drive
	if !dcache.diskSpaceAvailable(size) {
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	if opts.ServerSideEncryption != nil {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}

	// fetch from backend if cache exclude pattern or cache-control
	// directive set to exclude
	if c.isCacheExclude(bucket, object) {
		dcache.Delete(ctx, bucket, object)
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	if c.commitWriteback {
		oi, err := dcache.Put(ctx, bucket, object, r, r.Size(), nil, opts, false, true)
		if err != nil {
			return ObjectInfo{}, err
		}
		go c.uploadObject(GlobalContext, oi)
		return oi, nil
	}
	if !c.commitWritethrough {
		objInfo, err = putObjectFn(ctx, bucket, object, r, opts)
		if err == nil {
			go func() {
				// fill cache in the background
				bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
				if bErr != nil {
					return
				}
				defer bReader.Close()
				oi, _, err := dcache.Stat(GlobalContext, bucket, object)
				// avoid cache overwrite if another background routine filled cache
				if err != nil || oi.ETag != bReader.ObjInfo.ETag {
					dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false, false)
				}
			}()
		}
		return objInfo, err
	}
	cLock, lkctx, cerr := dcache.GetLockContext(GlobalContext, bucket, object)
	if cerr != nil {
		return putObjectFn(ctx, bucket, object, r, opts)
	}
	defer cLock.Unlock(lkctx)
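	// Writethrough: a single io.Copy feeds a multi-writer whose two pipes
	// stream the object to the backend PutObject and to the disk cache at the
	// same time; the cache entry's metadata is then reconciled with the
	// backend's response (or the entry is deleted if that fails).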
	// Initialize pipe to stream data to backend
	pipeReader, pipeWriter := io.Pipe()
	hashReader, err := hash.NewReader(pipeReader, size, "", "", r.ActualSize())
	if err != nil {
		return
	}
	// Initialize pipe to stream data to cache
	rPipe, wPipe := io.Pipe()
	infoCh := make(chan ObjectInfo)
	go func() {
		defer close(infoCh)
		info, err := putObjectFn(ctx, bucket, object, NewPutObjReader(hashReader), opts)
		pipeReader.CloseWithError(err)
		rPipe.CloseWithError(err)
		if err == nil {
			infoCh <- info
		}
	}()

	go func() {
		_, err := dcache.put(lkctx.Context(), bucket, object, rPipe, r.Size(), nil, opts, false, false)
		if err != nil {
			logger.LogIf(lkctx.Context(), err)
		}
		// We do not care about errors to cached backend.
		rPipe.Close()
	}()

	mwriter := cacheMultiWriter(pipeWriter, wPipe)
	_, err = io.Copy(mwriter, r)
	pipeWriter.Close()
	wPipe.Close()
	if err != nil {
		return ObjectInfo{}, err
	}
	info := <-infoCh
	if cerr = dcache.updateMetadata(lkctx.Context(), bucket, object, info.ETag, info.ModTime, info.Size); cerr != nil {
		dcache.delete(bucket, object)
	}
	return info, err
}

// upload cached object to backend in async commit mode.
func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) {
	dcache, err := c.getCacheToLoc(ctx, oi.Bucket, oi.Name)
	if err != nil {
		// disk cache could not be located.
		logger.LogIf(ctx, fmt.Errorf("Could not upload %s/%s to backend: %w", oi.Bucket, oi.Name, err))
		return
	}
	cReader, _, bErr := dcache.Get(ctx, oi.Bucket, oi.Name, nil, http.Header{}, ObjectOptions{})
	if bErr != nil {
		return
	}
	defer cReader.Close()

	if cReader.ObjInfo.ETag != oi.ETag {
		return
	}
	st := cacheCommitStatus(oi.UserDefined[writeBackStatusHeader])
	if st == CommitComplete || st.String() == "" {
		return
	}
	hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size)
	if err != nil {
		return
	}
	var opts ObjectOptions
	opts.UserDefined = cloneMSS(oi.UserDefined)
	objInfo, err := c.InnerPutObjectFn(ctx, oi.Bucket, oi.Name, NewPutObjReader(hashReader), opts)
	wbCommitStatus := CommitComplete
	size := objInfo.Size
	if err != nil {
		wbCommitStatus = CommitFailed
	}

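	// Persist the commit status back into the cached entry's metadata; on a
	// failed commit, bump the retry counter so the upload is retried with a
	// small backoff.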
	meta := cloneMSS(cReader.ObjInfo.UserDefined)
	retryCnt := 0
	if wbCommitStatus == CommitFailed {
		retryCnt, _ = strconv.Atoi(meta[writeBackRetryHeader])
		retryCnt++
		meta[writeBackRetryHeader] = strconv.Itoa(retryCnt)
		size = cReader.ObjInfo.Size
	} else {
		delete(meta, writeBackRetryHeader)
	}
	meta[writeBackStatusHeader] = wbCommitStatus.String()
	meta["etag"] = oi.ETag
	dcache.SaveMetadata(ctx, oi.Bucket, oi.Name, meta, size, nil, "", false, wbCommitStatus == CommitComplete)
	if retryCnt > 0 {
		// slow down retries
		time.Sleep(time.Second * time.Duration(retryCnt%10+1))
		c.queueWritebackRetry(oi)
	}
}

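// queueWritebackRetry re-queues a failed writeback upload without blocking;
// if the retry channel is full (or the server is shutting down) the attempt
// is skipped.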
func (c *cacheObjects) queueWritebackRetry(oi ObjectInfo) {
	select {
	case <-GlobalContext.Done():
		return
	case c.wbRetryCh <- oi:
		c.uploadObject(GlobalContext, oi)
	default:
	}
}

// Returns cacheObjects for use by Server.
func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjectLayer, error) {
	// list of disk caches for cache "drives" specified in config.json or MINIO_CACHE_DRIVES env var.
	cache, migrateSw, err := newCache(config)
	if err != nil {
		return nil, err
	}
	c := &cacheObjects{
		cache:              cache,
		exclude:            config.Exclude,
		after:              config.After,
		migrating:          migrateSw,
		commitWriteback:    config.CacheCommitMode == CommitWriteBack,
		commitWritethrough: config.CacheCommitMode == CommitWriteThrough,

		cacheStats: newCacheStats(),
		InnerGetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
		},
		InnerGetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
			return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
		},
		InnerDeleteObjectFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
			return newObjectLayerFn().DeleteObject(ctx, bucket, object, opts)
		},
		InnerPutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
		},
		InnerCopyObjectFn: func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
		},
		InnerNewMultipartUploadFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
			return newObjectLayerFn().NewMultipartUpload(ctx, bucket, object, opts)
		},
		InnerPutObjectPartFn: func(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
			return newObjectLayerFn().PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
		},
		InnerAbortMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
			return newObjectLayerFn().AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
		},
		InnerCompleteMultipartUploadFn: func(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
			return newObjectLayerFn().CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
		},
		InnerCopyObjectPartFn: func(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
			return newObjectLayerFn().CopyObjectPart(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
		},
	}
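	// GetDiskStats reports per-drive usage for cache metrics; UsageState is
	// set to 1 once usage crosses the GC trigger threshold (quota percent
	// scaled by the high watermark).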
	c.cacheStats.GetDiskStats = func() []CacheDiskStats {
		cacheDiskStats := make([]CacheDiskStats, len(c.cache))
		for i := range c.cache {
			dcache := c.cache[i]
			cacheDiskStats[i] = CacheDiskStats{}
			if dcache != nil {
				info, err := getDiskInfo(dcache.dir)
				logger.LogIf(ctx, err)
				cacheDiskStats[i].UsageSize = info.Used
				cacheDiskStats[i].TotalCapacity = info.Total
				cacheDiskStats[i].Dir = dcache.stats.Dir
				if info.Total != 0 {
					// UsageState
					gcTriggerPct := dcache.quotaPct * dcache.highWatermark / 100
					usedPercent := float64(info.Used) * 100 / float64(info.Total)
					if usedPercent >= float64(gcTriggerPct) {
						cacheDiskStats[i].UsageState = 1
					}
					// UsagePercent
					cacheDiskStats[i].UsagePercent = uint64(usedPercent)
				}
			}
		}
		return cacheDiskStats
	}
	if migrateSw {
		go c.migrateCacheFromV1toV2(ctx)
	}
	go c.gc(ctx)
	if c.commitWriteback {
		c.wbRetryCh = make(chan ObjectInfo, 10000)
		go func() {
			<-GlobalContext.Done()
			close(c.wbRetryCh)
		}()
		go c.queuePendingWriteback(ctx)
	}

	return c, nil
}

func (c *cacheObjects) gc(ctx context.Context) {
	ticker := time.NewTicker(cacheGCInterval)

	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if c.migrating {
				continue
			}
			for _, dcache := range c.cache {
				if dcache != nil {
					// Check if there is disk.
					// Will queue a GC scan if at high watermark.
					dcache.diskSpaceAvailable(0)
				}
			}
		}
	}
}

// queues any pending or failed async commits when server restarts
func (c *cacheObjects) queuePendingWriteback(ctx context.Context) {
	for _, dcache := range c.cache {
		if dcache != nil {
			for {
				select {
				case <-ctx.Done():
					return
				case oi, ok := <-dcache.retryWritebackCh:
					if !ok {
						goto next
					}
					c.queueWritebackRetry(oi)
				default:
					time.Sleep(time.Second * 1)
				}
			}
		next:
		}
	}
}

// NewMultipartUpload - Starts a new multipart upload operation to backend - if writethrough mode is enabled, starts caching the multipart.
func (c *cacheObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (res *NewMultipartUploadResult, err error) {
	newMultipartUploadFn := c.InnerNewMultipartUploadFn
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return newMultipartUploadFn(ctx, bucket, object, opts)
	}
	if c.skipCache() {
		return newMultipartUploadFn(ctx, bucket, object, opts)
	}

	if opts.ServerSideEncryption != nil { // avoid caching encrypted objects
		dcache.Delete(ctx, bucket, object)
		return newMultipartUploadFn(ctx, bucket, object, opts)
	}

	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		dcache.Delete(ctx, bucket, object)
		return newMultipartUploadFn(ctx, bucket, object, opts)
	}

	// fetch from backend if cache exclude pattern or cache-control
	// directive set to exclude
	if c.isCacheExclude(bucket, object) {
		dcache.Delete(ctx, bucket, object)
		return newMultipartUploadFn(ctx, bucket, object, opts)
	}
	if !c.commitWritethrough && !c.commitWriteback {
		return newMultipartUploadFn(ctx, bucket, object, opts)
	}

	// perform multipart upload on backend and cache simultaneously
	res, err = newMultipartUploadFn(ctx, bucket, object, opts)
	if err == nil {
		dcache.NewMultipartUpload(GlobalContext, bucket, object, res.UploadID, opts)
	}
	return res, err
}
|
|
|
|
|
|
|
|
// PutObjectPart streams the part to the cache concurrently if writethrough mode is enabled; otherwise it redirects the call to the backend.
func (c *cacheObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
	putObjectPartFn := c.InnerPutObjectPartFn
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
	}

	if !c.commitWritethrough && !c.commitWriteback {
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
	}
	if c.skipCache() {
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
	}
	size := data.Size()

	// avoid caching part if space unavailable
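	// (the part still goes to the backend; only the cache copy is skipped)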
	if !dcache.diskSpaceAvailable(size) {
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
	}

	if opts.ServerSideEncryption != nil {
		dcache.Delete(ctx, bucket, object)
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
	}

	// skip cache for objects with locks
	objRetention := objectlock.GetObjectRetentionMeta(opts.UserDefined)
	legalHold := objectlock.GetObjectLegalHoldMeta(opts.UserDefined)
	if objRetention.Mode.Valid() || legalHold.Status.Valid() {
		dcache.Delete(ctx, bucket, object)
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
	}

	// fetch from backend if cache exclude pattern or cache-control
	// directive set to exclude
	if c.isCacheExclude(bucket, object) {
		dcache.Delete(ctx, bucket, object)
		return putObjectPartFn(ctx, bucket, object, uploadID, partID, data, opts)
	}

	info = PartInfo{}
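	// The incoming part is fanned out to the backend and the cache at the same time:
	// one pipe feeds a hash-verified stream to the backend while the other streams the
	// raw bytes to the cache drive; results and errors come back over dedicated channels.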
	// Initialize pipe to stream data to backend
	pipeReader, pipeWriter := io.Pipe()
	hashReader, err := hash.NewReader(pipeReader, size, "", "", data.ActualSize())
	if err != nil {
		return
	}
	// Initialize pipe to stream data to cache
	rPipe, wPipe := io.Pipe()
	pinfoCh := make(chan PartInfo)
	cinfoCh := make(chan PartInfo)

	errorCh := make(chan error)
	go func() {
		info, err = putObjectPartFn(ctx, bucket, object, uploadID, partID, NewPutObjReader(hashReader), opts)
		if err != nil {
			close(pinfoCh)
			pipeReader.CloseWithError(err)
			rPipe.CloseWithError(err)
			errorCh <- err
			return
		}
		close(errorCh)
		pinfoCh <- info
	}()
	go func() {
		pinfo, perr := dcache.PutObjectPart(GlobalContext, bucket, object, uploadID, partID, rPipe, data.Size(), opts)
		if perr != nil {
			rPipe.CloseWithError(perr)
			close(cinfoCh)
			// clean up upload
			dcache.AbortUpload(bucket, object, uploadID)
			return
		}
		cinfoCh <- pinfo
	}()
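
	// io.Copy below drives both pipes through a single multi-writer; closing the two
	// pipe writers signals EOF to the backend and cache readers respectively.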
	mwriter := cacheMultiWriter(pipeWriter, wPipe)
	_, err = io.Copy(mwriter, data)
	pipeWriter.Close()
	wPipe.Close()

	if err != nil {
		err = <-errorCh
		return PartInfo{}, err
	}
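	// Align the cached part's ETag and modification time with what the backend reported
	// before persisting the cached part metadata.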
	info = <-pinfoCh
	cachedInfo := <-cinfoCh
	if info.PartNumber == cachedInfo.PartNumber {
		cachedInfo.ETag = info.ETag
		cachedInfo.LastModified = info.LastModified
		dcache.SavePartMetadata(GlobalContext, bucket, object, uploadID, partID, cachedInfo)
	}
	return info, err
}

// CopyObjectPart behaves similarly to PutObjectPart - it caches the part in the upload dir if writethrough mode is enabled.
func (c *cacheObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
	copyObjectPartFn := c.InnerCopyObjectPartFn
	dcache, err := c.getCacheToLoc(ctx, dstBucket, dstObject)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
	}

	if !c.commitWritethrough && !c.commitWriteback {
		return copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
	}
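	// If this upload was never staged in the cache, simply proxy the part copy to the backend.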
	if err := dcache.uploadIDExists(dstBucket, dstObject, uploadID); err != nil {
		return copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
	}
	partInfo, err := copyObjectPartFn(ctx, srcBucket, srcObject, dstBucket, dstObject, uploadID, partID, startOffset, length, srcInfo, srcOpts, dstOpts)
	if err != nil {
		return pi, toObjectErr(err, dstBucket, dstObject)
	}
	go func() {
		isSuffixLength := false
		if startOffset < 0 {
			isSuffixLength = true
		}
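
		// A negative startOffset denotes a suffix-length range of the source object.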
		rs := &HTTPRangeSpec{
			IsSuffixLength: isSuffixLength,
			Start:          startOffset,
			End:            startOffset + length,
		}
		// fill cache in the background
		bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, srcBucket, srcObject, rs, http.Header{}, readLock, ObjectOptions{})
		if bErr != nil {
			return
		}
		defer bReader.Close()
		// avoid cache overwrite if another background routine filled cache
		dcache.PutObjectPart(GlobalContext, dstBucket, dstObject, uploadID, partID, bReader, length, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)})
	}()
	// Success.
	return partInfo, nil
}

// CompleteMultipartUpload - completes the multipart upload operation on the backend. If writethrough mode is enabled, this also
// finalizes the upload saved in the cache multipart dir.
func (c *cacheObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
	completeMultipartUploadFn := c.InnerCompleteMultipartUploadFn
	if !c.commitWritethrough && !c.commitWriteback {
		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
	}
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
	}

	// complete the multipart upload on the backend; on success the cached copy is finalized in the background
	oi, err = completeMultipartUploadFn(ctx, bucket, object, uploadID, uploadedParts, opts)
	if err == nil {
		// fill cache in the background
		go func() {
			_, err := dcache.CompleteMultipartUpload(bgContext(ctx), bucket, object, uploadID, uploadedParts, oi, opts)
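			// If finalizing the cached multipart upload fails, fall back to re-filling the
			// cache from the completed object on the backend.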
			if err != nil {
				// fill cache in the background
				bReader, bErr := c.InnerGetObjectNInfoFn(GlobalContext, bucket, object, nil, http.Header{}, readLock, ObjectOptions{})
				if bErr != nil {
					return
				}
				defer bReader.Close()
				oi, _, err := dcache.Stat(GlobalContext, bucket, object)
				// avoid cache overwrite if another background routine filled cache
				if err != nil || oi.ETag != bReader.ObjInfo.ETag {
					dcache.Put(GlobalContext, bucket, object, bReader, bReader.ObjInfo.Size, nil, ObjectOptions{UserDefined: getMetadata(bReader.ObjInfo)}, false, false)
				}
			}
		}()
	}
	return
}

// AbortMultipartUpload - aborts the multipart upload on the backend and in the cache.
func (c *cacheObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
	abortMultipartUploadFn := c.InnerAbortMultipartUploadFn
	if !c.commitWritethrough && !c.commitWriteback {
		return abortMultipartUploadFn(ctx, bucket, object, uploadID, opts)
	}
	dcache, err := c.getCacheToLoc(ctx, bucket, object)
	if err != nil {
		// disk cache could not be located, execute backend call.
		return abortMultipartUploadFn(ctx, bucket, object, uploadID, opts)
	}
	if err = dcache.uploadIDExists(bucket, object, uploadID); err != nil {
		return toObjectErr(err, bucket, object, uploadID)
	}

	// execute backend operation
	err = abortMultipartUploadFn(ctx, bucket, object, uploadID, opts)
	if err != nil {
		return err
	}
	// abort multipart upload on cache
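	// (best effort, done asynchronously so the client response is not delayed)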
	go dcache.AbortUpload(bucket, object, uploadID)
	return nil
}