mirror of https://github.com/minio/minio.git
Deprecate and remove in-memory object caching (#5481)
In-memory caching cannot be cleanly implemented without access to the GC, which Go doesn't naturally provide. At times we have seen that object caching is more of a hindrance than a boon for our use cases. Removing it completely from our implementation; related to #5160 and #5182.
committed by kannappanr
parent 1ebbc2ce88
commit 0c880bb852
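The rationale turns on Go exposing almost no garbage-collector control. For reference, a minimal sketch of the only levers the standard library offers (illustrative only; not part of this commit):

	package main

	import "runtime/debug"

	func main() {
		// The runtime offers no way to pin, size, or evict individual
		// allocations, so a memory-capped object cache can only react
		// after allocations have already happened.
		debug.SetGCPercent(50) // global knob: trade CPU for a smaller heap
		debug.FreeOSMemory()   // force a GC and return freed pages to the OS
	}

The deleted newXLObjects code at the bottom of this diff shows exactly this workaround in use: the cache's OnEviction callback called debug.FreeOSMemory() to claw memory back after evictions.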
@@ -76,9 +76,6 @@ func handleCommonEnvVars() {
 		globalProfiler = startProfiler(profiler)
 	}
 
-	// Check if object cache is disabled.
-	globalXLObjCacheDisabled = strings.EqualFold(os.Getenv("_MINIO_CACHE"), "off")
-
 	accessKey := os.Getenv("MINIO_ACCESS_KEY")
 	secretKey := os.Getenv("MINIO_SECRET_KEY")
 	if accessKey != "" && secretKey != "" {
@@ -143,9 +143,8 @@ var (
 	// Time when object layer was initialized on start up.
 	globalBootTime time.Time
 
-	globalActiveCred         auth.Credentials
-	globalPublicCerts        []*x509.Certificate
-	globalXLObjCacheDisabled bool
+	globalActiveCred  auth.Credentials
+	globalPublicCerts []*x509.Certificate
 
 	globalIsEnvDomainName bool
 	globalDomainName      string // Root domain for virtual host style requests
@@ -106,14 +106,6 @@ func testGetObject(obj ObjectLayer, instanceType string, t TestErrHandler) {
 		// Test case - 5.
 		// Case with invalid object names.
 		{bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"#")},
 		// Test case - 6.
 		// Valid object and bucket names but non-existent bucket.
 		// {"abc", "def", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket not found: abc")},
-		// A custom writer is sent as an argument.
-		// It's designed to return an EOF error after `n` bytes have been written, where `n` is the argument given when initializing the EOF writer.
-		// This simulates the cache not filling up completely: since the EOFWriter doesn't allow the write to complete,
-		// the cache gets filled with partial data. The follow-up test case reads the object completely and tests the
-		// purging of the cache during the incomplete write.
-		// Test case - 7.
-		{bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF},
-		// Test case with start offset set to 0 and length set to size of the object.
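This test table (and its twin in testGetObjectDiskNotFound below) exercises NewEOFWriter, a test helper whose implementation isn't shown in this diff. A minimal sketch of a writer with the described behavior, accepting up to n bytes and then failing with io.EOF (an assumed shape; the real helper may differ):

	import "io"

	// EOFWriter writes through to w until n bytes have been written,
	// then fails any further Write with io.EOF.
	type EOFWriter struct {
		w io.Writer
		n int64
	}

	func NewEOFWriter(w io.Writer, n int64) *EOFWriter {
		return &EOFWriter{w: w, n: n}
	}

	func (e *EOFWriter) Write(p []byte) (int, error) {
		if e.n <= 0 {
			return 0, io.EOF
		}
		if int64(len(p)) > e.n {
			p = p[:e.n] // truncate: only n bytes are ever accepted
		}
		written, err := e.w.Write(p)
		e.n -= int64(written)
		return written, err
	}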
@@ -374,14 +366,6 @@ func testGetObjectDiskNotFound(obj ObjectLayer, instanceType string, disks []str
 		// Test case - 5.
 		// Case with invalid object names.
 		{bucketName, "", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Object name invalid: "+bucketName+"#")},
 		// Test case - 6.
 		// Valid object and bucket names but non-existent bucket.
 		// {"abc", "def", 0, 0, nil, nil, false, []byte(""), fmt.Errorf("%s", "Bucket not found: abc")},
-		// A custom writer is sent as an argument.
-		// It's designed to return an EOF error after `n` bytes have been written, where `n` is the argument given when initializing the EOF writer.
-		// This simulates the cache not filling up completely: since the EOFWriter doesn't allow the write to complete,
-		// the cache gets filled with partial data. The follow-up test case reads the object completely and tests the
-		// purging of the cache during the incomplete write.
-		// Test case - 7.
-		{bucketName, objectName, 0, int64(len(bytesData[0].byteData)), buffers[0], NewEOFWriter(buffers[0], 100), false, []byte{}, io.EOF},
-		// Test case with start offset set to 0 and length set to size of the object.
@@ -53,42 +53,3 @@ func setMaxResources() (err error) {
 	err = sys.SetMaxMemoryLimit(maxLimit, maxLimit)
 	return err
 }
-
-func getMaxCacheSize(curLimit, totalRAM uint64) (cacheSize uint64) {
-	// Return zero if current limit or totalRAM is less than minRAMSize.
-	if curLimit < minRAMSize || totalRAM < minRAMSize {
-		return cacheSize
-	}
-
-	// Return 50% of current rlimit or total RAM as cache size.
-	if curLimit < totalRAM {
-		cacheSize = curLimit / 2
-	} else {
-		cacheSize = totalRAM / 2
-	}
-
-	return cacheSize
-}
-
-// GetMaxCacheSize returns maximum cache size based on current RAM size and memory limit.
-func GetMaxCacheSize() (cacheSize uint64, err error) {
-	// Get max memory limit.
-	var curLimit uint64
-	if curLimit, _, err = sys.GetMaxMemoryLimit(); err != nil {
-		return cacheSize, err
-	}
-
-	// Get total RAM.
-	var stats sys.Stats
-	if stats, err = sys.GetStats(); err != nil {
-		return cacheSize, err
-	}
-
-	// On some OSes, like Windows, maxLimit is zero. Set total RAM as maxLimit.
-	if curLimit == 0 {
-		curLimit = stats.TotalRAM
-	}
-
-	cacheSize = getMaxCacheSize(curLimit, stats.TotalRAM)
-	return cacheSize, err
-}
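The deleted heuristic caps the cache at half of the smaller of the process rlimit and physical RAM, and returns zero (caching disabled) when either falls below minRAMSize, which cmd/xl-v1.go below defines as 24 GiB. Two hypothetical calls to illustrate:

	// 64 GiB rlimit, 32 GiB of RAM: the smaller value is halved.
	getMaxCacheSize(64<<30, 32<<30) // == 16 GiB

	// 16 GiB on either side is below the 24 GiB floor: caching is off.
	getMaxCacheSize(16<<30, 16<<30) // == 0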
@@ -1,43 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package cmd
-
-import "testing"
-
-func TestGetMaxCacheSize(t *testing.T) {
-	testCases := []struct {
-		curLimit       uint64
-		totalRAM       uint64
-		expectedResult uint64
-	}{
-		{uint64(0), uint64(0), uint64(0)},
-		{minRAMSize, uint64(0), uint64(0)},
-		{uint64(0), minRAMSize, uint64(0)},
-		{uint64(18446744073709551615), uint64(8115998720), uint64(0)},
-		{uint64(8115998720), uint64(16115998720), uint64(0)},
-		{minRAMSize, minRAMSize, uint64(12884901888)},
-		{minRAMSize, uint64(16115998720), uint64(0)},
-		{uint64(18446744073709551615), uint64(10115998720), uint64(0)},
-	}
-
-	for i, testCase := range testCases {
-		cacheSize := getMaxCacheSize(testCase.curLimit, testCase.totalRAM)
-		if testCase.expectedResult != cacheSize {
-			t.Fatalf("Test %d, Expected: %v, Got: %v", i+1, testCase.expectedResult, cacheSize)
-		}
-	}
-}
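The single non-zero expectation in the deleted test follows directly from the heuristic: minRAMSize = 24 GiB = 25769803776 bytes, so getMaxCacheSize(minRAMSize, minRAMSize) = 25769803776 / 2 = 12884901888 (12 GiB). Every other row has a limit or RAM figure below the 24 GiB floor and therefore expects zero.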
@@ -1657,12 +1657,6 @@ func initObjectLayer(endpoints EndpointList) (ObjectLayer, []StorageAPI, error)
 		return nil, nil, err
 	}
 
-	// Disabling the cache for integration tests.
-	// Should use the object layer tests for validating cache.
-	if xl, ok := objLayer.(*xlObjects); ok {
-		xl.objCacheEnabled = false
-	}
-
 	// Success.
 	return objLayer, formattedDisks, nil
 }
@@ -20,7 +20,6 @@ import (
 	"encoding/hex"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"path"
 	"strings"
 	"sync"
@@ -1068,19 +1067,6 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
 		return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
 	}
 
-	defer func() {
-		if xl.objCacheEnabled {
-			// A new complete multipart upload invalidates any
-			// previously cached object in memory.
-			xl.objCache.Delete(path.Join(bucket, object))
-
-			// Prefetch the object from disk by triggering a fake GetObject call.
-			// Unlike a regular single PutObject, a multipart PutObject comes in
-			// stages and is harder to cache.
-			go xl.GetObject(bucket, object, 0, objectSize, ioutil.Discard, s3MD5)
-		}
-	}()
-
 	if xl.isObject(bucket, object) {
 		// Rename if an object already exists to temporary location.
 		newUniqueID := mustGetUUID()
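Note how the deleted prefetch worked: a GetObject of the full objectSize from offset 0 into ioutil.Discard satisfies exactly the whole-object condition under which getObject (in the next file) fills the cache, so discarding the read still left the object cached for subsequent requests.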
@@ -27,7 +27,6 @@ import (
 	"github.com/minio/minio/pkg/errors"
 	"github.com/minio/minio/pkg/hash"
 	"github.com/minio/minio/pkg/mimedb"
-	"github.com/minio/minio/pkg/objcache"
 )
 
 // list all errors which can be ignored in object operations.
@@ -289,52 +288,6 @@ func (xl xlObjects) getObject(bucket, object string, startOffset int64, length i
 		return errors.Trace(InvalidRange{startOffset, length, xlMeta.Stat.Size})
 	}
 
-	// Save the writer.
-	mw := writer
-
-	// Object cache enabled block.
-	if xlMeta.Stat.Size > 0 && xl.objCacheEnabled {
-		// Validate if we have previous cache.
-		var cachedBuffer io.ReaderAt
-		cachedBuffer, err = xl.objCache.Open(path.Join(bucket, object), modTime)
-		if err == nil { // Cache hit
-			// Create a new section reader, starting at an offset with length.
-			reader := io.NewSectionReader(cachedBuffer, startOffset, length)
-
-			// Copy the data out.
-			if _, err = io.Copy(writer, reader); err != nil {
-				return errors.Trace(err)
-			}
-
-			// Success.
-			return nil
-
-		} // Cache miss.
-
-		// For any unknown error, return and error out.
-		if err != objcache.ErrKeyNotFoundInCache {
-			return errors.Trace(err)
-		} // Cache has not been found, fill the cache.
-
-		// Cache is only set if the whole object is being read.
-		if startOffset == 0 && length == xlMeta.Stat.Size {
-			// Proceed to set the cache.
-			var newBuffer io.WriteCloser
-			// Create a new entry in memory of length.
-			newBuffer, err = xl.objCache.Create(path.Join(bucket, object), length)
-			if err == nil {
-				// Create a multi writer to write to both memory and the client response.
-				mw = io.MultiWriter(newBuffer, writer)
-				defer newBuffer.Close()
-			}
-			// Ignore the error if the cache is full; proceed to write the object.
-			if err != nil && err != objcache.ErrCacheFull {
-				// For any other error return here.
-				return toObjectErr(errors.Trace(err), bucket, object)
-			}
-		}
-	}
-
 	var totalBytesRead int64
 	storage, err := NewErasureStorage(onlineDisks, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize)
 	if err != nil {
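Distilled, the deleted read path above: on a cache hit, serve the requested byte range from the cached buffer; on a miss, fill the cache only when the whole object is read, by teeing the erasure-decoded stream into both the cache entry and the client. A simplified, self-contained sketch using a plain map in place of objcache (not the actual minio API):

	import (
		"bytes"
		"io"
	)

	// readThrough serves [offset, offset+length) of an object of totalSize
	// bytes from src to dst, consulting and filling a naive in-memory cache.
	func readThrough(cache map[string][]byte, key string, src io.Reader, dst io.Writer, offset, length, totalSize int64) error {
		if buf, ok := cache[key]; ok {
			// Cache hit: serve the range straight from memory.
			_, err := io.Copy(dst, io.NewSectionReader(bytes.NewReader(buf), offset, length))
			return err
		}
		if offset == 0 && length == totalSize {
			// Whole-object read: tee the stream into the cache while serving it.
			var fill bytes.Buffer
			if _, err := io.Copy(io.MultiWriter(&fill, dst), src); err != nil {
				return err
			}
			cache[key] = fill.Bytes()
			return nil
		}
		// Partial read on a miss: bypass the cache entirely.
		_, err := io.CopyN(dst, src, length)
		return err
	}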
@@ -366,7 +319,7 @@ func (xl xlObjects) getObject(bucket, object string, startOffset int64, length i
 			checksums[index] = checksumInfo.Hash
 		}
 
-		file, err := storage.ReadFile(mw, bucket, pathJoin(object, partName), partOffset, readSize, partSize, checksums, algorithm, xlMeta.Erasure.BlockSize)
+		file, err := storage.ReadFile(writer, bucket, pathJoin(object, partName), partOffset, readSize, partSize, checksums, algorithm, xlMeta.Erasure.BlockSize)
 		if err != nil {
 			return toObjectErr(err, bucket, object)
 		}
@@ -663,27 +616,6 @@ func (xl xlObjects) putObject(bucket string, object string, data *hash.Reader, m
 	// Limit the reader to its provided size if specified.
 	var reader io.Reader = data
 
-	// Proceed to set the cache.
-	var newBuffer io.WriteCloser
-
-	// If caching is enabled, proceed to set the cache.
-	if data.Size() > 0 && xl.objCacheEnabled {
-		// PutObject invalidates any previously cached object in memory.
-		xl.objCache.Delete(path.Join(bucket, object))
-
-		// Create a new entry in memory of size.
-		newBuffer, err = xl.objCache.Create(path.Join(bucket, object), data.Size())
-		if err == nil {
-			// Cache incoming data into a buffer
-			reader = io.TeeReader(data, newBuffer)
-		} else {
-			// Return errors other than ErrCacheFull
-			if err != objcache.ErrCacheFull {
-				return ObjectInfo{}, toObjectErr(errors.Trace(err), bucket, object)
-			}
-		}
-	}
-
 	// Initialize parts metadata
 	partsMetadata := make([]xlMetaV1, len(xl.storageDisks))
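The deleted write path is the mirror image: invalidate the key, tee the incoming stream into a cache buffer while it is erasure-coded to disk, and commit the buffer only after the object lands successfully (the real code did this via newBuffer.Close() after the rename, as the next hunk shows). A sketch under the same assumptions as the read-path example above:

	// writeThrough streams data to sink while buffering it, committing the
	// buffered bytes to the cache only if the write fully succeeds.
	func writeThrough(cache map[string][]byte, key string, data io.Reader, sink io.Writer) error {
		delete(cache, key) // a new put invalidates any cached copy
		var buf bytes.Buffer
		if _, err := io.Copy(sink, io.TeeReader(data, &buf)); err != nil {
			return err
		}
		cache[key] = buf.Bytes()
		return nil
	}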
@@ -811,12 +743,6 @@ func (xl xlObjects) putObject(bucket string, object string, data *hash.Reader, m
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
 
-	// Once we have successfully renamed the object, Close the buffer, which
-	// saves the object to the cache.
-	if sizeWritten > 0 && xl.objCacheEnabled && newBuffer != nil {
-		newBuffer.Close()
-	}
-
 	// Object info is the same in all disks, so we can pick the first meta
 	// of the first disk
 	xlMeta = partsMetadata[0]
@@ -916,11 +842,6 @@ func (xl xlObjects) DeleteObject(bucket, object string) (err error) {
 		return toObjectErr(err, bucket, object)
 	}
 
-	if xl.objCacheEnabled {
-		// Delete from the cache.
-		xl.objCache.Delete(pathJoin(bucket, object))
-	}
-
 	// Success.
 	return nil
 }
@@ -184,8 +184,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	// Disable caching to avoid returning early and not covering other code-paths.
-	xl.objCacheEnabled = false
+
 	// Make 9 disks offline, which leaves less than quorum number of disks
 	// in a 16 disk XL setup. The original disks are 'replaced' with
 	// naughtyDisks that fail after 'f' successful StorageAPI method
cmd/xl-v1.go
@@ -18,15 +18,12 @@ package cmd
 
 import (
 	"fmt"
-	"runtime/debug"
 	"sort"
 	"sync"
 	"time"
 
-	humanize "github.com/dustin/go-humanize"
 	"github.com/minio/minio/pkg/disk"
 	"github.com/minio/minio/pkg/errors"
-	"github.com/minio/minio/pkg/objcache"
 )
 
 // XL constants.
@@ -37,9 +34,6 @@ const (
 	// Uploads metadata file carries per multipart object metadata.
 	uploadsJSONFile = "uploads.json"
 
-	// Represents the minimum required RAM size to enable caching.
-	minRAMSize = 24 * humanize.GiByte
-
 	// Maximum erasure blocks.
 	maxErasureBlocks = 16
@@ -55,12 +49,6 @@ type xlObjects struct {
 	// ListObjects pool management.
 	listPool *treeWalkPool
 
-	// Object cache for caching objects.
-	objCache *objcache.Cache
-
-	// Object cache enabled.
-	objCacheEnabled bool
-
 	// name space mutex for object layer
 	nsMutex *nsLockMap
 }
@@ -112,28 +100,6 @@ func newXLObjects(storageDisks []StorageAPI) (ObjectLayer, error) {
 		listPool: listPool,
 		nsMutex:  newNSLock(globalIsDistXL),
 	}
-	// Get cache size if the _MINIO_CACHE environment variable is set.
-	var maxCacheSize uint64
-	if !globalXLObjCacheDisabled {
-		maxCacheSize, err = GetMaxCacheSize()
-		errorIf(err, "Unable to get maximum cache size")
-
-		// Enable object cache if cache size is more than zero.
-		xl.objCacheEnabled = maxCacheSize > 0
-	}
-
-	// Check if object cache is enabled.
-	if xl.objCacheEnabled {
-		// Initialize object cache.
-		objCache, oerr := objcache.New(maxCacheSize, objcache.DefaultExpiry)
-		if oerr != nil {
-			return nil, oerr
-		}
-		objCache.OnEviction = func(key string) {
-			debug.FreeOSMemory()
-		}
-		xl.objCache = objCache
-	}
-
 	// Initialize meta volume; if the volume already exists it is ignored.
 	if err = initMetaVolume(xl.storageDisks); err != nil {