Make caching its own package, trove, and use it inside the memory driver

Harshavardhana
2015-05-31 18:38:34 -07:00
parent 122d67625f
commit a91e519692
4 changed files with 64 additions and 19 deletions

View File

@@ -34,14 +34,15 @@ import (
"github.com/minio/minio/pkg/iodine"
"github.com/minio/minio/pkg/storage/drivers"
"github.com/minio/minio/pkg/storage/trove"
)
// memoryDriver - local variables
type memoryDriver struct {
storedBuckets map[string]storedBucket
lock *sync.RWMutex
objects *Cache
multiPartObjects *Cache
objects *trove.Cache
multiPartObjects *trove.Cache
}
type storedBucket struct {
@@ -69,8 +70,8 @@ func Start(maxSize uint64, expiration time.Duration) (chan<- string, <-chan erro
    var memory *memoryDriver
    memory = new(memoryDriver)
    memory.storedBuckets = make(map[string]storedBucket)
-   memory.objects = NewCache(maxSize, expiration)
-   memory.multiPartObjects = NewCache(0, time.Duration(0))
+   memory.objects = trove.NewCache(maxSize, expiration)
+   memory.multiPartObjects = trove.NewCache(0, time.Duration(0))
    memory.lock = new(sync.RWMutex)

    memory.objects.OnExpired = memory.expiredObject
@@ -108,7 +109,7 @@ func (memory *memoryDriver) GetObject(w io.Writer, bucket string, object string)
        memory.lock.RUnlock()
        return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, nil)
    }
-   written, err := io.Copy(w, bytes.NewBuffer(data.([]byte)))
+   written, err := io.Copy(w, bytes.NewBuffer(data))
    memory.lock.RUnlock()
    return written, iodine.New(err, nil)
}
@@ -142,7 +143,7 @@ func (memory *memoryDriver) GetPartialObject(w io.Writer, bucket, object string,
        memory.lock.RUnlock()
        return 0, iodine.New(drivers.ObjectNotFound{Bucket: bucket, Object: object}, errParams)
    }
-   written, err := io.CopyN(w, bytes.NewBuffer(data.([]byte)[start:]), length)
+   written, err := io.CopyN(w, bytes.NewBuffer(data[start:]), length)
    memory.lock.RUnlock()
    return written, iodine.New(err, nil)
}
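
For orientation, below is a minimal sketch of how a caller wires up the relocated cache, mirroring the Start() changes above. The trove package source is not part of this diff, so the sketch assumes it keeps the API of the deleted in-package cache shown in the next file, except that values are handled as []byte (the type assertions dropped above suggest Get now returns []byte directly); Set, ExpireObjects, and the OnExpired signature are likewise assumptions carried over from the old code.

package main

import (
    "fmt"
    "time"

    "github.com/minio/minio/pkg/storage/trove"
)

func main() {
    // Cap the cache at 1 MiB; entries expire an hour after their last update.
    cache := trove.NewCache(1<<20, time.Hour)

    // Optional eviction callback, mirroring memory.objects.OnExpired above.
    cache.OnExpired = func(a ...interface{}) {
        fmt.Println("expired:", a)
    }

    // Sweep for expired keys every ten minutes (assumed to match the old ExpireObjects).
    cache.ExpireObjects(10 * time.Minute)

    cache.Set("bucket/object", []byte("hello"))
    if data, ok := cache.Get("bucket/object"); ok {
        fmt.Printf("cached %d bytes: %s\n", len(data), data)
    }
}

Pulling the cache into its own package lets both memory.objects and memory.multiPartObjects share one implementation and keeps the driver code free of type assertions.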

View File

@@ -1,181 +0,0 @@
/*
 * Minimalist Object Storage, (C) 2015 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package memory

import (
    "sync"
    "time"
)

var zeroExpiration = time.Duration(0)

// Cache holds the required variables to compose an in memory cache system
// which also provides expiring key mechanism and also maxSize
type Cache struct {
    // Mutex is used for handling the concurrent
    // read/write requests for cache
    sync.Mutex
    // items hold the cached objects
    items map[string]interface{}
    // updatedAt holds the time that related item's updated at
    updatedAt map[string]time.Time
    // expiration is a duration for a cache key to expire
    expiration time.Duration
    // stopExpireTimer channel to quit the timer thread
    stopExpireTimer chan struct{}
    // maxSize is a total size for overall cache
    maxSize uint64
    // currentSize is a current size in memory
    currentSize uint64
    // OnExpired - callback function for eviction
    OnExpired func(a ...interface{})
    // totalExpired counter to keep track of total expirations
    totalExpired uint64
}

// Stats current cache statistics
type Stats struct {
    Bytes uint64
    Items uint64
    Expired uint64
}

// NewCache creates an inmemory cache
//
// maxSize is used for expiring objects before we run out of memory
// expiration is used for expiration of a key from cache
func NewCache(maxSize uint64, expiration time.Duration) *Cache {
    return &Cache{
        items: map[string]interface{}{},
        updatedAt: map[string]time.Time{},
        expiration: expiration,
        maxSize: maxSize,
    }
}

// Stats get current cache statistics
func (r *Cache) Stats() Stats {
    return Stats{
        Bytes: r.currentSize,
        Items: uint64(len(r.items)),
        Expired: r.totalExpired,
    }
}

// ExpireObjects expire objects in go routine
func (r *Cache) ExpireObjects(gcInterval time.Duration) {
    r.stopExpireTimer = make(chan struct{})
    ticker := time.NewTicker(gcInterval)
    go func() {
        for {
            select {
            case <-ticker.C:
                r.Expire()
            case <-r.stopExpireTimer:
                ticker.Stop()
                return
            }
        }
    }()
}

// Get returns a value of a given key if it exists
func (r *Cache) Get(key string) (interface{}, bool) {
    r.Lock()
    defer r.Unlock()
    value, ok := r.items[key]
    if !ok {
        return nil, false
    }
    r.updatedAt[key] = time.Now()
    return value, true
}

// Set will persist a value to the cache
func (r *Cache) Set(key string, value interface{}) bool {
    r.Lock()
    defer r.Unlock()
    valueLen := uint64(len(value.([]byte)))
    if r.maxSize > 0 {
        // check if the size of the object is not bigger than the
        // capacity of the cache
        if valueLen > r.maxSize {
            return false
        }
        // remove random key if only we reach the maxSize threshold
        for key := range r.items {
            for (r.currentSize + valueLen) > r.maxSize {
                r.doDelete(key)
            }
            break
        }
    }
    r.items[key] = value
    r.currentSize += valueLen
    r.updatedAt[key] = time.Now()
    return true
}

// Expire expires keys which have expired
func (r *Cache) Expire() {
    r.Lock()
    defer r.Unlock()
    for key := range r.items {
        if !r.isValid(key) {
            r.doDelete(key)
        }
    }
}

// Delete deletes a given key if exists
func (r *Cache) Delete(key string) {
    r.Lock()
    defer r.Unlock()
    r.doDelete(key)
}

func (r *Cache) doDelete(key string) {
    if _, ok := r.items[key]; ok {
        r.currentSize -= uint64(len(r.items[key].([]byte)))
        delete(r.items, key)
        delete(r.updatedAt, key)
        r.totalExpired++
        if r.OnExpired != nil {
            r.OnExpired(key)
        }
    }
}

func (r *Cache) isValid(key string) bool {
    updatedAt, ok := r.updatedAt[key]
    if !ok {
        return false
    }
    if r.expiration == zeroExpiration {
        return true
    }
    return updatedAt.Add(r.expiration).After(time.Now())
}
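
The size-capped eviction in Set above relies on Go's randomized map iteration order: ranging over items and breaking after the first key picks one pseudo-random victim to drop when the incoming value would not fit (and, as written, at most one key is ever actually removed per call, since re-deleting the same key is a no-op). A simplified, standalone sketch of that pattern, with illustrative values not taken from the repository:

package main

import "fmt"

func main() {
    // Toy cache state: three entries totalling 900 bytes against a 1000-byte cap.
    items := map[string][]byte{
        "a": make([]byte, 400),
        "b": make([]byte, 300),
        "c": make([]byte, 200),
    }
    currentSize, maxSize := uint64(900), uint64(1000)
    incoming := uint64(250) // size of the value we want to add next

    // Same shape as Cache.Set: range over the map and stop after the first
    // (pseudo-random) key, evicting it only if the new value would not fit.
    for key := range items {
        if currentSize+incoming > maxSize {
            currentSize -= uint64(len(items[key]))
            delete(items, key)
            fmt.Println("evicted:", key)
        }
        break
    }
    fmt.Println("cache size after eviction:", currentSize)
}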

View File

@@ -208,11 +208,9 @@ func (memory *memoryDriver) cleanupMultipartSession(bucket, key, uploadID string
}

func (memory *memoryDriver) cleanupMultiparts(bucket, key, uploadID string) {
-   memory.lock.Lock()
-   defer memory.lock.Unlock()
    for i := 1; i <= memory.storedBuckets[bucket].multiPartSession[key].totalParts; i++ {
        objectKey := bucket + "/" + getMultipartKey(key, uploadID, i)
-       memory.multiPartObjects.doDelete(objectKey)
+       memory.multiPartObjects.Delete(objectKey)
    }
}
@@ -246,7 +244,7 @@ func (memory *memoryDriver) CompleteMultipartUpload(bucket, key, uploadID string
            memory.lock.Unlock()
            return "", iodine.New(errors.New("missing part: "+strconv.Itoa(i)), nil)
        }
-       obj := object.([]byte)
+       obj := object
        size += int64(len(obj))
        calcMD5Bytes := md5.Sum(obj)
        // complete multi part request header md5sum per part is hex encoded