Mirror of https://github.com/minio/minio.git (synced 2025-02-03 01:46:00 -05:00)
Merge pull request #701 from harshavardhana/pr_out_move_to_container_list_datastructure_from_map_string_byte

Move to container/list datastructure from map[string][]byte

commit 181727ab57
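The change replaces the flat map[string][]byte store with the classic LRU layout: a container/list holding the values in recency order plus a map from key to *list.Element for O(1) lookup. As a standalone illustration of that pattern (not code from this commit; all names below are made up):

package main

import (
    "container/list"
    "fmt"
)

// entry is what each list element stores; the list keeps the most
// recently used entries at the front.
type entry struct {
    key   string
    value []byte
}

type lru struct {
    ll    *list.List               // recency order: front = newest, back = oldest
    index map[string]*list.Element // key -> element, for O(1) lookup
}

func newLRU() *lru {
    return &lru{ll: list.New(), index: make(map[string]*list.Element)}
}

func (l *lru) set(key string, value []byte) {
    if ele, ok := l.index[key]; ok {
        l.ll.MoveToFront(ele)
        ele.Value.(*entry).value = value
        return
    }
    l.index[key] = l.ll.PushFront(&entry{key, value})
}

func (l *lru) get(key string) ([]byte, bool) {
    ele, ok := l.index[key]
    if !ok {
        return nil, false
    }
    l.ll.MoveToFront(ele)
    return ele.Value.(*entry).value, true
}

// evictOldest drops the least recently used entry, the same idea as the
// doDeleteOldest helper added in this commit.
func (l *lru) evictOldest() {
    if ele := l.ll.Back(); ele != nil {
        l.ll.Remove(ele)
        delete(l.index, ele.Value.(*entry).key)
    }
}

func main() {
    c := newLRU()
    c.set("a", []byte("one"))
    c.set("b", []byte("two"))
    c.get("a")      // touching "a" makes "b" the oldest entry
    c.evictOldest() // removes "b"
    _, ok := c.get("b")
    fmt.Println(ok) // false
}

Lookups stay O(1) because the map finds the element directly, while MoveToFront and Back give the cache a cheap notion of "oldest". That is what lets the diff below evict the least recently used key instead of a random one.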
pkg/donut/cache/data/data.go (vendored, 154 changed lines)
@@ -18,6 +18,7 @@
 package data

 import (
+    "container/list"
     "sync"
     "time"
 )
@@ -32,16 +33,10 @@ type Cache struct {
     sync.Mutex

     // items hold the cached objects
-    items map[string][]byte
+    items *list.List

-    // updatedAt holds the time that related item's updated at
-    updatedAt map[string]time.Time
-
-    // expiration is a duration for a cache key to expire
-    expiration time.Duration
-
-    // stopExpireTimer channel to quit the timer thread
-    stopExpireTimer chan struct{}
+    // reverseItems holds the time that related item's updated at
+    reverseItems map[string]*list.Element

     // maxSize is a total size for overall cache
     maxSize uint64
@@ -49,30 +44,34 @@ type Cache struct {
     // currentSize is a current size in memory
     currentSize uint64

-    // OnExpired - callback function for eviction
-    OnExpired func(a ...interface{})
+    // OnEvicted - callback function for eviction
+    OnEvicted func(a ...interface{})

-    // totalExpired counter to keep track of total expirations
-    totalExpired uint64
+    // totalEvicted counter to keep track of total expirations
+    totalEvicted int
 }

 // Stats current cache statistics
 type Stats struct {
     Bytes   uint64
-    Items   uint64
-    Expired uint64
+    Items   int
+    Evicted int
 }

+type element struct {
+    key   string
+    value []byte
+}
+
 // NewCache creates an inmemory cache
 //
 // maxSize is used for expiring objects before we run out of memory
-// expiration is used for expiration of a key from cache
-func NewCache(maxSize uint64, expiration time.Duration) *Cache {
+func NewCache(maxSize uint64) *Cache {
     return &Cache{
-        items:      make(map[string][]byte),
-        updatedAt:  map[string]time.Time{},
-        expiration: expiration,
-        maxSize:    maxSize,
+        items:        list.New(),
+        reverseItems: make(map[string]*list.Element),
+        maxSize:      maxSize,
     }
 }
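After this hunk the public surface of the package is NewCache(maxSize uint64), Get, Set, Append, Len, Delete, Stats, and the OnEvicted hook. A minimal caller, assuming only that the import path mirrors the file path shown above:

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/donut/cache/data"
)

func main() {
    // NewCache now takes only a size budget; the expiration argument is gone.
    cache := data.NewCache(1000)

    cache.Set("filename", []byte("Hello, world!"))
    if value, ok := cache.Get("filename"); ok {
        fmt.Printf("%s\n", value)
    }

    // Stats now reports Evicted instead of Expired, and Items comes
    // straight from the list length.
    stats := cache.Stats()
    fmt.Println(stats.Bytes, stats.Items, stats.Evicted)
}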
@@ -80,50 +79,32 @@ func NewCache(maxSize uint64, expiration time.Duration) *Cache {
 func (r *Cache) Stats() Stats {
     return Stats{
         Bytes:   r.currentSize,
-        Items:   uint64(len(r.items)),
-        Expired: r.totalExpired,
+        Items:   r.items.Len(),
+        Evicted: r.totalEvicted,
     }
 }

-// ExpireObjects expire objects in go routine
-func (r *Cache) ExpireObjects(gcInterval time.Duration) {
-    r.stopExpireTimer = make(chan struct{})
-    ticker := time.NewTicker(gcInterval)
-    go func() {
-        for {
-            select {
-            case <-ticker.C:
-                r.Expire()
-            case <-r.stopExpireTimer:
-                ticker.Stop()
-                return
-            }
-
-        }
-    }()
-}
-
 // Get returns a value of a given key if it exists
 func (r *Cache) Get(key string) ([]byte, bool) {
     r.Lock()
     defer r.Unlock()
-    value, ok := r.items[key]
-    if !ok {
+    ele, hit := r.reverseItems[key]
+    if !hit {
         return nil, false
     }
-    r.updatedAt[key] = time.Now()
-    return value, true
+    r.items.MoveToFront(ele)
+    return ele.Value.(*element).value, true
 }

 // Len returns length of the value of a given key, returns zero if key doesn't exist
 func (r *Cache) Len(key string) int {
     r.Lock()
     defer r.Unlock()
-    _, ok := r.items[key]
+    _, ok := r.reverseItems[key]
     if !ok {
         return 0
     }
-    return len(r.items[key])
+    return len(r.reverseItems[key].Value.(*element).value)
 }

 // Append will append new data to an existing key,
@@ -140,22 +121,20 @@ func (r *Cache) Append(key string, value []byte) bool {
         }
         // remove random key if only we reach the maxSize threshold
         for (r.currentSize + valueLen) > r.maxSize {
-            for randomKey := range r.items {
-                r.doDelete(randomKey)
-                break
-            }
+            r.doDeleteOldest()
+            break
         }
     }
-    _, ok := r.items[key]
-    if !ok {
-        r.items[key] = value
+    ele, hit := r.reverseItems[key]
+    if !hit {
+        ele := r.items.PushFront(&element{key, value})
         r.currentSize += valueLen
-        r.updatedAt[key] = time.Now()
+        r.reverseItems[key] = ele
         return true
     }
-    r.items[key] = append(r.items[key], value...)
+    r.items.MoveToFront(ele)
     r.currentSize += valueLen
-    r.updatedAt[key] = time.Now()
+    ele.Value.(*element).value = append(ele.Value.(*element).value, value...)
     return true
 }
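The new Append never replaces the list node: it moves the existing *list.Element to the front and grows the byte slice held inside it, going through a type assertion because list.Element.Value has static type interface{}. A small self-contained sketch of that mechanism (illustrative names, not the package's code):

package main

import (
    "container/list"
    "fmt"
)

type element struct {
    key   string
    value []byte
}

func main() {
    ll := list.New()
    index := make(map[string]*list.Element)

    // First Append for a key: push a fresh element to the front.
    index["log"] = ll.PushFront(&element{"log", []byte("hello")})

    // Later Appends: the node the map points at stays in place; it is only
    // moved to the front and its slice grown, via a type assertion to *element.
    ele := index["log"]
    ll.MoveToFront(ele)
    ele.Value.(*element).value = append(ele.Value.(*element).value, []byte(", world")...)

    fmt.Println(string(ele.Value.(*element).value)) // hello, world
}

The list only tracks ordering; byte accounting stays in the cache's own currentSize counter, which is why the diff updates it separately.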
@@ -172,55 +151,46 @@ func (r *Cache) Set(key string, value []byte) bool {
         }
         // remove random key if only we reach the maxSize threshold
         for (r.currentSize + valueLen) > r.maxSize {
-            for randomKey := range r.items {
-                r.doDelete(randomKey)
-                break
-            }
+            r.doDeleteOldest()
         }
     }
-    r.items[key] = value
+    if _, hit := r.reverseItems[key]; hit {
+        return false
+    }
+    ele := r.items.PushFront(&element{key, value})
     r.currentSize += valueLen
-    r.updatedAt[key] = time.Now()
+    r.reverseItems[key] = ele
     return true
 }

-// Expire expires keys which have expired
-func (r *Cache) Expire() {
-    r.Lock()
-    defer r.Unlock()
-    for key := range r.items {
-        if !r.isValid(key) {
-            r.doDelete(key)
-        }
-    }
-}
-
 // Delete deletes a given key if exists
 func (r *Cache) Delete(key string) {
     r.Lock()
     defer r.Unlock()
     r.doDelete(key)
 }

 func (r *Cache) doDelete(key string) {
-    if _, ok := r.items[key]; ok {
-        r.currentSize -= uint64(len(r.items[key]))
-        delete(r.items, key)
-        delete(r.updatedAt, key)
-        r.totalExpired++
-        if r.OnExpired != nil {
-            r.OnExpired(key)
+    ele, ok := r.reverseItems[key]
+    if !ok {
+        return
+    }
+    if ele != nil {
+        r.currentSize -= uint64(len(r.reverseItems[key].Value.(*element).value))
+        r.items.Remove(ele)
+        delete(r.reverseItems, key)
+        r.totalEvicted++
+        if r.OnEvicted != nil {
+            r.OnEvicted(key)
         }
     }
 }

-func (r *Cache) isValid(key string) bool {
-    updatedAt, ok := r.updatedAt[key]
-    if !ok {
-        return false
+func (r *Cache) doDeleteOldest() {
+    ele := r.items.Back()
+    if ele != nil {
+        r.currentSize -= uint64(len(r.reverseItems[ele.Value.(*element).key].Value.(*element).value))
+        delete(r.reverseItems, ele.Value.(*element).key)
+        r.items.Remove(ele)
+        r.totalEvicted++
+        if r.OnEvicted != nil {
+            r.OnEvicted(ele.Value.(*element).key)
+        }
     }
-    if r.expiration == noExpiration {
-        return true
-    }
-    return updatedAt.Add(r.expiration).After(time.Now())
 }
pkg/donut/cache/data/data_test.go (vendored, 2 changed lines)
@@ -29,7 +29,7 @@ type MySuite struct{}
 var _ = Suite(&MySuite{})

 func (s *MySuite) TestCache(c *C) {
-    cache := NewCache(1000, 0)
+    cache := NewCache(1000)
     data := []byte("Hello, world!")
     ok := cache.Set("filename", data)
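A hypothetical follow-on case in the same gocheck style as the test above, exercising the eviction path the new doDeleteOldest introduces. The 20-byte budget and the keys are invented for illustration, and it assumes Set only rejects values larger than the whole budget, as the surrounding code suggests:

// Sketch only: not part of the commit.
func (s *MySuite) TestCacheEviction(c *C) {
    cache := NewCache(20) // tiny budget: room for two 10-byte values
    var evictedKey string
    cache.OnEvicted = func(a ...interface{}) {
        evictedKey = a[0].(string)
    }

    c.Assert(cache.Set("a", []byte("0123456789")), Equals, true) // 10 bytes
    c.Assert(cache.Set("b", []byte("0123456789")), Equals, true) // 20 bytes, exactly at the budget
    c.Assert(cache.Set("c", []byte("0123456789")), Equals, true) // overflow: oldest key "a" is dropped
    c.Assert(evictedKey, Equals, "a")
    c.Assert(cache.Stats().Evicted, Equals, 1)

    _, ok := cache.Get("a")
    c.Assert(ok, Equals, false)
}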
@@ -80,15 +80,12 @@ func New(c *Config) (Interface, error) {
     a.storedBuckets = metadata.NewCache()
     a.nodes = make(map[string]node)
     a.buckets = make(map[string]bucket)
-    a.objects = data.NewCache(a.config.MaxSize, a.config.Expiration)
-    a.multiPartObjects = data.NewCache(0, time.Duration(0))
-    a.objects.OnExpired = a.expiredObject
-    a.multiPartObjects.OnExpired = a.expiredPart
+    a.objects = data.NewCache(a.config.MaxSize)
+    a.multiPartObjects = data.NewCache(0)
+    a.objects.OnEvicted = a.evictedObject
+    a.multiPartObjects.OnEvicted = a.evictedPart
     a.lock = new(sync.Mutex)

-    // set up cache expiration
-    a.objects.ExpireObjects(time.Second * 5)
-
     if len(a.config.NodeDiskMap) > 0 {
         for k, v := range a.config.NodeDiskMap {
             if len(v) == 0 {
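The eviction hook is an untyped variadic callback, so the receiving side has to type-assert the key back out, exactly as evictedObject and evictedPart do below. A stripped-down sketch of that wiring (the import path is assumed from the data.go path above; keys and sizes are arbitrary):

package main

import (
    "log"

    "github.com/minio/minio/pkg/donut/cache/data"
)

// evicted mirrors the shape of the callbacks below: the cache passes the
// evicted key as the first variadic argument.
func evicted(a ...interface{}) {
    key := a[0].(string)
    log.Printf("evicted key: %s", key)
}

func main() {
    objects := data.NewCache(1024)       // arbitrary budget for illustration
    multiPartObjects := data.NewCache(0) // 0, mirroring the multipart cache above

    objects.OnEvicted = evicted
    multiPartObjects.OnEvicted = evicted

    objects.Set("bucket/object", []byte("payload"))
}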
@@ -570,10 +567,10 @@ func (donut API) GetObjectMetadata(bucket, key string) (ObjectMetadata, error) {
     return ObjectMetadata{}, iodine.New(ObjectNotFound{Object: key}, nil)
 }

-func (donut API) expiredObject(a ...interface{}) {
+func (donut API) evictedObject(a ...interface{}) {
     cacheStats := donut.objects.Stats()
-    log.Printf("CurrentSize: %d, CurrentItems: %d, TotalExpirations: %d",
-        cacheStats.Bytes, cacheStats.Items, cacheStats.Expired)
+    log.Printf("CurrentSize: %d, CurrentItems: %d, TotalEvicted: %d",
+        cacheStats.Bytes, cacheStats.Items, cacheStats.Evicted)
     key := a[0].(string)
     // loop through all buckets
     for _, bucket := range donut.storedBuckets.GetAll() {
@@ -376,7 +376,7 @@ func (donut API) ListObjectParts(bucket, key string, resources ObjectResourcesMe
     return objectResourcesMetadata, nil
 }

-func (donut API) expiredPart(a ...interface{}) {
+func (donut API) evictedPart(a ...interface{}) {
     key := a[0].(string)
     // loop through all buckets
     buckets := donut.storedBuckets.GetAll()