Add lexicographic Marker/NextMarker support for recursive listing of objects.

Also update the cache access-time logic so an object's timestamp is refreshed when it is read.
Harshavardhana 2015-05-05 14:27:18 -07:00
parent 7a87f89604
commit 75028c2ad1
8 changed files with 105 additions and 62 deletions
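
For context, the Marker/NextMarker pair drives lexicographic pagination: when a listing comes back truncated, the caller re-issues it with Marker set to the previous response's NextMarker (or, when NextMarker is absent, to the last key returned). A minimal client-side sketch of that loop in Go, using stand-in types rather than the real drivers package (lister and listAll are illustrative names, not part of this change):

package listing

// Stand-in types modelled on the drivers package touched by this commit;
// the definitions here are illustrative, not the real ones.
type ObjectMetadata struct{ Key string }

type BucketResourcesMetadata struct {
    Prefix, Marker, NextMarker, Delimiter string
    Maxkeys                               int
    IsTruncated                           bool
}

type lister interface {
    ListObjects(bucket string, r BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, error)
}

// listAll pages through a bucket in lexicographic order, feeding NextMarker
// (or the last returned key when NextMarker is empty) back in as Marker.
func listAll(d lister, bucket string) ([]ObjectMetadata, error) {
    var all []ObjectMetadata
    r := BucketResourcesMetadata{Maxkeys: 1000, Delimiter: "/"}
    for {
        objects, res, err := d.ListObjects(bucket, r)
        if err != nil {
            return nil, err
        }
        all = append(all, objects...)
        if !res.IsTruncated {
            return all, nil
        }
        switch {
        case res.NextMarker != "":
            r.Marker = res.NextMarker
        case len(objects) > 0:
            r.Marker = objects[len(objects)-1].Key
        default:
            return all, nil // nothing to resume from
        }
    }
}

The fallback to the last key mirrors the S3 convention documented on the ListObjectsResponse struct below.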

View File

@@ -27,45 +27,66 @@ const (
 // ListObjectsResponse - format for list objects response
 type ListObjectsResponse struct {
     XMLName xml.Name `xml:"http://doc.s3.amazonaws.com/2006-03-01 ListBucketResult" json:"-"`
-    Name string
-    Prefix string
-    Marker string
-    MaxKeys int
+    CommonPrefixes []*CommonPrefix
+    Contents []*Object
     Delimiter string
-    IsTruncated bool
-    Contents []*Item
-    CommonPrefixes []*Prefix
+    // Encoding type used to encode object keys in the response.
+    EncodingType string
+    // A flag that indicates whether or not ListObjects returned all of the results
+    // that satisfied the search criteria.
+    IsTruncated bool
+    Marker string
+    MaxKeys int
+    Name string
+    // When response is truncated (the IsTruncated element value in the response
+    // is true), you can use the key name in this field as marker in the subsequent
+    // request to get next set of objects. Object storage lists objects in alphabetical
+    // order Note: This element is returned only if you have delimiter request parameter
+    // specified. If response does not include the NextMaker and it is truncated,
+    // you can use the value of the last Key in the response as the marker in the
+    // subsequent request to get the next set of object keys.
+    NextMarker string
+    Prefix string
 }
 // ListBucketsResponse - format for list buckets response
 type ListBucketsResponse struct {
     XMLName xml.Name `xml:"http://doc.s3.amazonaws.com/2006-03-01 ListAllMyBucketsResult" json:"-"`
-    Owner Owner
+    // Container for one or more buckets.
     Buckets struct {
         Bucket []*Bucket
     } // Buckets are nested
+    Owner Owner
 }
-// Prefix - common prefix
-type Prefix struct {
+// CommonPrefix container for prefix response in ListObjectsResponse
+type CommonPrefix struct {
     Prefix string
 }
-// Bucket - bucket item
+// Bucket container for bucket metadata
 type Bucket struct {
     Name string
     CreationDate string
 }
-// Item - object item
-type Item struct {
+// Object container for object metadata
+type Object struct {
+    ETag string
     Key string
     LastModified string
-    ETag string
     Size int64
+    Owner Owner
+    // The class of storage used to store the object.
     StorageClass string
-    Owner Owner
 }
 // Owner - bucket owner/principal

View File

@@ -57,7 +57,7 @@ func generateListBucketsResponse(buckets []drivers.BucketMetadata) ListBucketsResponse {
 }

 // itemKey
-type itemKey []*Item
+type itemKey []*Object

 func (b itemKey) Len() int { return len(b) }
 func (b itemKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
@@ -72,8 +72,8 @@ func (b itemKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
 // output:
 // populated struct that can be serialized to match xml and json api spec output
 func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata, bucketResources drivers.BucketResourcesMetadata) ListObjectsResponse {
-    var contents []*Item
-    var prefixes []*Prefix
+    var contents []*Object
+    var prefixes []*CommonPrefix
     var owner = Owner{}
     var data = ListObjectsResponse{}
@@ -81,7 +81,7 @@ func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata
     owner.DisplayName = "minio"

     for _, object := range objects {
-        var content = &Item{}
+        var content = &Object{}
         if object.Key == "" {
             continue
         }
@@ -94,15 +94,17 @@ func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata
         contents = append(contents, content)
     }
     sort.Sort(itemKey(contents))
+    // TODO - support EncodingType in xml decoding
     data.Name = bucket
     data.Contents = contents
     data.MaxKeys = bucketResources.Maxkeys
     data.Prefix = bucketResources.Prefix
     data.Delimiter = bucketResources.Delimiter
     data.Marker = bucketResources.Marker
+    data.NextMarker = bucketResources.NextMarker
     data.IsTruncated = bucketResources.IsTruncated
     for _, prefix := range bucketResources.CommonPrefixes {
-        var prefixItem = &Prefix{}
+        var prefixItem = &CommonPrefix{}
         prefixItem.Prefix = prefix
         prefixes = append(prefixes, prefixItem)
     }

View File

@@ -35,6 +35,8 @@ func getBucketResources(values url.Values) (v drivers.BucketResourcesMetadata) {
             v.Maxkeys, _ = strconv.Atoi(value[0])
         case key == "delimiter":
             v.Delimiter = value[0]
+        case key == "encoding-type":
+            v.EncodingType = value[0]
         }
     }
     return

View File

@@ -92,7 +92,6 @@ func (d donut) ListBuckets() (results []string, err error) {
 // ListObjects - return list of objects
 func (d donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error) {
-    // TODO: Marker is not yet handled please handle it
     errParams := map[string]string{
         "bucket": bucket,
         "prefix": prefix,

View File

@@ -329,17 +329,16 @@ func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketResourcesMetadata
     if !drivers.IsValidObjectName(resources.Prefix) {
         return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil)
     }
-    actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName,
-        resources.Prefix,
-        resources.Marker,
-        resources.Delimiter,
+    actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName, resources.Prefix, resources.Marker, resources.Delimiter,
         resources.Maxkeys)
     if err != nil {
         return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
     }
     resources.CommonPrefixes = commonPrefixes
     resources.IsTruncated = isTruncated
+    if resources.IsTruncated && resources.IsDelimiterSet() {
+        resources.NextMarker = actualObjects[len(actualObjects)-1]
+    }
     var results []drivers.ObjectMetadata
     for _, objectName := range actualObjects {
         objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName)

View File

@@ -102,7 +102,9 @@ const (
 type BucketResourcesMetadata struct {
     Prefix string
     Marker string
+    NextMarker string
     Maxkeys int
+    EncodingType string
     Delimiter string
     IsTruncated bool
     CommonPrefixes []string

View File

@@ -66,7 +66,7 @@ func Start(maxSize uint64, expiration time.Duration) (chan<- string, <-chan error
     memory.objects.OnEvicted = memory.evictObject

     // set up memory expiration
-    memory.objects.ExpireObjects(time.Millisecond * 10)
+    memory.objects.ExpireObjects(time.Second * 5)

     go start(ctrlChannel, errorChannel)
     return ctrlChannel, errorChannel, memory
@@ -356,45 +356,45 @@ func appendUniq(slice []string, i string) []string {
     return append(slice, i)
 }

-func (memory *memoryDriver) filterDelimiterPrefix(keys []string, key, delimitedName string, resources drivers.BucketResourcesMetadata) (drivers.BucketResourcesMetadata, []string) {
+func (memory *memoryDriver) filterDelimiterPrefix(keys []string, key, delim string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
     switch true {
-    case key == resources.Prefix:
+    case key == r.Prefix:
         keys = appendUniq(keys, key)
-    // DelimitedName - requires resources.Prefix as it was trimmed off earlier in the flow
-    case key == resources.Prefix+delimitedName:
+    // delim - requires r.Prefix as it was trimmed off earlier
+    case key == r.Prefix+delim:
         keys = appendUniq(keys, key)
-    case delimitedName != "":
-        resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
+    case delim != "":
+        r.CommonPrefixes = appendUniq(r.CommonPrefixes, r.Prefix+delim)
     }
-    return resources, keys
+    return keys, r
 }

-func (memory *memoryDriver) listObjectsInternal(keys []string, key string, resources drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
+func (memory *memoryDriver) listObjects(keys []string, key string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
     switch true {
     // Prefix absent, delimit object key based on delimiter
-    case resources.IsDelimiterSet():
-        delimitedName := delimiter(key, resources.Delimiter)
+    case r.IsDelimiterSet():
+        delim := delimiter(key, r.Delimiter)
         switch true {
-        case delimitedName == "" || delimitedName == key:
+        case delim == "" || delim == key:
             keys = appendUniq(keys, key)
-        case delimitedName != "":
-            resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
+        case delim != "":
+            r.CommonPrefixes = appendUniq(r.CommonPrefixes, delim)
         }
     // Prefix present, delimit object key with prefix key based on delimiter
-    case resources.IsDelimiterPrefixSet():
-        if strings.HasPrefix(key, resources.Prefix) {
-            trimmedName := strings.TrimPrefix(key, resources.Prefix)
-            delimitedName := delimiter(trimmedName, resources.Delimiter)
-            resources, keys = memory.filterDelimiterPrefix(keys, key, delimitedName, resources)
+    case r.IsDelimiterPrefixSet():
+        if strings.HasPrefix(key, r.Prefix) {
+            trimmedName := strings.TrimPrefix(key, r.Prefix)
+            delim := delimiter(trimmedName, r.Delimiter)
+            keys, r = memory.filterDelimiterPrefix(keys, key, delim, r)
         }
     // Prefix present, nothing to delimit
-    case resources.IsPrefixSet():
+    case r.IsPrefixSet():
         keys = appendUniq(keys, key)
     // Prefix and delimiter absent
-    case resources.IsDefault():
+    case r.IsDefault():
         keys = appendUniq(keys, key)
     }
-    return keys, resources
+    return keys, r
 }

 // ListObjects - list objects from memory
@@ -416,13 +416,28 @@ func (memory *memoryDriver) ListObjects(bucket string, resources drivers.BucketResourcesMetadata
     for key := range storedBucket.objectMetadata {
         if strings.HasPrefix(key, bucket+"/") {
             key = key[len(bucket)+1:]
-            keys, resources = memory.listObjectsInternal(keys, key, resources)
+            keys, resources = memory.listObjects(keys, key, resources)
         }
     }
-    sort.Strings(keys)
-    for _, key := range keys {
+    // Marker logic - TODO in-efficient right now fix it
+    var newKeys []string
+    switch {
+    case resources.Marker != "":
+        for _, key := range keys {
+            if key > resources.Marker {
+                newKeys = appendUniq(newKeys, key)
+            }
+        }
+    default:
+        newKeys = keys
+    }
+    sort.Strings(newKeys)
+    for _, key := range newKeys {
         if len(results) == resources.Maxkeys {
             resources.IsTruncated = true
+            if resources.IsTruncated && resources.IsDelimiterSet() {
+                resources.NextMarker = results[len(results)-1].Key
+            }
             return results, resources, nil
         }
         object := storedBucket.objectMetadata[bucket+"/"+key]
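
The marker filter above is the linear scan flagged by the in-line TODO. One possible tightening, sketched here purely as an illustration and not as part of this commit, is to sort the keys once and binary-search for the marker (filterAfterMarker is a hypothetical helper):

package memory

import "sort"

// filterAfterMarker is a hypothetical helper: sort once, binary-search the
// marker, and slice past it instead of comparing every key.
func filterAfterMarker(keys []string, marker string) []string {
    sort.Strings(keys)
    if marker == "" {
        return keys
    }
    // SearchStrings returns the first index i with keys[i] >= marker.
    i := sort.SearchStrings(keys, marker)
    // Skip an exact match so listing resumes strictly after the marker.
    if i < len(keys) && keys[i] == marker {
        i++
    }
    return keys[i:]
}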

View File

@@ -33,8 +33,8 @@ type Intelligent struct {
     // items hold the cached objects
     items map[string]interface{}

-    // createdAt holds the time that related item's created At
-    createdAt map[string]time.Time
+    // updatedAt holds the time that related item's updated at
+    updatedAt map[string]time.Time

     // expiration is a duration for a cache key to expire
     expiration time.Duration
@@ -69,7 +69,7 @@ type Stats struct {
 func NewIntelligent(maxSize uint64, expiration time.Duration) *Intelligent {
     return &Intelligent{
         items:      map[string]interface{}{},
-        createdAt:  map[string]time.Time{},
+        updatedAt:  map[string]time.Time{},
         expiration: expiration,
         maxSize:    maxSize,
     }
@@ -91,7 +91,6 @@ func (r *Intelligent) ExpireObjects(gcInterval time.Duration) {
     for range time.Tick(gcInterval) {
         r.Lock()
         for key := range r.items {
-
             if !r.isValid(key) {
                 r.Delete(key)
             }
@@ -106,7 +105,11 @@ func (r *Intelligent) Get(key string) (interface{}, bool) {
     r.Lock()
     defer r.Unlock()
     value, ok := r.items[key]
-    return value, ok
+    if !ok {
+        return nil, false
+    }
+    r.updatedAt[key] = time.Now()
+    return value, true
 }

 // Set will persist a value to the cache
@@ -124,7 +127,7 @@ func (r *Intelligent) Set(key string, value interface{}) {
     }
     r.items[key] = value
     r.currentSize += uint64(len(value.([]byte)))
-    r.createdAt[key] = time.Now()
+    r.updatedAt[key] = time.Now()
     r.Unlock()
     return
 }
@@ -133,7 +136,7 @@ func (r *Intelligent) Set(key string, value interface{}) {
 func (r *Intelligent) Delete(key string) {
     r.currentSize -= uint64(len(r.items[key].([]byte)))
     delete(r.items, key)
-    delete(r.createdAt, key)
+    delete(r.updatedAt, key)
     r.totalEvicted++
     if r.OnEvicted != nil {
         r.OnEvicted(key)
@@ -141,12 +144,12 @@ func (r *Intelligent) Delete(key string) {
 }

 func (r *Intelligent) isValid(key string) bool {
-    createdAt, ok := r.createdAt[key]
+    updatedAt, ok := r.updatedAt[key]
     if !ok {
         return false
     }
     if r.expiration == zeroExpiration {
         return true
     }
-    return createdAt.Add(r.expiration).After(time.Now())
+    return updatedAt.Add(r.expiration).After(time.Now())
 }
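
Taken together with the Get change above, these cache edits replace the fixed createdAt stamp with an updatedAt that is refreshed on both Set and Get, so expiration becomes sliding: an entry only expires once it has gone unread for a full expiration window. A stripped-down, illustrative model of that behaviour (slidingCache is a hypothetical type, not the Intelligent cache itself):

package cache

import (
    "sync"
    "time"
)

// slidingCache is an illustrative model: reads refresh updatedAt, so an entry
// survives as long as it keeps being accessed within the expiration window.
type slidingCache struct {
    sync.Mutex
    items      map[string][]byte
    updatedAt  map[string]time.Time
    expiration time.Duration
}

func (c *slidingCache) Get(key string) ([]byte, bool) {
    c.Lock()
    defer c.Unlock()
    v, ok := c.items[key]
    if !ok {
        return nil, false
    }
    c.updatedAt[key] = time.Now() // reading keeps the entry alive
    return v, true
}

func (c *slidingCache) isValid(key string) bool {
    t, ok := c.updatedAt[key]
    if !ok {
        return false
    }
    if c.expiration == 0 {
        return true
    }
    return t.Add(c.expiration).After(time.Now())
}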