Merge pull request #571 from harshavardhana/pr_out_add_lexicographic_marker_nextmarker_support_for_recursive_listing_of_objects_wip_do_not_merge

Harshavardhana 2015-05-05 18:12:18 -07:00
commit 142f9263e1
8 changed files with 105 additions and 62 deletions

View File

@@ -27,45 +27,66 @@ const (
// ListObjectsResponse - format for list objects response
type ListObjectsResponse struct {
XMLName xml.Name `xml:"http://doc.s3.amazonaws.com/2006-03-01 ListBucketResult" json:"-"`
Name string
Prefix string
Marker string
MaxKeys int
Delimiter string
IsTruncated bool
Contents []*Item
CommonPrefixes []*Prefix
XMLName xml.Name `xml:"http://doc.s3.amazonaws.com/2006-03-01 ListBucketResult" json:"-"`
CommonPrefixes []*CommonPrefix
Contents []*Object
Delimiter string
// Encoding type used to encode object keys in the response.
EncodingType string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
Marker string
MaxKeys int
Name string
// When the response is truncated (the IsTruncated element value in the response
// is true), you can use the key name in this field as the marker in the subsequent
// request to get the next set of objects. Object storage lists objects in alphabetical
// order. Note: this element is returned only if the delimiter request parameter is
// specified. If the response does not include NextMarker and it is truncated,
// you can use the value of the last Key in the response as the marker in the
// subsequent request to get the next set of object keys.
NextMarker string
Prefix string
}
// ListBucketsResponse - format for list buckets response
type ListBucketsResponse struct {
XMLName xml.Name `xml:"http://doc.s3.amazonaws.com/2006-03-01 ListAllMyBucketsResult" json:"-"`
Owner Owner
// Container for one or more buckets.
Buckets struct {
Bucket []*Bucket
} // Buckets are nested
Owner Owner
}
// Prefix - common prefix
type Prefix struct {
// CommonPrefix container for prefix response in ListObjectsResponse
type CommonPrefix struct {
Prefix string
}
// Bucket - bucket item
// Bucket container for bucket metadata
type Bucket struct {
Name string
CreationDate string
}
// Item - object item
type Item struct {
// Object container for object metadata
type Object struct {
ETag string
Key string
LastModified string
ETag string
Size int64
Owner Owner
// The class of storage used to store the object.
StorageClass string
Owner Owner
}
// Owner - bucket owner/principal
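For orientation, a minimal, self-contained sketch of how a truncated listing with the new NextMarker field would serialize over the wire. The listObjectsResponse type below is a trimmed, illustrative copy of ListObjectsResponse above, not the package's own code:

package main

import (
    "encoding/xml"
    "fmt"
)

// listObjectsResponse is a trimmed, illustrative copy of the response type above.
type listObjectsResponse struct {
    XMLName     xml.Name `xml:"http://doc.s3.amazonaws.com/2006-03-01 ListBucketResult" json:"-"`
    Name        string
    Prefix      string
    Marker      string
    NextMarker  string
    MaxKeys     int
    Delimiter   string
    IsTruncated bool
}

func main() {
    resp := listObjectsResponse{
        Name:        "photos",
        Delimiter:   "/",
        MaxKeys:     2,
        IsTruncated: true,
        // A client passes this value back as the marker of its next request.
        NextMarker: "2015/05/city.jpg",
    }
    out, err := xml.MarshalIndent(resp, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}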

View File

@@ -57,7 +57,7 @@ func generateListBucketsResponse(buckets []drivers.BucketMetadata) ListBucketsRe
}
// itemKey
type itemKey []*Item
type itemKey []*Object
func (b itemKey) Len() int { return len(b) }
func (b itemKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
@@ -72,8 +72,8 @@ func (b itemKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
// output:
// populated struct that can be serialized to match xml and json api spec output
func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata, bucketResources drivers.BucketResourcesMetadata) ListObjectsResponse {
var contents []*Item
var prefixes []*Prefix
var contents []*Object
var prefixes []*CommonPrefix
var owner = Owner{}
var data = ListObjectsResponse{}
@@ -81,7 +81,7 @@ func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata
owner.DisplayName = "minio"
for _, object := range objects {
var content = &Item{}
var content = &Object{}
if object.Key == "" {
continue
}
@@ -94,15 +94,17 @@ func generateListObjectsResponse(bucket string, objects []drivers.ObjectMetadata
contents = append(contents, content)
}
sort.Sort(itemKey(contents))
// TODO - support EncodingType in xml decoding
data.Name = bucket
data.Contents = contents
data.MaxKeys = bucketResources.Maxkeys
data.Prefix = bucketResources.Prefix
data.Delimiter = bucketResources.Delimiter
data.Marker = bucketResources.Marker
data.NextMarker = bucketResources.NextMarker
data.IsTruncated = bucketResources.IsTruncated
for _, prefix := range bucketResources.CommonPrefixes {
var prefixItem = &Prefix{}
var prefixItem = &CommonPrefix{}
prefixItem.Prefix = prefix
prefixes = append(prefixes, prefixItem)
}
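As a quick aside, the lexicographic ordering that sort.Sort(itemKey(contents)) relies on is plain byte-wise string comparison on Key. A tiny, self-contained illustration, where object and byKey are stand-ins for Object and itemKey:

package main

import (
    "fmt"
    "sort"
)

// object and byKey are minimal stand-ins for Object and itemKey above.
type object struct{ Key string }

type byKey []*object

func (b byKey) Len() int           { return len(b) }
func (b byKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byKey) Less(i, j int) bool { return b[i].Key < b[j].Key }

func main() {
    contents := []*object{{Key: "c.txt"}, {Key: "a.txt"}, {Key: "b/"}}
    sort.Sort(byKey(contents))
    for _, o := range contents {
        fmt.Println(o.Key) // prints a.txt, b/, c.txt — keys in lexical order
    }
}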

View File

@@ -35,6 +35,8 @@ func getBucketResources(values url.Values) (v drivers.BucketResourcesMetadata) {
v.Maxkeys, _ = strconv.Atoi(value[0])
case key == "delimiter":
v.Delimiter = value[0]
case key == "encoding-type":
v.EncodingType = value[0]
}
}
return
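For readers unfamiliar with the list-objects query string, here is a hedged, stand-alone sketch of the mapping this switch performs. parseListQuery and the resources struct are illustrative only, and the parameter names follow the S3 convention rather than quoting the handler verbatim:

package main

import (
    "fmt"
    "net/url"
    "strconv"
)

// resources mirrors the fields BucketResourcesMetadata picks up from the query string.
type resources struct {
    Prefix       string
    Marker       string
    Maxkeys      int
    Delimiter    string
    EncodingType string
}

// parseListQuery is an illustrative mini-parser in the spirit of getBucketResources.
func parseListQuery(values url.Values) (r resources) {
    for key, value := range values {
        switch key {
        case "prefix":
            r.Prefix = value[0]
        case "marker":
            r.Marker = value[0]
        case "max-keys":
            r.Maxkeys, _ = strconv.Atoi(value[0])
        case "delimiter":
            r.Delimiter = value[0]
        case "encoding-type":
            r.EncodingType = value[0]
        }
    }
    return
}

func main() {
    q, _ := url.ParseQuery("prefix=2015/&marker=2015/05/beach.jpg&max-keys=100&delimiter=/&encoding-type=url")
    fmt.Printf("%+v\n", parseListQuery(q))
}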

View File

@@ -92,7 +92,6 @@ func (d donut) ListBuckets() (results []string, err error) {
// ListObjects - return list of objects
func (d donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error) {
// TODO: Marker is not yet handled please handle it
errParams := map[string]string{
"bucket": bucket,
"prefix": prefix,

View File

@@ -329,17 +329,16 @@ func (d donutDriver) ListObjects(bucketName string, resources drivers.BucketReso
if !drivers.IsValidObjectName(resources.Prefix) {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(drivers.ObjectNameInvalid{Object: resources.Prefix}, nil)
}
actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName,
resources.Prefix,
resources.Marker,
resources.Delimiter,
actualObjects, commonPrefixes, isTruncated, err := d.donut.ListObjects(bucketName, resources.Prefix, resources.Marker, resources.Delimiter,
resources.Maxkeys)
if err != nil {
return nil, drivers.BucketResourcesMetadata{}, iodine.New(err, errParams)
}
resources.CommonPrefixes = commonPrefixes
resources.IsTruncated = isTruncated
if resources.IsTruncated && resources.IsDelimiterSet() {
resources.NextMarker = actualObjects[len(actualObjects)-1]
}
var results []drivers.ObjectMetadata
for _, objectName := range actualObjects {
objectMetadata, err := d.donut.GetObjectMetadata(bucketName, objectName)
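To see why NextMarker is taken from the last key of a truncated page, consider the pagination loop a caller would drive against this driver. The sketch below is hypothetical — page and listPage are stand-ins, and it skips the delimiter check that gates NextMarker above — but it shows the lexicographic-marker contract:

package main

import "fmt"

// page is a hypothetical, trimmed view of one ListObjects result.
type page struct {
    Keys        []string
    IsTruncated bool
    NextMarker  string
}

// listPage is a stand-in for the driver: it returns keys strictly greater than
// marker, up to maxKeys, in lexical order.
func listPage(all []string, marker string, maxKeys int) page {
    var p page
    for _, k := range all {
        if k <= marker {
            continue
        }
        if len(p.Keys) == maxKeys {
            p.IsTruncated = true
            p.NextMarker = p.Keys[len(p.Keys)-1]
            break
        }
        p.Keys = append(p.Keys, k)
    }
    return p
}

func main() {
    all := []string{"a", "b", "c", "d", "e"} // already sorted
    marker := ""
    for {
        p := listPage(all, marker, 2)
        fmt.Println(p.Keys) // [a b], then [c d], then [e]
        if !p.IsTruncated {
            break
        }
        marker = p.NextMarker // lexicographic marker: resume after the last key seen
    }
}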

View File

@@ -102,7 +102,9 @@ const (
type BucketResourcesMetadata struct {
Prefix string
Marker string
NextMarker string
Maxkeys int
EncodingType string
Delimiter string
IsTruncated bool
CommonPrefixes []string
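The memory driver below branches on IsDelimiterSet, IsDelimiterPrefixSet, IsPrefixSet and IsDefault, none of which appear in this diff. A rough guess at their shape, offered only for orientation; the bodies are inferred from the mutually exclusive switch cases in the memory driver, and the real methods in the drivers package may differ:

package sketch

// bucketResources is a trimmed stand-in for BucketResourcesMetadata. The
// predicate bodies are assumptions, not the package's actual implementation.
type bucketResources struct {
    Prefix    string
    Delimiter string
}

func (b bucketResources) IsDefault() bool            { return b.Prefix == "" && b.Delimiter == "" }
func (b bucketResources) IsPrefixSet() bool          { return b.Prefix != "" && b.Delimiter == "" }
func (b bucketResources) IsDelimiterSet() bool       { return b.Prefix == "" && b.Delimiter != "" }
func (b bucketResources) IsDelimiterPrefixSet() bool { return b.Prefix != "" && b.Delimiter != "" }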

View File

@@ -66,7 +66,7 @@ func Start(maxSize uint64, expiration time.Duration) (chan<- string, <-chan erro
memory.objects.OnEvicted = memory.evictObject
// set up memory expiration
memory.objects.ExpireObjects(time.Millisecond * 10)
memory.objects.ExpireObjects(time.Second * 5)
go start(ctrlChannel, errorChannel)
return ctrlChannel, errorChannel, memory
@@ -356,45 +356,45 @@ func appendUniq(slice []string, i string) []string {
return append(slice, i)
}
func (memory *memoryDriver) filterDelimiterPrefix(keys []string, key, delimitedName string, resources drivers.BucketResourcesMetadata) (drivers.BucketResourcesMetadata, []string) {
func (memory *memoryDriver) filterDelimiterPrefix(keys []string, key, delim string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
switch true {
case key == resources.Prefix:
case key == r.Prefix:
keys = appendUniq(keys, key)
// DelimitedName - requires resources.Prefix as it was trimmed off earlier in the flow
case key == resources.Prefix+delimitedName:
// delim - requires r.Prefix as it was trimmed off earlier
case key == r.Prefix+delim:
keys = appendUniq(keys, key)
case delimitedName != "":
resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, resources.Prefix+delimitedName)
case delim != "":
r.CommonPrefixes = appendUniq(r.CommonPrefixes, r.Prefix+delim)
}
return resources, keys
return keys, r
}
func (memory *memoryDriver) listObjectsInternal(keys []string, key string, resources drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
func (memory *memoryDriver) listObjects(keys []string, key string, r drivers.BucketResourcesMetadata) ([]string, drivers.BucketResourcesMetadata) {
switch true {
// Prefix absent, delimit object key based on delimiter
case resources.IsDelimiterSet():
delimitedName := delimiter(key, resources.Delimiter)
case r.IsDelimiterSet():
delim := delimiter(key, r.Delimiter)
switch true {
case delimitedName == "" || delimitedName == key:
case delim == "" || delim == key:
keys = appendUniq(keys, key)
case delimitedName != "":
resources.CommonPrefixes = appendUniq(resources.CommonPrefixes, delimitedName)
case delim != "":
r.CommonPrefixes = appendUniq(r.CommonPrefixes, delim)
}
// Prefix present, delimit object key with prefix key based on delimiter
case resources.IsDelimiterPrefixSet():
if strings.HasPrefix(key, resources.Prefix) {
trimmedName := strings.TrimPrefix(key, resources.Prefix)
delimitedName := delimiter(trimmedName, resources.Delimiter)
resources, keys = memory.filterDelimiterPrefix(keys, key, delimitedName, resources)
case r.IsDelimiterPrefixSet():
if strings.HasPrefix(key, r.Prefix) {
trimmedName := strings.TrimPrefix(key, r.Prefix)
delim := delimiter(trimmedName, r.Delimiter)
keys, r = memory.filterDelimiterPrefix(keys, key, delim, r)
}
// Prefix present, nothing to delimit
case resources.IsPrefixSet():
case r.IsPrefixSet():
keys = appendUniq(keys, key)
// Prefix and delimiter absent
case resources.IsDefault():
case r.IsDefault():
keys = appendUniq(keys, key)
}
return keys, resources
return keys, r
}
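The grouping above depends on a delimiter helper that is not part of this hunk. As a rough, self-contained illustration of the intended effect — commonPrefix below is an assumption that mirrors S3's common-prefix semantics, not the package's actual helper:

package main

import (
    "fmt"
    "strings"
)

// commonPrefix is an illustrative stand-in for delimiter(key, sep): it returns
// the key truncated just past the first separator, or "" when no separator is found.
func commonPrefix(key, sep string) string {
    if i := strings.Index(key, sep); i >= 0 {
        return key[:i+len(sep)]
    }
    return ""
}

func main() {
    keys := []string{"a.txt", "photos/2015/a.jpg", "photos/2016/b.jpg", "readme"}
    var plain, prefixes []string
    seen := map[string]bool{}
    for _, k := range keys {
        if p := commonPrefix(k, "/"); p != "" {
            if !seen[p] { // behaves like appendUniq above
                prefixes = append(prefixes, p)
                seen[p] = true
            }
            continue
        }
        plain = append(plain, k)
    }
    fmt.Println("keys:", plain)               // [a.txt readme]
    fmt.Println("common prefixes:", prefixes) // [photos/]
}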
// ListObjects - list objects from memory
@@ -416,13 +416,28 @@ func (memory *memoryDriver) ListObjects(bucket string, resources drivers.BucketR
for key := range storedBucket.objectMetadata {
if strings.HasPrefix(key, bucket+"/") {
key = key[len(bucket)+1:]
keys, resources = memory.listObjectsInternal(keys, key, resources)
keys, resources = memory.listObjects(keys, key, resources)
}
}
sort.Strings(keys)
for _, key := range keys {
// Marker logic - TODO inefficient right now, fix it
var newKeys []string
switch {
case resources.Marker != "":
for _, key := range keys {
if key > resources.Marker {
newKeys = appendUniq(newKeys, key)
}
}
default:
newKeys = keys
}
sort.Strings(newKeys)
for _, key := range newKeys {
if len(results) == resources.Maxkeys {
resources.IsTruncated = true
if resources.IsTruncated && resources.IsDelimiterSet() {
resources.NextMarker = results[len(results)-1].Key
}
return results, resources, nil
}
object := storedBucket.objectMetadata[bucket+"/"+key]
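The marker handling above is a plain lexicographic filter over every key, which is what the TODO flags as inefficient: each page re-scans the whole key set. A compact, self-contained restatement of that logic on string keys — pageAfterMarker is illustrative, and the driver additionally requires a delimiter before emitting NextMarker, which is omitted here:

package main

import (
    "fmt"
    "sort"
)

// pageAfterMarker mimics the marker logic above: drop keys <= marker,
// sort what is left, and cut the page at maxKeys, remembering NextMarker.
func pageAfterMarker(keys []string, marker string, maxKeys int) (page []string, nextMarker string, truncated bool) {
    var filtered []string
    for _, k := range keys {
        if marker == "" || k > marker {
            filtered = append(filtered, k)
        }
    }
    sort.Strings(filtered)
    for _, k := range filtered {
        if len(page) == maxKeys {
            truncated = true
            nextMarker = page[len(page)-1]
            return
        }
        page = append(page, k)
    }
    return
}

func main() {
    keys := []string{"c", "a", "e", "b", "d"}
    fmt.Println(pageAfterMarker(keys, "b", 2)) // [c d] d true
}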

View File

@@ -33,8 +33,8 @@ type Intelligent struct {
// items hold the cached objects
items map[string]interface{}
// createdAt holds the time that related item's created At
createdAt map[string]time.Time
// updatedAt holds the time at which the related item was last updated
updatedAt map[string]time.Time
// expiration is a duration for a cache key to expire
expiration time.Duration
@@ -69,7 +69,7 @@ type Stats struct {
func NewIntelligent(maxSize uint64, expiration time.Duration) *Intelligent {
return &Intelligent{
items: map[string]interface{}{},
createdAt: map[string]time.Time{},
updatedAt: map[string]time.Time{},
expiration: expiration,
maxSize: maxSize,
}
@@ -91,7 +91,6 @@ func (r *Intelligent) ExpireObjects(gcInterval time.Duration) {
for range time.Tick(gcInterval) {
r.Lock()
for key := range r.items {
if !r.isValid(key) {
r.Delete(key)
}
@@ -106,7 +105,11 @@ func (r *Intelligent) Get(key string) (interface{}, bool) {
r.Lock()
defer r.Unlock()
value, ok := r.items[key]
return value, ok
if !ok {
return nil, false
}
r.updatedAt[key] = time.Now()
return value, true
}
// Set will persist a value to the cache
@@ -124,7 +127,7 @@ func (r *Intelligent) Set(key string, value interface{}) {
}
r.items[key] = value
r.currentSize += uint64(len(value.([]byte)))
r.createdAt[key] = time.Now()
r.updatedAt[key] = time.Now()
r.Unlock()
return
}
@@ -133,7 +136,7 @@ func (r *Intelligent) Set(key string, value interface{}) {
func (r *Intelligent) Delete(key string) {
r.currentSize -= uint64(len(r.items[key].([]byte)))
delete(r.items, key)
delete(r.createdAt, key)
delete(r.updatedAt, key)
r.totalEvicted++
if r.OnEvicted != nil {
r.OnEvicted(key)
@@ -141,12 +144,12 @@ func (r *Intelligent) Delete(key string) {
}
func (r *Intelligent) isValid(key string) bool {
createdAt, ok := r.createdAt[key]
updatedAt, ok := r.updatedAt[key]
if !ok {
return false
}
if r.expiration == zeroExpiration {
return true
}
return createdAt.Add(r.expiration).After(time.Now())
return updatedAt.Add(r.expiration).After(time.Now())
}
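Because Get now refreshes updatedAt, the cache expires entries on idle time rather than on a fixed TTL from creation. A minimal sketch of that validity rule, assuming zeroExpiration means "never expire":

package main

import (
    "fmt"
    "time"
)

// isValid mirrors the rule above: an entry stays alive as long as it was
// read or written within the last `expiration` window.
func isValid(updatedAt time.Time, expiration time.Duration, now time.Time) bool {
    if expiration == 0 { // zeroExpiration: entries never expire
        return true
    }
    return updatedAt.Add(expiration).After(now)
}

func main() {
    now := time.Now()
    lastTouch := now.Add(-3 * time.Second)
    fmt.Println(isValid(lastTouch, 5*time.Second, now)) // true  — touched 3s ago, 5s window
    fmt.Println(isValid(lastTouch, 2*time.Second, now)) // false — idle longer than the window
}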