mirror of https://github.com/minio/minio.git
Merge pull request #674 from harshavardhana/pr_out_donut_cleanup_another_set
Donut cleanup another set
Commit: bd0dccd8f1
@@ -22,6 +22,7 @@ import (
 	"hash"
 	"io"
 	"path/filepath"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -42,6 +43,7 @@ type bucket struct {
 	time      time.Time
 	donutName string
 	nodes     map[string]node
+	objects   map[string]object
 	lock      *sync.RWMutex
 }

@@ -65,47 +67,101 @@ func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bu
 	b.time = t
 	b.donutName = donutName
 	b.nodes = nodes
+	b.objects = make(map[string]object)
 	b.lock = new(sync.RWMutex)
 	return b, bucketMetadata, nil
 }

+func (b bucket) getObjectName(fileName, diskPath, bucketPath string) (string, error) {
+	newObject, err := newObject(fileName, filepath.Join(diskPath, bucketPath))
+	if err != nil {
+		return "", iodine.New(err, nil)
+	}
+	newObjectMetadata, err := newObject.GetObjectMetadata()
+	if err != nil {
+		return "", iodine.New(err, nil)
+	}
+	objectName, ok := newObjectMetadata["object"]
+	if !ok {
+		return "", iodine.New(ObjectCorrupted{Object: newObject.name}, nil)
+	}
+	b.objects[objectName] = newObject
+	return objectName, nil
+}
+
+func (b bucket) GetObjectMetadata(objectName string) (map[string]string, error) {
+	return b.objects[objectName].GetObjectMetadata()
+}
+
 // ListObjects - list all objects
-func (b bucket) ListObjects() (map[string]object, error) {
+func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error) {
 	b.lock.RLock()
 	defer b.lock.RUnlock()

+	if maxkeys <= 0 {
+		maxkeys = 1000
+	}
+	var isTruncated bool
 	nodeSlice := 0
-	objects := make(map[string]object)
+	var objects []string
 	for _, node := range b.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
-			return nil, iodine.New(err, nil)
+			return nil, nil, false, iodine.New(err, nil)
 		}
 		for order, disk := range disks {
 			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
 			bucketPath := filepath.Join(b.donutName, bucketSlice)
 			files, err := disk.ListDir(bucketPath)
 			if err != nil {
-				return nil, iodine.New(err, nil)
+				return nil, nil, false, iodine.New(err, nil)
 			}
 			for _, file := range files {
-				newObject, err := newObject(file.Name(), filepath.Join(disk.GetPath(), bucketPath))
+				objectName, err := b.getObjectName(file.Name(), disk.GetPath(), bucketPath)
 				if err != nil {
-					return nil, iodine.New(err, nil)
+					return nil, nil, false, iodine.New(err, nil)
 				}
-				newObjectMetadata, err := newObject.GetObjectMetadata()
-				if err != nil {
-					return nil, iodine.New(err, nil)
+				if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
+					if objectName > marker {
+						objects = appendUniq(objects, objectName)
+					}
 				}
-				objectName, ok := newObjectMetadata["object"]
-				if !ok {
-					return nil, iodine.New(ObjectCorrupted{Object: newObject.name}, nil)
-				}
-				objects[objectName] = newObject
 			}
 		}
 		nodeSlice = nodeSlice + 1
 	}
-	return objects, nil
+	{
+		if strings.TrimSpace(prefix) != "" {
+			objects = removePrefix(objects, prefix)
+		}
+		var prefixes []string
+		var filteredObjects []string
+		if strings.TrimSpace(delimiter) != "" {
+			filteredObjects = filterDelimited(objects, delimiter)
+			prefixes = filterNotDelimited(objects, delimiter)
+			prefixes = extractDelimited(prefixes, delimiter)
+			prefixes = uniqueObjects(prefixes)
+		} else {
+			filteredObjects = objects
+		}
+		var results []string
+		var commonPrefixes []string
+
+		sort.Strings(filteredObjects)
+		for _, objectName := range filteredObjects {
+			if len(results) >= maxkeys {
+				isTruncated = true
+				break
+			}
+			results = appendUniq(results, prefix+objectName)
+		}
+		for _, commonPrefix := range prefixes {
+			commonPrefixes = appendUniq(commonPrefixes, prefix+commonPrefix)
+		}
+		sort.Strings(results)
+		sort.Strings(commonPrefixes)
+		return results, commonPrefixes, isTruncated, nil
+	}
 }

 // ReadObject - open an object to read
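For orientation, here is a minimal caller sketch of the new bucket-level listing API. It is not part of this commit: the helper name listExample and the example prefix, delimiter, and page-size values are hypothetical; only the ListObjects signature and return values come from the hunk above.

// listExample is a hypothetical in-package helper illustrating the new
// ListObjects(prefix, marker, delimiter, maxkeys) signature; not part of the commit.
func listExample(b bucket) error {
	// list up to 1000 keys under "photos/", grouping on "/" as S3-style listings do
	objects, commonPrefixes, isTruncated, err := b.ListObjects("photos/", "", "/", 1000)
	if err != nil {
		return iodine.New(err, nil)
	}
	for _, key := range objects {
		fmt.Println("object:", key)
	}
	for _, commonPrefix := range commonPrefixes {
		fmt.Println("common prefix:", commonPrefix)
	}
	// when isTruncated is true, more keys remain; re-issue the call with the
	// last key returned as the marker
	fmt.Println("truncated:", isTruncated)
	return nil
}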
@@ -114,12 +170,12 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
 	defer b.lock.RUnlock()
 	reader, writer := io.Pipe()
 	// get list of objects
-	objects, err := b.ListObjects()
+	_, _, _, err = b.ListObjects(objectName, "", "", 1)
 	if err != nil {
 		return nil, 0, iodine.New(err, nil)
 	}
 	// check if object exists
-	object, ok := objects[objectName]
+	object, ok := b.objects[objectName]
 	if !ok {
 		return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil)
 	}
@@ -149,7 +205,6 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
 func (b bucket) WriteObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
-
 	if objectName == "" || objectData == nil {
 		return "", iodine.New(InvalidArgument{}, nil)
 	}
@@ -362,6 +417,9 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
 		writer.CloseWithError(iodine.New(err, nil))
 		return
 	}
+	for _, reader := range readers {
+		defer reader.Close()
+	}
 	hasher := md5.New()
 	mwriter := io.MultiWriter(writer, hasher)
 	switch len(readers) == 1 {
@@ -52,7 +52,7 @@ func filterNotDelimited(objects []string, delim string) []string {
 	return results
 }

-func extractDir(objects []string, delim string) []string {
+func extractDelimited(objects []string, delim string) []string {
 	var results []string
 	for _, object := range objects {
 		parts := strings.Split(object, delim)
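The hunk above stops at the strings.Split call, so the rest of the renamed helper is not visible here. The sketch below is an assumption about what such a helper plausibly does, consistent with how ListObjects uses its result as common prefixes; the name extractDelimitedSketch and the completion of the loop body are illustrative, not the committed code.

// extractDelimitedSketch is a hypothetical reconstruction, not the committed body:
// for names that contain the delimiter, keep the leading segment plus the delimiter
// (e.g. "photos/cat.png" with "/" yields "photos/").
func extractDelimitedSketch(objects []string, delim string) []string {
	var results []string
	for _, object := range objects {
		parts := strings.Split(object, delim)
		if len(parts) > 1 {
			results = append(results, parts[0]+delim)
		}
	}
	return results
}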
@@ -93,6 +93,7 @@ func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) {
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
+	defer dir.Close()
 	contents, err := dir.Readdir(-1)
 	if err != nil {
 		return nil, iodine.New(err, nil)
@@ -113,6 +114,7 @@ func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) {
 	if err != nil {
 		return nil, iodine.New(err, nil)
 	}
+	defer dir.Close()
 	contents, err := dir.Readdir(-1)
 	if err != nil {
 		return nil, iodine.New(err, nil)
@@ -22,7 +22,6 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -101,7 +100,7 @@ func (dt donut) MakeBucket(bucket, acl string) error {
 func (dt donut) GetBucketMetadata(bucket string) (map[string]string, error) {
 	dt.lock.RLock()
 	defer dt.lock.RUnlock()
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, iodine.New(err, nil)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
@@ -118,7 +117,7 @@ func (dt donut) GetBucketMetadata(bucket string) (map[string]string, error) {
 func (dt donut) SetBucketMetadata(bucket string, bucketMetadata map[string]string) error {
 	dt.lock.Lock()
 	defer dt.lock.Unlock()
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return iodine.New(err, nil)
 	}
 	metadata, err := dt.getDonutBucketMetadata()
@@ -136,7 +135,7 @@ func (dt donut) SetBucketMetadata(bucket string, bucketMetadata map[string]strin
 func (dt donut) ListBuckets() (metadata map[string]map[string]string, err error) {
 	dt.lock.RLock()
 	defer dt.lock.RUnlock()
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, iodine.New(err, nil)
 	}
 	metadata, err = dt.getDonutBucketMetadata()
@@ -160,68 +159,17 @@ func (dt donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys in
 		"delimiter": delimiter,
 		"maxkeys":   strconv.Itoa(maxkeys),
 	}
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, nil, false, iodine.New(err, errParams)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
 		return nil, nil, false, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	objectList, err := dt.buckets[bucket].ListObjects()
+	objects, commonPrefixes, isTruncated, err := dt.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
 	if err != nil {
 		return nil, nil, false, iodine.New(err, errParams)
 	}
-	var donutObjects []string
-	for objectName := range objectList {
-		donutObjects = append(donutObjects, objectName)
-	}
-	if maxkeys <= 0 {
-		maxkeys = 1000
-	}
-	if strings.TrimSpace(prefix) != "" {
-		donutObjects = filterPrefix(donutObjects, prefix)
-		donutObjects = removePrefix(donutObjects, prefix)
-	}
-
-	var actualObjects []string
-	var actualPrefixes []string
-	var isTruncated bool
-	if strings.TrimSpace(delimiter) != "" {
-		actualObjects = filterDelimited(donutObjects, delimiter)
-		actualPrefixes = filterNotDelimited(donutObjects, delimiter)
-		actualPrefixes = extractDir(actualPrefixes, delimiter)
-		actualPrefixes = uniqueObjects(actualPrefixes)
-	} else {
-		actualObjects = donutObjects
-	}
-
-	sort.Strings(actualObjects)
-	var newActualObjects []string
-	switch {
-	case marker != "":
-		for _, objectName := range actualObjects {
-			if objectName > marker {
-				newActualObjects = append(newActualObjects, objectName)
-			}
-		}
-	default:
-		newActualObjects = actualObjects
-	}
-
-	var results []string
-	var commonPrefixes []string
-	for _, objectName := range newActualObjects {
-		if len(results) >= maxkeys {
-			isTruncated = true
-			break
-		}
-		results = appendUniq(results, prefix+objectName)
-	}
-	for _, commonPrefix := range actualPrefixes {
-		commonPrefixes = appendUniq(commonPrefixes, prefix+commonPrefix)
-	}
-	sort.Strings(results)
-	sort.Strings(commonPrefixes)
-	return results, commonPrefixes, isTruncated, nil
+	return objects, commonPrefixes, isTruncated, nil
 }

 // PutObject - put object
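Since the donut-level ListObjects now simply forwards to bucket.ListObjects, paging behaves the same at both layers: marker acts as an exclusive lower bound (objectName > marker) and isTruncated reports that maxkeys was hit. The following paging loop is a hypothetical in-package sketch, not part of this commit; the helper name pageAll and the page size of 1000 are illustrative only.

// pageAll is a hypothetical helper that drains a bucket by repeatedly calling
// ListObjects with the last returned key as the next marker; not part of the commit.
func pageAll(dt donut, bucket string) ([]string, error) {
	var all []string
	marker := ""
	for {
		objects, _, isTruncated, err := dt.ListObjects(bucket, "", marker, "", 1000)
		if err != nil {
			return nil, iodine.New(err, nil)
		}
		all = append(all, objects...)
		if !isTruncated || len(objects) == 0 {
			break
		}
		// continue the listing just after the last key returned
		marker = objects[len(objects)-1]
	}
	return all, nil
}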
@@ -238,17 +186,17 @@ func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadC
 	if object == "" || strings.TrimSpace(object) == "" {
 		return "", iodine.New(InvalidArgument{}, errParams)
 	}
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return "", iodine.New(err, errParams)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
 		return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	objectList, err := dt.buckets[bucket].ListObjects()
+	objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
 	if err != nil {
 		return "", iodine.New(err, nil)
 	}
-	for objectName := range objectList {
+	for _, objectName := range objectList {
 		if objectName == object {
 			return "", iodine.New(ObjectExists{Object: object}, nil)
 		}
@@ -274,7 +222,7 @@ func (dt donut) GetObject(bucket, object string) (reader io.ReadCloser, size int
 	if object == "" || strings.TrimSpace(object) == "" {
 		return nil, 0, iodine.New(InvalidArgument{}, errParams)
 	}
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, 0, iodine.New(err, nil)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
@@ -291,21 +239,27 @@ func (dt donut) GetObjectMetadata(bucket, object string) (map[string]string, err
 		"bucket": bucket,
 		"object": object,
 	}
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, iodine.New(err, errParams)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
 		return nil, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	objectList, err := dt.buckets[bucket].ListObjects()
+	//
+	// there is a potential issue here, if the object comes after the truncated list
+	// below GetObjectMetadata would fail as ObjectNotFound{}
+	//
+	// will fix it when we bring in persistent json into Donut - TODO
+	objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
 	if err != nil {
 		return nil, iodine.New(err, errParams)
 	}
-	donutObject, ok := objectList[object]
-	if !ok {
-		return nil, iodine.New(ObjectNotFound{Object: object}, errParams)
+	for _, objectName := range objectList {
+		if objectName == object {
+			return dt.buckets[bucket].GetObjectMetadata(object)
+		}
 	}
-	return donutObject.GetObjectMetadata()
+	return nil, iodine.New(ObjectNotFound{Object: object}, errParams)
 }

 // getDiskWriters -
@@ -384,7 +338,7 @@ func (dt donut) getDonutBucketMetadata() (map[string]map[string]string, error) {
 }

 func (dt donut) makeDonutBucket(bucketName, acl string) error {
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return iodine.New(err, nil)
 	}
 	if _, ok := dt.buckets[bucketName]; ok {
@@ -412,8 +366,7 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error {
 	}
 	metadata, err := dt.getDonutBucketMetadata()
 	if err != nil {
-		err = iodine.ToError(err)
-		if os.IsNotExist(err) {
+		if os.IsNotExist(iodine.ToError(err)) {
 			metadata := make(map[string]map[string]string)
 			metadata[bucketName] = bucketMetadata
 			err = dt.setDonutBucketMetadata(metadata)
@@ -432,7 +385,7 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error {
 	return nil
 }

-func (dt donut) getDonutBuckets() error {
+func (dt donut) listDonutBuckets() error {
 	for _, node := range dt.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
@@ -306,7 +306,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
 	/// test list of objects

 	// test list objects with prefix and delimiter
-	listObjects, prefixes, isTruncated, err := donut.ListObjects("foo", "o", "", "1", 1)
+	listObjects, prefixes, isTruncated, err := donut.ListObjects("foo", "o", "", "1", 10)
 	c.Assert(err, IsNil)
 	c.Assert(isTruncated, Equals, false)
 	c.Assert(prefixes[0], Equals, "obj1")
@@ -187,9 +187,9 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Maxkeys = 5
 		resources.Prefix = ""
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(len(objects), check.Equals, i+1)
 		c.Assert(resources.IsTruncated, check.Equals, false)
-		c.Assert(err, check.IsNil)
 	}
 	// check after paging occurs pages work
 	for i := 6; i <= 10; i++ {
@@ -198,9 +198,9 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Maxkeys = 5
 		resources.Prefix = ""
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(len(objects), check.Equals, 5)
 		c.Assert(resources.IsTruncated, check.Equals, true)
-		c.Assert(err, check.IsNil)
 	}
 	// check paging with prefix at end returns less objects
 	{
@@ -209,6 +209,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Prefix = "new"
 		resources.Maxkeys = 5
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(len(objects), check.Equals, 2)
 	}

@@ -217,6 +218,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Prefix = ""
 		resources.Maxkeys = 1000
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix")
 		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
 		c.Assert(objects[2].Key, check.Equals, "obj0")
@@ -248,6 +250,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Prefix = ""
 		resources.Maxkeys = 1000
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix")
 		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
 		c.Assert(objects[2].Key, check.Equals, "obj0")
@@ -265,6 +268,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Delimiter = ""
 		resources.Maxkeys = 3
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix2")
 		c.Assert(objects[1].Key, check.Equals, "obj0")
 		c.Assert(objects[2].Key, check.Equals, "obj1")
@@ -276,6 +280,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Marker = ""
 		resources.Maxkeys = 1000
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "obj0")
 		c.Assert(objects[1].Key, check.Equals, "obj1")
 		c.Assert(objects[2].Key, check.Equals, "obj10")
@@ -288,6 +293,7 @@ func testPaging(c *check.C, create func() Driver) {
 		resources.Marker = ""
 		resources.Maxkeys = 5
 		objects, resources, err = drivers.ListObjects("bucket", resources)
+		c.Assert(err, check.IsNil)
 		c.Assert(objects[0].Key, check.Equals, "newPrefix")
 		c.Assert(objects[1].Key, check.Equals, "newPrefix2")
 	}
@@ -313,8 +319,8 @@ func testObjectOverwriteFails(c *check.C, create func() Driver) {

 	var bytesBuffer bytes.Buffer
 	length, err := drivers.GetObject(&bytesBuffer, "bucket", "object")
-	c.Assert(length, check.Equals, int64(len("one")))
 	c.Assert(err, check.IsNil)
+	c.Assert(length, check.Equals, int64(len("one")))
 	c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
 }

@@ -358,9 +364,9 @@ func testPutObjectInSubdir(c *check.C, create func() Driver) {

 	var bytesBuffer bytes.Buffer
 	length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
+	c.Assert(err, check.IsNil)
 	c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
 	c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
-	c.Assert(err, check.IsNil)
 }

 func testListBuckets(c *check.C, create func() Driver) {
|
||||||
drivers.CreateBucket("bucket2", "")
|
drivers.CreateBucket("bucket2", "")
|
||||||
|
|
||||||
buckets, err := drivers.ListBuckets()
|
buckets, err := drivers.ListBuckets()
|
||||||
c.Assert(len(buckets), check.Equals, 2)
|
|
||||||
c.Assert(err, check.IsNil)
|
c.Assert(err, check.IsNil)
|
||||||
|
c.Assert(len(buckets), check.Equals, 2)
|
||||||
c.Assert(buckets[0].Name, check.Equals, "bucket1")
|
c.Assert(buckets[0].Name, check.Equals, "bucket1")
|
||||||
c.Assert(buckets[1].Name, check.Equals, "bucket2")
|
c.Assert(buckets[1].Name, check.Equals, "bucket2")
|
||||||
}
|
}
|
||||||
|
|