Merge pull request #674 from harshavardhana/pr_out_donut_cleanup_another_set

Donut cleanup another set
This commit is contained in:
Harshavardhana 2015-06-26 01:59:45 +00:00
commit bd0dccd8f1
6 changed files with 116 additions and 97 deletions

View File

@ -22,6 +22,7 @@ import (
"hash"
"io"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
@ -42,6 +43,7 @@ type bucket struct {
time time.Time
donutName string
nodes map[string]node
objects map[string]object
lock *sync.RWMutex
}
@ -65,47 +67,101 @@ func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bu
b.time = t
b.donutName = donutName
b.nodes = nodes
b.objects = make(map[string]object)
b.lock = new(sync.RWMutex)
return b, bucketMetadata, nil
}
// getObjectName - derive an object's canonical name by opening it at
// filepath.Join(diskPath, bucketPath) and reading the "object" key from its
// on-disk metadata; the freshly constructed object handle is cached in
// b.objects as a side effect.
//
// NOTE(review): the value receiver mutates the shared b.objects map (works
// because maps are reference types), and the caller ListObjects invokes this
// while holding only b.lock.RLock() — a write under a read lock. TODO confirm
// external serialization before assuming this is race-free.
func (b bucket) getObjectName(fileName, diskPath, bucketPath string) (string, error) {
// after this line the local `newObject` shadows the newObject constructor
newObject, err := newObject(fileName, filepath.Join(diskPath, bucketPath))
if err != nil {
return "", iodine.New(err, nil)
}
newObjectMetadata, err := newObject.GetObjectMetadata()
if err != nil {
return "", iodine.New(err, nil)
}
// the "object" metadata key is mandatory; a missing key means the stored
// metadata is corrupt, not merely absent
objectName, ok := newObjectMetadata["object"]
if !ok {
return "", iodine.New(ObjectCorrupted{Object: newObject.name}, nil)
}
b.objects[objectName] = newObject
return objectName, nil
}
// GetObjectMetadata - return the metadata map for an object previously cached
// in b.objects (populated by getObjectName during listing).
//
// NOTE(review): a lookup miss yields the zero-value object and the call is
// made without taking b.lock — presumably callers list first and serialize
// access; verify against call sites.
func (b bucket) GetObjectMetadata(objectName string) (map[string]string, error) {
return b.objects[objectName].GetObjectMetadata()
}
// ListObjects - list all objects
func (b bucket) ListObjects() (map[string]object, error) {
func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error) {
b.lock.RLock()
defer b.lock.RUnlock()
if maxkeys <= 0 {
maxkeys = 1000
}
var isTruncated bool
nodeSlice := 0
objects := make(map[string]object)
var objects []string
for _, node := range b.nodes {
disks, err := node.ListDisks()
if err != nil {
return nil, iodine.New(err, nil)
return nil, nil, false, iodine.New(err, nil)
}
for order, disk := range disks {
bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
bucketPath := filepath.Join(b.donutName, bucketSlice)
files, err := disk.ListDir(bucketPath)
if err != nil {
return nil, iodine.New(err, nil)
return nil, nil, false, iodine.New(err, nil)
}
for _, file := range files {
newObject, err := newObject(file.Name(), filepath.Join(disk.GetPath(), bucketPath))
objectName, err := b.getObjectName(file.Name(), disk.GetPath(), bucketPath)
if err != nil {
return nil, iodine.New(err, nil)
return nil, nil, false, iodine.New(err, nil)
}
newObjectMetadata, err := newObject.GetObjectMetadata()
if err != nil {
return nil, iodine.New(err, nil)
if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
if objectName > marker {
objects = appendUniq(objects, objectName)
}
objectName, ok := newObjectMetadata["object"]
if !ok {
return nil, iodine.New(ObjectCorrupted{Object: newObject.name}, nil)
}
objects[objectName] = newObject
}
}
nodeSlice = nodeSlice + 1
}
return objects, nil
{
if strings.TrimSpace(prefix) != "" {
objects = removePrefix(objects, prefix)
}
var prefixes []string
var filteredObjects []string
if strings.TrimSpace(delimiter) != "" {
filteredObjects = filterDelimited(objects, delimiter)
prefixes = filterNotDelimited(objects, delimiter)
prefixes = extractDelimited(prefixes, delimiter)
prefixes = uniqueObjects(prefixes)
} else {
filteredObjects = objects
}
var results []string
var commonPrefixes []string
sort.Strings(filteredObjects)
for _, objectName := range filteredObjects {
if len(results) >= maxkeys {
isTruncated = true
break
}
results = appendUniq(results, prefix+objectName)
}
for _, commonPrefix := range prefixes {
commonPrefixes = appendUniq(commonPrefixes, prefix+commonPrefix)
}
sort.Strings(results)
sort.Strings(commonPrefixes)
return results, commonPrefixes, isTruncated, nil
}
}
// ReadObject - open an object to read
@ -114,12 +170,12 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
defer b.lock.RUnlock()
reader, writer := io.Pipe()
// get list of objects
objects, err := b.ListObjects()
_, _, _, err = b.ListObjects(objectName, "", "", 1)
if err != nil {
return nil, 0, iodine.New(err, nil)
}
// check if object exists
object, ok := objects[objectName]
object, ok := b.objects[objectName]
if !ok {
return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil)
}
@ -149,7 +205,6 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
func (b bucket) WriteObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error) {
b.lock.Lock()
defer b.lock.Unlock()
if objectName == "" || objectData == nil {
return "", iodine.New(InvalidArgument{}, nil)
}
@ -362,6 +417,9 @@ func (b bucket) readEncodedData(objectName string, writer *io.PipeWriter, donutO
writer.CloseWithError(iodine.New(err, nil))
return
}
for _, reader := range readers {
defer reader.Close()
}
hasher := md5.New()
mwriter := io.MultiWriter(writer, hasher)
switch len(readers) == 1 {

View File

@ -52,7 +52,7 @@ func filterNotDelimited(objects []string, delim string) []string {
return results
}
func extractDir(objects []string, delim string) []string {
func extractDelimited(objects []string, delim string) []string {
var results []string
for _, object := range objects {
parts := strings.Split(object, delim)

View File

@ -93,6 +93,7 @@ func (disk Disk) ListDir(dirname string) ([]os.FileInfo, error) {
if err != nil {
return nil, iodine.New(err, nil)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, iodine.New(err, nil)
@ -113,6 +114,7 @@ func (disk Disk) ListFiles(dirname string) ([]os.FileInfo, error) {
if err != nil {
return nil, iodine.New(err, nil)
}
defer dir.Close()
contents, err := dir.Readdir(-1)
if err != nil {
return nil, iodine.New(err, nil)

View File

@ -22,7 +22,6 @@ import (
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
@ -101,7 +100,7 @@ func (dt donut) MakeBucket(bucket, acl string) error {
func (dt donut) GetBucketMetadata(bucket string) (map[string]string, error) {
dt.lock.RLock()
defer dt.lock.RUnlock()
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return nil, iodine.New(err, nil)
}
if _, ok := dt.buckets[bucket]; !ok {
@ -118,7 +117,7 @@ func (dt donut) GetBucketMetadata(bucket string) (map[string]string, error) {
func (dt donut) SetBucketMetadata(bucket string, bucketMetadata map[string]string) error {
dt.lock.Lock()
defer dt.lock.Unlock()
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return iodine.New(err, nil)
}
metadata, err := dt.getDonutBucketMetadata()
@ -136,7 +135,7 @@ func (dt donut) SetBucketMetadata(bucket string, bucketMetadata map[string]strin
func (dt donut) ListBuckets() (metadata map[string]map[string]string, err error) {
dt.lock.RLock()
defer dt.lock.RUnlock()
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return nil, iodine.New(err, nil)
}
metadata, err = dt.getDonutBucketMetadata()
@ -160,68 +159,17 @@ func (dt donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys in
"delimiter": delimiter,
"maxkeys": strconv.Itoa(maxkeys),
}
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return nil, nil, false, iodine.New(err, errParams)
}
if _, ok := dt.buckets[bucket]; !ok {
return nil, nil, false, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
}
objectList, err := dt.buckets[bucket].ListObjects()
objects, commonPrefixes, isTruncated, err := dt.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
if err != nil {
return nil, nil, false, iodine.New(err, errParams)
}
var donutObjects []string
for objectName := range objectList {
donutObjects = append(donutObjects, objectName)
}
if maxkeys <= 0 {
maxkeys = 1000
}
if strings.TrimSpace(prefix) != "" {
donutObjects = filterPrefix(donutObjects, prefix)
donutObjects = removePrefix(donutObjects, prefix)
}
var actualObjects []string
var actualPrefixes []string
var isTruncated bool
if strings.TrimSpace(delimiter) != "" {
actualObjects = filterDelimited(donutObjects, delimiter)
actualPrefixes = filterNotDelimited(donutObjects, delimiter)
actualPrefixes = extractDir(actualPrefixes, delimiter)
actualPrefixes = uniqueObjects(actualPrefixes)
} else {
actualObjects = donutObjects
}
sort.Strings(actualObjects)
var newActualObjects []string
switch {
case marker != "":
for _, objectName := range actualObjects {
if objectName > marker {
newActualObjects = append(newActualObjects, objectName)
}
}
default:
newActualObjects = actualObjects
}
var results []string
var commonPrefixes []string
for _, objectName := range newActualObjects {
if len(results) >= maxkeys {
isTruncated = true
break
}
results = appendUniq(results, prefix+objectName)
}
for _, commonPrefix := range actualPrefixes {
commonPrefixes = appendUniq(commonPrefixes, prefix+commonPrefix)
}
sort.Strings(results)
sort.Strings(commonPrefixes)
return results, commonPrefixes, isTruncated, nil
return objects, commonPrefixes, isTruncated, nil
}
// PutObject - put object
@ -238,17 +186,17 @@ func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadC
if object == "" || strings.TrimSpace(object) == "" {
return "", iodine.New(InvalidArgument{}, errParams)
}
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return "", iodine.New(err, errParams)
}
if _, ok := dt.buckets[bucket]; !ok {
return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
}
objectList, err := dt.buckets[bucket].ListObjects()
objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
if err != nil {
return "", iodine.New(err, nil)
}
for objectName := range objectList {
for _, objectName := range objectList {
if objectName == object {
return "", iodine.New(ObjectExists{Object: object}, nil)
}
@ -274,7 +222,7 @@ func (dt donut) GetObject(bucket, object string) (reader io.ReadCloser, size int
if object == "" || strings.TrimSpace(object) == "" {
return nil, 0, iodine.New(InvalidArgument{}, errParams)
}
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return nil, 0, iodine.New(err, nil)
}
if _, ok := dt.buckets[bucket]; !ok {
@ -291,21 +239,27 @@ func (dt donut) GetObjectMetadata(bucket, object string) (map[string]string, err
"bucket": bucket,
"object": object,
}
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return nil, iodine.New(err, errParams)
}
if _, ok := dt.buckets[bucket]; !ok {
return nil, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
}
objectList, err := dt.buckets[bucket].ListObjects()
//
// there is a potential issue here, if the object comes after the truncated list
// below GetObjectMetadata would fail as ObjectNotFound{}
//
// will fix it when we bring in persistent json into Donut - TODO
objectList, _, _, err := dt.buckets[bucket].ListObjects("", "", "", 1000)
if err != nil {
return nil, iodine.New(err, errParams)
}
donutObject, ok := objectList[object]
if !ok {
return nil, iodine.New(ObjectNotFound{Object: object}, errParams)
for _, objectName := range objectList {
if objectName == object {
return dt.buckets[bucket].GetObjectMetadata(object)
}
return donutObject.GetObjectMetadata()
}
return nil, iodine.New(ObjectNotFound{Object: object}, errParams)
}
// getDiskWriters -
@ -384,7 +338,7 @@ func (dt donut) getDonutBucketMetadata() (map[string]map[string]string, error) {
}
func (dt donut) makeDonutBucket(bucketName, acl string) error {
if err := dt.getDonutBuckets(); err != nil {
if err := dt.listDonutBuckets(); err != nil {
return iodine.New(err, nil)
}
if _, ok := dt.buckets[bucketName]; ok {
@ -412,8 +366,7 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error {
}
metadata, err := dt.getDonutBucketMetadata()
if err != nil {
err = iodine.ToError(err)
if os.IsNotExist(err) {
if os.IsNotExist(iodine.ToError(err)) {
metadata := make(map[string]map[string]string)
metadata[bucketName] = bucketMetadata
err = dt.setDonutBucketMetadata(metadata)
@ -432,7 +385,7 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error {
return nil
}
func (dt donut) getDonutBuckets() error {
func (dt donut) listDonutBuckets() error {
for _, node := range dt.nodes {
disks, err := node.ListDisks()
if err != nil {

View File

@ -306,7 +306,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
/// test list of objects
// test list objects with prefix and delimiter
listObjects, prefixes, isTruncated, err := donut.ListObjects("foo", "o", "", "1", 1)
listObjects, prefixes, isTruncated, err := donut.ListObjects("foo", "o", "", "1", 10)
c.Assert(err, IsNil)
c.Assert(isTruncated, Equals, false)
c.Assert(prefixes[0], Equals, "obj1")

View File

@ -187,9 +187,9 @@ func testPaging(c *check.C, create func() Driver) {
resources.Maxkeys = 5
resources.Prefix = ""
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, i+1)
c.Assert(resources.IsTruncated, check.Equals, false)
c.Assert(err, check.IsNil)
}
// check after paging occurs pages work
for i := 6; i <= 10; i++ {
@ -198,9 +198,9 @@ func testPaging(c *check.C, create func() Driver) {
resources.Maxkeys = 5
resources.Prefix = ""
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 5)
c.Assert(resources.IsTruncated, check.Equals, true)
c.Assert(err, check.IsNil)
}
// check paging with prefix at end returns less objects
{
@ -209,6 +209,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Prefix = "new"
resources.Maxkeys = 5
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(len(objects), check.Equals, 2)
}
@ -217,6 +218,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Prefix = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, check.Equals, "obj0")
@ -248,6 +250,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Prefix = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
c.Assert(objects[2].Key, check.Equals, "obj0")
@ -265,6 +268,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Delimiter = ""
resources.Maxkeys = 3
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix2")
c.Assert(objects[1].Key, check.Equals, "obj0")
c.Assert(objects[2].Key, check.Equals, "obj1")
@ -276,6 +280,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Marker = ""
resources.Maxkeys = 1000
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "obj0")
c.Assert(objects[1].Key, check.Equals, "obj1")
c.Assert(objects[2].Key, check.Equals, "obj10")
@ -288,6 +293,7 @@ func testPaging(c *check.C, create func() Driver) {
resources.Marker = ""
resources.Maxkeys = 5
objects, resources, err = drivers.ListObjects("bucket", resources)
c.Assert(err, check.IsNil)
c.Assert(objects[0].Key, check.Equals, "newPrefix")
c.Assert(objects[1].Key, check.Equals, "newPrefix2")
}
@ -313,8 +319,8 @@ func testObjectOverwriteFails(c *check.C, create func() Driver) {
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "object")
c.Assert(length, check.Equals, int64(len("one")))
c.Assert(err, check.IsNil)
c.Assert(length, check.Equals, int64(len("one")))
c.Assert(string(bytesBuffer.Bytes()), check.Equals, "one")
}
@ -358,9 +364,9 @@ func testPutObjectInSubdir(c *check.C, create func() Driver) {
var bytesBuffer bytes.Buffer
length, err := drivers.GetObject(&bytesBuffer, "bucket", "dir1/dir2/object")
c.Assert(err, check.IsNil)
c.Assert(len(bytesBuffer.Bytes()), check.Equals, len("hello world"))
c.Assert(int64(len(bytesBuffer.Bytes())), check.Equals, length)
c.Assert(err, check.IsNil)
}
func testListBuckets(c *check.C, create func() Driver) {
@ -405,8 +411,8 @@ func testListBucketsOrder(c *check.C, create func() Driver) {
drivers.CreateBucket("bucket2", "")
buckets, err := drivers.ListBuckets()
c.Assert(len(buckets), check.Equals, 2)
c.Assert(err, check.IsNil)
c.Assert(len(buckets), check.Equals, 2)
c.Assert(buckets[0].Name, check.Equals, "bucket1")
c.Assert(buckets[1].Name, check.Equals, "bucket2")
}