mirror of https://github.com/minio/minio.git
Take all the ListObjects into bucket handlers
Earlier, listing would wait for every object in the bucket to be processed before returning; even at 100,000 files this is prohibitively time consuming.
parent 8405c4d42f
commit eec66f195a
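In short: the new ListObjects filters by prefix/marker and stops as soon as maxkeys entries are collected while it walks the disks, instead of materializing every object first and filtering afterwards. A minimal sketch of that early-exit pattern, with illustrative names only (not the actual donut API):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// listTruncated collects at most maxKeys names matching prefix and past
// marker, and reports whether it stopped early. Hypothetical helper, not
// part of the commit.
func listTruncated(names []string, prefix, marker string, maxKeys int) (results []string, truncated bool) {
	if maxKeys <= 0 {
		maxKeys = 1000 // S3-style default page size
	}
	for _, name := range names {
		if len(results) >= maxKeys {
			truncated = true // stop early: remaining names are never touched
			break
		}
		if strings.HasPrefix(name, prefix) && name > marker {
			results = append(results, name)
		}
	}
	sort.Strings(results)
	return results, truncated
}

func main() {
	names := []string{"obj1", "obj2", "other"}
	res, truncated := listTruncated(names, "obj", "", 1)
	fmt.Println(res, truncated) // [obj1] true
}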
@@ -22,6 +22,7 @@ import (
 	"hash"
 	"io"
 	"path/filepath"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -42,6 +43,7 @@ type bucket struct {
 	time      time.Time
 	donutName string
 	nodes     map[string]node
+	objects   map[string]object
 	lock      *sync.RWMutex
 }
@@ -65,47 +67,102 @@ func newBucket(bucketName, aclType, donutName string, nodes map[string]node) (bu
 	b.time = t
 	b.donutName = donutName
 	b.nodes = nodes
+	b.objects = make(map[string]object)
 	b.lock = new(sync.RWMutex)
 	return b, bucketMetadata, nil
 }
 
+func (b bucket) getObjectName(fileName, diskPath, bucketPath string) (string, error) {
+	newObject, err := newObject(fileName, filepath.Join(diskPath, bucketPath))
+	if err != nil {
+		return "", iodine.New(err, nil)
+	}
+	newObjectMetadata, err := newObject.GetObjectMetadata()
+	if err != nil {
+		return "", iodine.New(err, nil)
+	}
+	objectName, ok := newObjectMetadata["object"]
+	if !ok {
+		return "", iodine.New(ObjectCorrupted{Object: newObject.name}, nil)
+	}
+	b.objects[objectName] = newObject
+	return objectName, nil
+}
+
+func (b bucket) GetObjectMetadata(objectName string) (map[string]string, error) {
+	return b.objects[objectName].GetObjectMetadata()
+}
+
 // ListObjects - list all objects
-func (b bucket) ListObjects() (map[string]object, error) {
+func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error) {
 	b.lock.RLock()
 	defer b.lock.RUnlock()
+	if maxkeys <= 0 {
+		maxkeys = 1000
+	}
+	var isTruncated bool
 	nodeSlice := 0
-	objects := make(map[string]object)
+	var objects []string
 	for _, node := range b.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
-			return nil, iodine.New(err, nil)
+			return nil, nil, false, iodine.New(err, nil)
 		}
 		for order, disk := range disks {
 			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
 			bucketPath := filepath.Join(b.donutName, bucketSlice)
 			files, err := disk.ListDir(bucketPath)
 			if err != nil {
-				return nil, iodine.New(err, nil)
+				return nil, nil, false, iodine.New(err, nil)
 			}
 			for _, file := range files {
-				newObject, err := newObject(file.Name(), filepath.Join(disk.GetPath(), bucketPath))
+				if len(objects) >= maxkeys {
+					isTruncated = true
+					goto truncated
+				}
+				objectName, err := b.getObjectName(file.Name(), disk.GetPath(), bucketPath)
 				if err != nil {
-					return nil, iodine.New(err, nil)
+					return nil, nil, false, iodine.New(err, nil)
 				}
-				newObjectMetadata, err := newObject.GetObjectMetadata()
-				if err != nil {
-					return nil, iodine.New(err, nil)
-				}
-				objectName, ok := newObjectMetadata["object"]
-				if !ok {
-					return nil, iodine.New(ObjectCorrupted{Object: newObject.name}, nil)
-				}
-				objects[objectName] = newObject
+				if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) {
+					if objectName > marker {
+						objects = appendUniq(objects, objectName)
+					}
+				}
 			}
 		}
 		nodeSlice = nodeSlice + 1
 	}
-	return objects, nil
+
+truncated:
+	{
+		if strings.TrimSpace(prefix) != "" {
+			objects = removePrefix(objects, prefix)
+		}
+		var prefixes []string
+		var filteredObjects []string
+		if strings.TrimSpace(delimiter) != "" {
+			filteredObjects = filterDelimited(objects, delimiter)
+			prefixes = filterNotDelimited(objects, delimiter)
+			prefixes = extractDelimited(prefixes, delimiter)
+			prefixes = uniqueObjects(prefixes)
+		} else {
+			filteredObjects = objects
+		}
+
+		var results []string
+		var commonPrefixes []string
+		for _, objectName := range filteredObjects {
+			results = appendUniq(results, prefix+objectName)
+		}
+		for _, commonPrefix := range prefixes {
+			commonPrefixes = appendUniq(commonPrefixes, prefix+commonPrefix)
+		}
+		sort.Strings(results)
+		sort.Strings(commonPrefixes)
+		return results, commonPrefixes, isTruncated, nil
+	}
+}
 
 // ReadObject - open an object to read
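The truncated block above implements S3-style delimiter semantics: keys containing the delimiter are collapsed into common prefixes, the rest are returned as plain objects. A standalone illustration of that grouping (hand-rolled helper, not the filterDelimited/extractDelimited/uniqueObjects chain used by the diff):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// splitByDelimiter mimics the objects-vs-commonPrefixes split: a key that
// contains the delimiter contributes only its leading segment, once.
func splitByDelimiter(keys []string, delim string) (objects, commonPrefixes []string) {
	seen := make(map[string]bool)
	for _, key := range keys {
		if i := strings.Index(key, delim); i >= 0 {
			prefix := key[:i+len(delim)]
			if !seen[prefix] {
				seen[prefix] = true
				commonPrefixes = append(commonPrefixes, prefix)
			}
			continue
		}
		objects = append(objects, key)
	}
	sort.Strings(objects)
	sort.Strings(commonPrefixes)
	return objects, commonPrefixes
}

func main() {
	keys := []string{"photos/a.png", "photos/b.png", "readme.md"}
	objects, prefixes := splitByDelimiter(keys, "/")
	fmt.Println(objects)  // [readme.md]
	fmt.Println(prefixes) // [photos/]
}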
@@ -114,12 +171,12 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
 	defer b.lock.RUnlock()
 	reader, writer := io.Pipe()
 	// get list of objects
-	objects, err := b.ListObjects()
+	_, _, _, err = b.ListObjects(objectName, "", "", 1)
 	if err != nil {
 		return nil, 0, iodine.New(err, nil)
 	}
 	// check if object exists
-	object, ok := objects[objectName]
+	object, ok := b.objects[objectName]
 	if !ok {
 		return nil, 0, iodine.New(ObjectNotFound{Object: objectName}, nil)
 	}
@@ -149,7 +206,6 @@ func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64,
 func (b bucket) WriteObject(objectName string, objectData io.Reader, expectedMD5Sum string, metadata map[string]string) (string, error) {
 	b.lock.Lock()
 	defer b.lock.Unlock()
-
 	if objectName == "" || objectData == nil {
 		return "", iodine.New(InvalidArgument{}, nil)
 	}
@@ -52,7 +52,7 @@ func filterNotDelimited(objects []string, delim string) []string {
 	return results
 }
 
-func extractDir(objects []string, delim string) []string {
+func extractDelimited(objects []string, delim string) []string {
 	var results []string
 	for _, object := range objects {
 		parts := strings.Split(object, delim)
@@ -22,7 +22,6 @@ import (
 	"io"
 	"os"
 	"path/filepath"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -101,7 +100,7 @@ func (dt donut) MakeBucket(bucket, acl string) error {
 func (dt donut) GetBucketMetadata(bucket string) (map[string]string, error) {
 	dt.lock.RLock()
 	defer dt.lock.RUnlock()
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, iodine.New(err, nil)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
@@ -118,7 +117,7 @@ func (dt donut) GetBucketMetadata(bucket string) (map[string]string, error) {
 func (dt donut) SetBucketMetadata(bucket string, bucketMetadata map[string]string) error {
 	dt.lock.Lock()
 	defer dt.lock.Unlock()
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return iodine.New(err, nil)
 	}
 	metadata, err := dt.getDonutBucketMetadata()
@@ -136,7 +135,7 @@ func (dt donut) SetBucketMetadata(bucket string, bucketMetadata map[string]strin
 func (dt donut) ListBuckets() (metadata map[string]map[string]string, err error) {
 	dt.lock.RLock()
 	defer dt.lock.RUnlock()
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, iodine.New(err, nil)
 	}
 	metadata, err = dt.getDonutBucketMetadata()
@@ -160,68 +159,17 @@ func (dt donut) ListObjects(bucket, prefix, marker, delimiter string, maxkeys in
 		"delimiter": delimiter,
 		"maxkeys":   strconv.Itoa(maxkeys),
 	}
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, nil, false, iodine.New(err, errParams)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
 		return nil, nil, false, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
 	}
-	objectList, err := dt.buckets[bucket].ListObjects()
+	objects, commonPrefixes, isTruncated, err := dt.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys)
 	if err != nil {
 		return nil, nil, false, iodine.New(err, errParams)
 	}
-	var donutObjects []string
-	for objectName := range objectList {
-		donutObjects = append(donutObjects, objectName)
-	}
-	if maxkeys <= 0 {
-		maxkeys = 1000
-	}
-	if strings.TrimSpace(prefix) != "" {
-		donutObjects = filterPrefix(donutObjects, prefix)
-		donutObjects = removePrefix(donutObjects, prefix)
-	}
-
-	var actualObjects []string
-	var actualPrefixes []string
-	var isTruncated bool
-	if strings.TrimSpace(delimiter) != "" {
-		actualObjects = filterDelimited(donutObjects, delimiter)
-		actualPrefixes = filterNotDelimited(donutObjects, delimiter)
-		actualPrefixes = extractDir(actualPrefixes, delimiter)
-		actualPrefixes = uniqueObjects(actualPrefixes)
-	} else {
-		actualObjects = donutObjects
-	}
-
-	sort.Strings(actualObjects)
-	var newActualObjects []string
-	switch {
-	case marker != "":
-		for _, objectName := range actualObjects {
-			if objectName > marker {
-				newActualObjects = append(newActualObjects, objectName)
-			}
-		}
-	default:
-		newActualObjects = actualObjects
-	}
-
-	var results []string
-	var commonPrefixes []string
-	for _, objectName := range newActualObjects {
-		if len(results) >= maxkeys {
-			isTruncated = true
-			break
-		}
-		results = appendUniq(results, prefix+objectName)
-	}
-	for _, commonPrefix := range actualPrefixes {
-		commonPrefixes = appendUniq(commonPrefixes, prefix+commonPrefix)
-	}
-	sort.Strings(results)
-	sort.Strings(commonPrefixes)
-	return results, commonPrefixes, isTruncated, nil
+	return objects, commonPrefixes, isTruncated, nil
 }
 
 // PutObject - put object
@@ -238,17 +186,17 @@ func (dt donut) PutObject(bucket, object, expectedMD5Sum string, reader io.ReadC
 	if object == "" || strings.TrimSpace(object) == "" {
 		return "", iodine.New(InvalidArgument{}, errParams)
 	}
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return "", iodine.New(err, errParams)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
 		return "", iodine.New(BucketNotFound{Bucket: bucket}, nil)
 	}
-	objectList, err := dt.buckets[bucket].ListObjects()
+	objectList, _, _, err := dt.buckets[bucket].ListObjects(object, "", "", 1)
 	if err != nil {
 		return "", iodine.New(err, nil)
 	}
-	for objectName := range objectList {
+	for _, objectName := range objectList {
 		if objectName == object {
 			return "", iodine.New(ObjectExists{Object: object}, nil)
 		}
@@ -274,7 +222,7 @@ func (dt donut) GetObject(bucket, object string) (reader io.ReadCloser, size int
 	if object == "" || strings.TrimSpace(object) == "" {
 		return nil, 0, iodine.New(InvalidArgument{}, errParams)
 	}
-	if err := dt.getDonutBuckets(); err != nil {
+	if err := dt.listDonutBuckets(); err != nil {
 		return nil, 0, iodine.New(err, nil)
 	}
 	if _, ok := dt.buckets[bucket]; !ok {
|
@ -291,21 +239,22 @@ func (dt donut) GetObjectMetadata(bucket, object string) (map[string]string, err
|
|||
"bucket": bucket,
|
||||
"object": object,
|
||||
}
|
||||
if err := dt.getDonutBuckets(); err != nil {
|
||||
if err := dt.listDonutBuckets(); err != nil {
|
||||
return nil, iodine.New(err, errParams)
|
||||
}
|
||||
if _, ok := dt.buckets[bucket]; !ok {
|
||||
return nil, iodine.New(BucketNotFound{Bucket: bucket}, errParams)
|
||||
}
|
||||
objectList, err := dt.buckets[bucket].ListObjects()
|
||||
objectList, _, _, err := dt.buckets[bucket].ListObjects(object, "", "", 1)
|
||||
if err != nil {
|
||||
return nil, iodine.New(err, errParams)
|
||||
}
|
||||
donutObject, ok := objectList[object]
|
||||
if !ok {
|
||||
return nil, iodine.New(ObjectNotFound{Object: object}, errParams)
|
||||
for _, objectName := range objectList {
|
||||
if objectName == object {
|
||||
return dt.buckets[bucket].GetObjectMetadata(object)
|
||||
}
|
||||
}
|
||||
return donutObject.GetObjectMetadata()
|
||||
return nil, iodine.New(ObjectNotFound{Object: object}, errParams)
|
||||
}
|
||||
|
||||
// getDiskWriters -
|
||||
|
@ -384,7 +333,7 @@ func (dt donut) getDonutBucketMetadata() (map[string]map[string]string, error) {
|
|||
}
|
||||
|
||||
func (dt donut) makeDonutBucket(bucketName, acl string) error {
|
||||
if err := dt.getDonutBuckets(); err != nil {
|
||||
if err := dt.listDonutBuckets(); err != nil {
|
||||
return iodine.New(err, nil)
|
||||
}
|
||||
if _, ok := dt.buckets[bucketName]; ok {
|
||||
|
@@ -412,8 +361,7 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error {
 	}
 	metadata, err := dt.getDonutBucketMetadata()
 	if err != nil {
-		err = iodine.ToError(err)
-		if os.IsNotExist(err) {
+		if os.IsNotExist(iodine.ToError(err)) {
 			metadata := make(map[string]map[string]string)
 			metadata[bucketName] = bucketMetadata
 			err = dt.setDonutBucketMetadata(metadata)
@@ -432,7 +380,7 @@ func (dt donut) makeDonutBucket(bucketName, acl string) error {
 	return nil
 }
 
-func (dt donut) getDonutBuckets() error {
+func (dt donut) listDonutBuckets() error {
 	for _, node := range dt.nodes {
 		disks, err := node.ListDisks()
 		if err != nil {
|
@ -306,7 +306,7 @@ func (s *MySuite) TestMultipleNewObjects(c *C) {
|
|||
/// test list of objects
|
||||
|
||||
// test list objects with prefix and delimiter
|
||||
listObjects, prefixes, isTruncated, err := donut.ListObjects("foo", "o", "", "1", 1)
|
||||
listObjects, prefixes, isTruncated, err := donut.ListObjects("foo", "o", "", "1", 10)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(isTruncated, Equals, false)
|
||||
c.Assert(prefixes[0], Equals, "obj1")
|
||||
|
|
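With truncation pushed into the bucket, a caller pages through large buckets via the marker argument. A hypothetical pagination loop against the new signature (the Donut interface below is assumed for illustration, matching the test call above):

// Assumed interface matching the new signature:
// ListObjects(bucket, prefix, marker, delimiter, maxkeys).
type Donut interface {
	ListObjects(bucket, prefix, marker, delimiter string, maxkeys int) ([]string, []string, bool, error)
}

// listAll drains a bucket one page at a time, resuming after the last key
// of each page instead of listing everything in one pass.
func listAll(d Donut, bucket string) ([]string, error) {
	var all []string
	marker := ""
	for {
		objects, _, isTruncated, err := d.ListObjects(bucket, "", marker, "", 1000)
		if err != nil {
			return nil, err
		}
		all = append(all, objects...)
		if !isTruncated || len(objects) == 0 {
			break
		}
		marker = objects[len(objects)-1] // continue after the last key seen
	}
	return all, nil
}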