listObjects: Channel based changes.

Supports:
- prefixes
- marker

parent 9e18bfa60e
commit 682020ef2f
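
For orientation before the diff, here is a minimal, hypothetical caller of the flattened ListObjects signature this commit introduces. The import path and the root path passed to fs.New are assumptions for illustration; the field names come from the ListObjectsResult type added below.

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/fs" // assumed import path for pkg/fs
)

func main() {
	// Root path is illustrative; fs.New signature is per pkg/fs/fs.go below.
	filesystem, err := fs.New("/tmp/minio-root")
	if err != nil {
		fmt.Println(err)
		return
	}
	// New flat call: bucket, prefix, marker, delimiter, maxKeys.
	result, err := filesystem.ListObjects("bucket", "this/is/", "", "/", 1000)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, object := range result.Objects {
		fmt.Println("object:", object.Object, object.Size)
	}
	for _, prefix := range result.Prefixes {
		fmt.Println("common prefix:", prefix)
	}
	fmt.Println("truncated:", result.IsTruncated, "next marker:", result.NextMarker)
}
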
@@ -53,7 +53,6 @@ const (
	InvalidBucketName
	InvalidDigest
	InvalidRange
	InvalidRequest
	InvalidMaxKeys
	InvalidMaxUploads
	InvalidMaxParts
@@ -24,12 +24,12 @@ import (
)

// parse bucket url queries
-func getBucketResources(values url.Values) (v fs.BucketResourcesMetadata) {
-	v.Prefix = values.Get("prefix")
-	v.Marker = values.Get("marker")
-	v.Maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
-	v.Delimiter = values.Get("delimiter")
-	v.EncodingType = values.Get("encoding-type")
+func getBucketResources(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
+	prefix = values.Get("prefix")
+	marker = values.Get("marker")
+	delimiter = values.Get("delimiter")
+	maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
+	encodingType = values.Get("encoding-type")
	return
}
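
As a quick illustration of the new flat-return parsing style, a small self-contained sketch follows; parseListQuery is a hypothetical stand-in mirroring the rewritten getBucketResources, not part of the commit.

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

// parseListQuery mirrors the flat multi-value return shown in the hunk above.
func parseListQuery(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string) {
	prefix = values.Get("prefix")
	marker = values.Get("marker")
	delimiter = values.Get("delimiter")
	maxkeys, _ = strconv.Atoi(values.Get("max-keys"))
	encodingType = values.Get("encoding-type")
	return
}

func main() {
	values, _ := url.ParseQuery("prefix=this/is/&delimiter=/&max-keys=10")
	prefix, marker, delimiter, maxkeys, _ := parseListQuery(values)
	// Prints: this/is/  / 10 (marker is empty)
	fmt.Println(prefix, marker, delimiter, maxkeys)
}
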
@@ -92,8 +92,7 @@ func generateAccessControlPolicyResponse(acl fs.BucketACL) AccessControlPolicyRe
}

// generates an ListObjects response for the said bucket with other enumerated options.
-// func generateListObjectsResponse(bucket string, objects []fs.ObjectMetadata, bucketResources fs.BucketResourcesMetadata) ListObjectsResponse {
-func generateListObjectsResponse(bucket string, req fs.ListObjectsReq, resp fs.ListObjectsResp) ListObjectsResponse {
+func generateListObjectsResponse(bucket, prefix, marker, delimiter string, maxKeys int, resp fs.ListObjectsResult) ListObjectsResponse {
	var contents []*Object
	var prefixes []*CommonPrefix
	var owner = Owner{}
@@ -119,10 +118,10 @@ func generateListObjectsResponse(bucket string, req fs.ListObjectsReq, resp fs.L
	data.Name = bucket
	data.Contents = contents

-	data.MaxKeys = req.MaxKeys
-	data.Prefix = req.Prefix
-	data.Delimiter = req.Delimiter
-	data.Marker = req.Marker
+	data.Prefix = prefix
+	data.Marker = marker
+	data.Delimiter = delimiter
+	data.MaxKeys = maxKeys

	data.NextMarker = resp.NextMarker
	data.IsTruncated = resp.IsTruncated
@@ -52,6 +52,7 @@ func (api CloudStorageAPI) GetBucketLocationHandler(w http.ResponseWriter, req *
	default:
		writeErrorResponse(w, req, InternalError, req.URL.Path)
	}
	return
}

// TODO: Location value for LocationResponse is deliberately not used, until
@@ -128,25 +129,21 @@ func (api CloudStorageAPI) ListObjectsHandler(w http.ResponseWriter, req *http.R
			}
		}
	}
-	resources := getBucketResources(req.URL.Query())
-	if resources.Maxkeys < 0 {
+
+	// TODO handle encoding type.
+	prefix, marker, delimiter, maxkeys, _ := getBucketResources(req.URL.Query())
+	if maxkeys < 0 {
		writeErrorResponse(w, req, InvalidMaxKeys, req.URL.Path)
		return
	}
-	if resources.Maxkeys == 0 {
-		resources.Maxkeys = maxObjectList
+	if maxkeys == 0 {
+		maxkeys = maxObjectList
	}

-	listReq := fs.ListObjectsReq{
-		Prefix: resources.Prefix,
-		Marker: resources.Marker,
-		Delimiter: resources.Delimiter,
-		MaxKeys: resources.Maxkeys,
-	}
-	listResp, err := api.Filesystem.ListObjects(bucket, listReq)
+	listResp, err := api.Filesystem.ListObjects(bucket, prefix, marker, delimiter, maxkeys)
	if err == nil {
		// generate response
-		response := generateListObjectsResponse(bucket, listReq, listResp)
+		response := generateListObjectsResponse(bucket, prefix, marker, delimiter, maxkeys, listResp)
		encodedSuccessResponse := encodeSuccessResponse(response)
		// Write headers
		setCommonHeaders(w)
@@ -165,59 +165,50 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
func testPaging(c *check.C, create func() Filesystem) {
	fs := create()
	fs.MakeBucket("bucket", "")
-	resources := BucketResourcesMetadata{}
-	objects, resources, err := fs.ListObjects("bucket", resources)
+	result, err := fs.ListObjects("bucket", "", "", "", 0)
	c.Assert(err, check.IsNil)
-	c.Assert(len(objects), check.Equals, 0)
-	c.Assert(resources.IsTruncated, check.Equals, false)
+	c.Assert(len(result.Objects), check.Equals, 0)
+	c.Assert(result.IsTruncated, check.Equals, false)
	// check before paging occurs
	for i := 0; i < 5; i++ {
		key := "obj" + strconv.Itoa(i)
		_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
		c.Assert(err, check.IsNil)
-		resources.Maxkeys = 5
-		resources.Prefix = ""
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, i+1)
-		c.Assert(resources.IsTruncated, check.Equals, false)
+		c.Assert(len(result.Objects), check.Equals, i+1)
+		c.Assert(result.IsTruncated, check.Equals, false)
	}
	// check after paging occurs pages work
	for i := 6; i <= 10; i++ {
		key := "obj" + strconv.Itoa(i)
		_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
		c.Assert(err, check.IsNil)
-		resources.Maxkeys = 5
-		resources.Prefix = ""
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, 5)
-		c.Assert(resources.IsTruncated, check.Equals, true)
+		c.Assert(len(result.Objects), check.Equals, 5)
+		c.Assert(result.IsTruncated, check.Equals, true)
	}
	// check paging with prefix at end returns less objects
	{
		_, err = fs.CreateObject("bucket", "newPrefix", "", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
		c.Assert(err, check.IsNil)
-		fs.CreateObject("bucket", "newPrefix2", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
+		_, err = fs.CreateObject("bucket", "newPrefix2", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
		c.Assert(err, check.IsNil)
-		resources.Prefix = "new"
-		resources.Maxkeys = 5
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "new", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, 2)
+		c.Assert(len(result.Objects), check.Equals, 2)
	}

	// check ordering of pages
	{
-		resources.Prefix = ""
-		resources.Maxkeys = 1000
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "", 1000)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(objects[2].Object, check.Equals, "obj0")
-		c.Assert(objects[3].Object, check.Equals, "obj1")
-		c.Assert(objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
	}

	// check delimited results with delimiter and prefix
@@ -226,72 +217,49 @@ func testPaging(c *check.C, create func() Filesystem) {
		c.Assert(err, check.IsNil)
		_, err = fs.CreateObject("bucket", "this/is/also/a/delimited/file", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
		c.Assert(err, check.IsNil)
-		var prefixes []string
-		resources.CommonPrefixes = prefixes // allocate new everytime
-		resources.Delimiter = "/"
-		resources.Prefix = "this/is/"
-		resources.Maxkeys = 10
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "this/is/", "", "/", 10)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, 1)
-		c.Assert(resources.CommonPrefixes[0], check.Equals, "this/is/also/")
+		c.Assert(len(result.Objects), check.Equals, 1)
+		c.Assert(result.Prefixes[0], check.Equals, "this/is/also/")
	}
	time.Sleep(time.Second)

	// check delimited results with delimiter without prefix
	{
-		var prefixes []string
-		resources.CommonPrefixes = prefixes // allocate new everytime
-		resources.Delimiter = "/"
-		resources.Prefix = ""
-		resources.Maxkeys = 1000
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "/", 1000)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(objects[2].Object, check.Equals, "obj0")
-		c.Assert(objects[3].Object, check.Equals, "obj1")
-		c.Assert(objects[4].Object, check.Equals, "obj10")
-		c.Assert(resources.CommonPrefixes[0], check.Equals, "this/")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Prefixes[0], check.Equals, "this/")
	}

	// check results with Marker
	{
-		var prefixes []string
-		resources.CommonPrefixes = prefixes // allocate new everytime
-		resources.Prefix = ""
-		resources.Marker = "newPrefix"
-		resources.Delimiter = ""
-		resources.Maxkeys = 3
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "newPrefix", "", 3)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix2")
-		c.Assert(objects[1].Object, check.Equals, "obj0")
-		c.Assert(objects[2].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[1].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj1")
	}
	// check ordering of results with prefix
	{
-		resources.Prefix = "obj"
-		resources.Delimiter = ""
-		resources.Marker = ""
-		resources.Maxkeys = 1000
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "obj", "", "", 1000)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "obj0")
-		c.Assert(objects[1].Object, check.Equals, "obj1")
-		c.Assert(objects[2].Object, check.Equals, "obj10")
-		c.Assert(objects[3].Object, check.Equals, "obj2")
-		c.Assert(objects[4].Object, check.Equals, "obj3")
+		c.Assert(result.Objects[0].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[1].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[3].Object, check.Equals, "obj2")
+		c.Assert(result.Objects[4].Object, check.Equals, "obj3")
	}
	// check ordering of results with prefix and no paging
	{
-		resources.Prefix = "new"
-		resources.Marker = ""
-		resources.Maxkeys = 5
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "new", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
	}
}

@@ -417,11 +385,10 @@ func testListBucketsOrder(c *check.C, create func() Filesystem) {

func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Filesystem) {
	fs := create()
-	resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
-	objects, resources, err := fs.ListObjects("bucket", resources)
+	result, err := fs.ListObjects("bucket", "", "", "", 1000)
	c.Assert(err, check.Not(check.IsNil))
-	c.Assert(resources.IsTruncated, check.Equals, false)
-	c.Assert(len(objects), check.Equals, 0)
+	c.Assert(result.IsTruncated, check.Equals, false)
+	c.Assert(len(result.Objects), check.Equals, 0)
}

func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
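
The IsTruncated/NextMarker pair exercised above supports a simple pagination loop. A hedged sketch follows; listAll, the bucket name, and the page size are illustrative, and the import paths are assumed from the probe import visible later in this diff.

package main

import (
	"fmt"

	"github.com/minio/minio-xl/pkg/probe"
	"github.com/minio/minio/pkg/fs" // assumed import path for pkg/fs
)

// listAll pages through every object in a bucket using the marker and
// IsTruncated contract exercised by the tests above.
func listAll(filesystem fs.Filesystem, bucket string) *probe.Error {
	marker := ""
	for {
		result, err := filesystem.ListObjects(bucket, "", marker, "", 1000)
		if err != nil {
			return err
		}
		for _, object := range result.Objects {
			fmt.Println(object.Object)
		}
		if !result.IsTruncated {
			return nil
		}
		// NextMarker is only set for delimited listings; otherwise the last
		// key of the page becomes the marker for the next request.
		if result.NextMarker != "" {
			marker = result.NextMarker
		} else {
			marker = result.Objects[len(result.Objects)-1].Object
		}
	}
}

func main() {
	filesystem, err := fs.New("/tmp/minio-root") // illustrative root path
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := listAll(filesystem, "bucket"); err != nil {
		fmt.Println(err)
	}
}
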
@@ -26,7 +26,6 @@ import (
	"encoding/xml"
	"math/rand"
	"strconv"
	"time"

	"gopkg.in/check.v1"
)
@@ -165,59 +164,50 @@ func testMultipleObjectCreation(c *check.C, create func() Filesystem) {
func testPaging(c *check.C, create func() Filesystem) {
	fs := create()
	fs.MakeBucket("bucket", "")
-	resources := BucketResourcesMetadata{}
-	objects, resources, err := fs.ListObjects("bucket", resources)
+	result, err := fs.ListObjects("bucket", "", "", "", 0)
	c.Assert(err, check.IsNil)
-	c.Assert(len(objects), check.Equals, 0)
-	c.Assert(resources.IsTruncated, check.Equals, false)
+	c.Assert(len(result.Objects), check.Equals, 0)
+	c.Assert(result.IsTruncated, check.Equals, false)
	// check before paging occurs
	for i := 0; i < 5; i++ {
		key := "obj" + strconv.Itoa(i)
		_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
		c.Assert(err, check.IsNil)
-		resources.Maxkeys = 5
-		resources.Prefix = ""
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, i+1)
-		c.Assert(resources.IsTruncated, check.Equals, false)
+		c.Assert(len(result.Objects), check.Equals, i+1)
+		c.Assert(result.IsTruncated, check.Equals, false)
	}
	// check after paging occurs pages work
	for i := 6; i <= 10; i++ {
		key := "obj" + strconv.Itoa(i)
		_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
		c.Assert(err, check.IsNil)
-		resources.Maxkeys = 5
-		resources.Prefix = ""
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, 5)
-		c.Assert(resources.IsTruncated, check.Equals, true)
+		c.Assert(len(result.Objects), check.Equals, 5)
+		c.Assert(result.IsTruncated, check.Equals, true)
	}
	// check paging with prefix at end returns less objects
	{
		_, err = fs.CreateObject("bucket", "newPrefix", "", int64(len("prefix1")), bytes.NewBufferString("prefix1"), nil)
		c.Assert(err, check.IsNil)
-		_, err = fs.CreateObject("bucket", "newPrefix2", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
+		fs.CreateObject("bucket", "newPrefix2", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
		c.Assert(err, check.IsNil)
-		resources.Prefix = "new"
-		resources.Maxkeys = 5
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "new", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, 2)
+		c.Assert(len(result.Objects), check.Equals, 2)
	}

	// check ordering of pages
	{
-		resources.Prefix = ""
-		resources.Maxkeys = 1000
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "", 1000)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(objects[2].Object, check.Equals, "obj0")
-		c.Assert(objects[3].Object, check.Equals, "obj1")
-		c.Assert(objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
	}

	// check delimited results with delimiter and prefix
@@ -226,72 +216,48 @@ func testPaging(c *check.C, create func() Filesystem) {
		c.Assert(err, check.IsNil)
		_, err = fs.CreateObject("bucket", "this/is/also/a/delimited/file", "", int64(len("prefix2")), bytes.NewBufferString("prefix2"), nil)
		c.Assert(err, check.IsNil)
-		var prefixes []string
-		resources.CommonPrefixes = prefixes // allocate new everytime
-		resources.Delimiter = "/"
-		resources.Prefix = "this/is/"
-		resources.Maxkeys = 10
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "this/is/", "", "/", 10)
		c.Assert(err, check.IsNil)
-		c.Assert(len(objects), check.Equals, 1)
-		c.Assert(resources.CommonPrefixes[0], check.Equals, "this/is/also/")
+		c.Assert(len(result.Objects), check.Equals, 1)
+		c.Assert(result.Prefixes[0], check.Equals, "this/is/also/")
	}
	time.Sleep(time.Second)

	// check delimited results with delimiter without prefix
	{
-		var prefixes []string
-		resources.CommonPrefixes = prefixes // allocate new everytime
-		resources.Delimiter = "/"
-		resources.Prefix = ""
-		resources.Maxkeys = 1000
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "", "/", 1000)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(objects[1].Object, check.Equals, "newPrefix2")
-		c.Assert(objects[2].Object, check.Equals, "obj0")
-		c.Assert(objects[3].Object, check.Equals, "obj1")
-		c.Assert(objects[4].Object, check.Equals, "obj10")
-		c.Assert(resources.CommonPrefixes[0], check.Equals, "this/")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[3].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[4].Object, check.Equals, "obj10")
+		c.Assert(result.Prefixes[0], check.Equals, "this/")
	}

	// check results with Marker
	{
-		var prefixes []string
-		resources.CommonPrefixes = prefixes // allocate new everytime
-		resources.Prefix = ""
-		resources.Marker = "newPrefix"
-		resources.Delimiter = ""
-		resources.Maxkeys = 3
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "", "newPrefix", "", 3)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix2")
-		c.Assert(objects[1].Object, check.Equals, "obj0")
-		c.Assert(objects[2].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[1].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj1")
	}
	// check ordering of results with prefix
	{
-		resources.Prefix = "obj"
-		resources.Delimiter = ""
-		resources.Marker = ""
-		resources.Maxkeys = 1000
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "obj", "", "", 1000)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "obj0")
-		c.Assert(objects[1].Object, check.Equals, "obj1")
-		c.Assert(objects[2].Object, check.Equals, "obj10")
-		c.Assert(objects[3].Object, check.Equals, "obj2")
-		c.Assert(objects[4].Object, check.Equals, "obj3")
+		c.Assert(result.Objects[0].Object, check.Equals, "obj0")
+		c.Assert(result.Objects[1].Object, check.Equals, "obj1")
+		c.Assert(result.Objects[2].Object, check.Equals, "obj10")
+		c.Assert(result.Objects[3].Object, check.Equals, "obj2")
+		c.Assert(result.Objects[4].Object, check.Equals, "obj3")
	}
	// check ordering of results with prefix and no paging
	{
-		resources.Prefix = "new"
-		resources.Marker = ""
-		resources.Maxkeys = 5
-		objects, resources, err = fs.ListObjects("bucket", resources)
+		result, err = fs.ListObjects("bucket", "new", "", "", 5)
		c.Assert(err, check.IsNil)
-		c.Assert(objects[0].Object, check.Equals, "newPrefix")
-		c.Assert(objects[1].Object, check.Equals, "newPrefix2")
+		c.Assert(result.Objects[0].Object, check.Equals, "newPrefix")
+		c.Assert(result.Objects[1].Object, check.Equals, "newPrefix2")
	}
}

@@ -416,11 +382,10 @@ func testListBucketsOrder(c *check.C, create func() Filesystem) {

func testListObjectsTestsForNonExistantBucket(c *check.C, create func() Filesystem) {
	fs := create()
-	resources := BucketResourcesMetadata{Prefix: "", Maxkeys: 1000}
-	objects, resources, err := fs.ListObjects("bucket", resources)
+	result, err := fs.ListObjects("bucket", "", "", "", 1000)
	c.Assert(err, check.Not(check.IsNil))
-	c.Assert(resources.IsTruncated, check.Equals, false)
-	c.Assert(len(objects), check.Equals, 0)
+	c.Assert(result.IsTruncated, check.Equals, false)
+	c.Assert(len(result.Objects), check.Equals, 0)
}

func testNonExistantObjectInBucket(c *check.C, create func() Filesystem) {
@@ -118,19 +118,15 @@ type BucketMultipartResourcesMetadata struct {
	CommonPrefixes []string
}

-// BucketResourcesMetadata - various types of bucket resources
-type BucketResourcesMetadata struct {
-	Prefix string
-	Marker string
-	NextMarker string
-	Maxkeys int
-	EncodingType string
-	Delimiter string
-	IsTruncated bool
-	CommonPrefixes []string
+// ListObjectsResult - container for list object request results.
+type ListObjectsResult struct {
+	IsTruncated bool
+	NextMarker string
+	Objects []ObjectMetadata
+	Prefixes []string
}

-type ListObjectsReq struct {
+type listObjectsReq struct {
	Bucket string
	Prefix string
	Marker string
@@ -138,21 +134,14 @@ type ListObjectsReq struct {
	MaxKeys int
}

-type ListObjectsResp struct {
-	IsTruncated bool
-	NextMarker string
-	Objects []ObjectMetadata
-	Prefixes []string
-}
-
type listServiceReq struct {
-	req ListObjectsReq
-	respCh chan ListObjectsResp
+	req listObjectsReq
+	respCh chan ListObjectsResult
}

type listWorkerReq struct {
-	req ListObjectsReq
-	respCh chan ListObjectsResp
+	req listObjectsReq
+	respCh chan ListObjectsResult
}

// CompletePart - completed part container
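
A compact sketch of the request/response hand-off these types imply: each caller supplies its own response channel and a long-lived service goroutine answers on it. The types are re-declared here (with ObjectMetadata trimmed) only so the sketch compiles on its own; the serve stub is not the commit's actual dispatch logic.

package main

import "fmt"

type ObjectMetadata struct {
	Object string // Created, Mode and Size elided in this sketch
}

type listObjectsReq struct {
	Bucket, Prefix, Marker, Delimiter string
	MaxKeys                           int
}

type ListObjectsResult struct {
	IsTruncated bool
	NextMarker  string
	Objects     []ObjectMetadata
	Prefixes    []string
}

type listServiceReq struct {
	req    listObjectsReq
	respCh chan ListObjectsResult
}

// serve answers every request with an empty result; the real list service
// would dispatch to a per-request list worker instead.
func serve(ch <-chan listServiceReq) {
	for serviceReq := range ch {
		serviceReq.respCh <- ListObjectsResult{}
	}
}

func main() {
	serviceCh := make(chan listServiceReq)
	go serve(serviceCh)
	respCh := make(chan ListObjectsResult)
	serviceCh <- listServiceReq{req: listObjectsReq{Bucket: "bucket", MaxKeys: 1000}, respCh: respCh}
	fmt.Printf("%+v\n", <-respCh)
}
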
@@ -1,5 +1,5 @@
/*
- * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ * Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@ package fs

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
@@ -27,74 +26,88 @@ import (
	"github.com/minio/minio-xl/pkg/probe"
)

-func (fs Filesystem) listWorker(startReq ListObjectsReq) (chan<- listWorkerReq, *probe.Error) {
-	Separator := string(os.PathSeparator)
+func (fs Filesystem) listWorker(startReq listObjectsReq) (chan<- listWorkerReq, *probe.Error) {
	bucket := startReq.Bucket
	prefix := startReq.Prefix
	marker := startReq.Marker
	delimiter := startReq.Delimiter
-	quit := make(chan bool)
-	if marker != "" {
-		return nil, probe.NewError(errors.New("Not supported"))
-	}
-	if delimiter != "" && delimiter != Separator {
-		return nil, probe.NewError(errors.New("Not supported"))
-	}
+	quitWalker := make(chan bool)
	reqCh := make(chan listWorkerReq)
	walkerCh := make(chan ObjectMetadata)
	go func() {
-		rootPath := filepath.Join(fs.path, bucket, prefix)
-		stripPath := filepath.Join(fs.path, bucket) + Separator
+		var rootPath string
+		bucketPath := filepath.Join(fs.path, bucket)
+		trimBucketPathPrefix := bucketPath + string(os.PathSeparator)
+		prefixPath := trimBucketPathPrefix + prefix
+		st, err := os.Stat(prefixPath)
+		if err != nil && os.IsNotExist(err) {
+			rootPath = bucketPath
+		} else {
+			if st.IsDir() && !strings.HasSuffix(prefix, delimiter) {
+				rootPath = bucketPath
+			} else {
+				rootPath = prefixPath
+			}
+		}
		filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {
			if path == rootPath {
				return nil
			}
			if info.IsDir() {
-				path = path + Separator
+				path = path + string(os.PathSeparator)
			}
-			objectName := strings.TrimPrefix(path, stripPath)
-			object := ObjectMetadata{
-				Object: objectName,
-				Created: info.ModTime(),
-				Mode: info.Mode(),
-				Size: info.Size(),
-			}
-			select {
-			case walkerCh <- object:
-				// do nothings
-			case <-quit:
-				fmt.Println("walker got quit")
-				// returning error ends the Walk()
-				return errors.New("Ending")
-			}
-			if delimiter == Separator && info.IsDir() {
-				return filepath.SkipDir
+			objectName := strings.TrimPrefix(path, trimBucketPathPrefix)
+			if strings.HasPrefix(objectName, prefix) {
+				if marker >= objectName {
+					return nil
+				}
+				object := ObjectMetadata{
+					Object: objectName,
+					Created: info.ModTime(),
+					Mode: info.Mode(),
+					Size: info.Size(),
+				}
+				select {
+				case walkerCh <- object:
+					// Do nothing
+				case <-quitWalker:
+					// Returning error ends the Walk()
+					return errors.New("Ending")
+				}
+				if delimiter != "" && info.IsDir() {
+					return filepath.SkipDir
+				}
			}
			return nil
		})
		close(walkerCh)
	}()
	go func() {
-		resp := ListObjectsResp{}
+		resp := ListObjectsResult{}
		for {
			select {
			case <-time.After(10 * time.Second):
-				fmt.Println("worker got timeout")
-				quit <- true
-				timeoutReq := ListObjectsReq{bucket, prefix, marker, delimiter, 0}
+				fmt.Println("after timeout", fs)
+				quitWalker <- true
+				timeoutReq := listObjectsReq{bucket, prefix, marker, delimiter, 0}
				fs.timeoutReqCh <- timeoutReq
				// FIXME: can there be a race such that sender on reqCh panics?
				return
-			case req := <-reqCh:
-				resp = ListObjectsResp{}
+			case req, ok := <-reqCh:
+				if !ok {
+					return
				}
+				resp = ListObjectsResult{}
				resp.Objects = make([]ObjectMetadata, 0)
				resp.Prefixes = make([]string, 0)
				count := 0
				for object := range walkerCh {
					if count == req.req.MaxKeys {
						resp.IsTruncated = true
						break
					}
					if object.Mode.IsDir() {
						if delimiter == "" {
-							// skip directories for recursive list
+							// Skip directories for recursive list
							continue
						}
						resp.Prefixes = append(resp.Prefixes, object.Object)
@@ -103,13 +116,7 @@ func (fs Filesystem) listWorker(startReq ListObjectsReq) (chan<- listWorkerReq,
					}
					resp.NextMarker = object.Object
					count++
-					if count == req.req.MaxKeys {
-						resp.IsTruncated = true
-						break
-					}
				}
-				fmt.Println("response objects: ", len(resp.Objects))
-				marker = resp.NextMarker
				req.respCh <- resp
			}
		}
@@ -118,9 +125,8 @@ func (fs Filesystem) listWorker(startReq ListObjectsReq) (chan<- listWorkerReq,
}

func (fs *Filesystem) startListService() *probe.Error {
-	fmt.Println("startListService starting")
	listServiceReqCh := make(chan listServiceReq)
-	timeoutReqCh := make(chan ListObjectsReq)
+	timeoutReqCh := make(chan listObjectsReq)
	reqToListWorkerReqCh := make(map[string](chan<- listWorkerReq))
	reqToStr := func(bucket string, prefix string, marker string, delimiter string) string {
		return strings.Join([]string{bucket, prefix, marker, delimiter}, ":")
@@ -129,7 +135,6 @@ func (fs *Filesystem) startListService() *probe.Error {
		for {
			select {
			case timeoutReq := <-timeoutReqCh:
-				fmt.Println("listservice got timeout on ", timeoutReq)
				reqStr := reqToStr(timeoutReq.Bucket, timeoutReq.Prefix, timeoutReq.Marker, timeoutReq.Delimiter)
				listWorkerReqCh, ok := reqToListWorkerReqCh[reqStr]
				if ok {
@@ -137,27 +142,22 @@ func (fs *Filesystem) startListService() *probe.Error {
				}
				delete(reqToListWorkerReqCh, reqStr)
			case serviceReq := <-listServiceReqCh:
-				fmt.Println("serviceReq received", serviceReq)
-				fmt.Println("sending to listservicereqch", fs)
				reqStr := reqToStr(serviceReq.req.Bucket, serviceReq.req.Prefix, serviceReq.req.Marker, serviceReq.req.Delimiter)
				listWorkerReqCh, ok := reqToListWorkerReqCh[reqStr]
				if !ok {
					var err *probe.Error
					listWorkerReqCh, err = fs.listWorker(serviceReq.req)
					if err != nil {
-						fmt.Println("listWorker returned error", err)
-						serviceReq.respCh <- ListObjectsResp{}
+						serviceReq.respCh <- ListObjectsResult{}
						return
					}
					reqToListWorkerReqCh[reqStr] = listWorkerReqCh
				}
-				respCh := make(chan ListObjectsResp)
+				respCh := make(chan ListObjectsResult)
				listWorkerReqCh <- listWorkerReq{serviceReq.req, respCh}
				resp, ok := <-respCh
				if !ok {
-					serviceReq.respCh <- ListObjectsResp{}
-					fmt.Println("listWorker resp was not ok")
+					serviceReq.respCh <- ListObjectsResult{}
					return
				}
				delete(reqToListWorkerReqCh, reqStr)
@@ -177,13 +177,12 @@ func (fs *Filesystem) startListService() *probe.Error {
}

// ListObjects -
-func (fs Filesystem) ListObjects(bucket string, req ListObjectsReq) (ListObjectsResp, *probe.Error) {
+func (fs Filesystem) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsResult, *probe.Error) {
	fs.lock.Lock()
	defer fs.lock.Unlock()

-	Separator := string(os.PathSeparator)
	if !IsValidBucketName(bucket) {
-		return ListObjectsResp{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
+		return ListObjectsResult{}, probe.NewError(BucketNameInvalid{Bucket: bucket})
	}

	bucket = fs.denormalizeBucket(bucket)
@@ -191,39 +190,34 @@ func (fs Filesystem) ListObjects(bucket string, req ListObjectsReq) (ListObjects
	// check bucket exists
	if _, e := os.Stat(rootPrefix); e != nil {
		if os.IsNotExist(e) {
-			return ListObjectsResp{}, probe.NewError(BucketNotFound{Bucket: bucket})
+			return ListObjectsResult{}, probe.NewError(BucketNotFound{Bucket: bucket})
		}
-		return ListObjectsResp{}, probe.NewError(e)
-	}
-
-	canonicalize := func(str string) string {
-		return strings.Replace(str, "/", string(os.PathSeparator), -1)
-	}
-	decanonicalize := func(str string) string {
-		return strings.Replace(str, string(os.PathSeparator), "/", -1)
+		return ListObjectsResult{}, probe.NewError(e)
	}

+	req := listObjectsReq{}
	req.Bucket = bucket
-	req.Prefix = canonicalize(req.Prefix)
-	req.Marker = canonicalize(req.Marker)
-	req.Delimiter = canonicalize(req.Delimiter)
+	req.Prefix = filepath.FromSlash(prefix)
+	req.Marker = filepath.FromSlash(marker)
+	req.Delimiter = filepath.FromSlash(delimiter)
+	req.MaxKeys = maxKeys

-	if req.Delimiter != "" && req.Delimiter != Separator {
-		return ListObjectsResp{}, probe.NewError(errors.New("not supported"))
-	}

-	respCh := make(chan ListObjectsResp)
+	respCh := make(chan ListObjectsResult)
	fs.listServiceReqCh <- listServiceReq{req, respCh}
	resp := <-respCh

	for i := 0; i < len(resp.Prefixes); i++ {
-		resp.Prefixes[i] = decanonicalize(resp.Prefixes[i])
+		resp.Prefixes[i] = filepath.ToSlash(resp.Prefixes[i])
	}
	for i := 0; i < len(resp.Objects); i++ {
-		resp.Objects[i].Object = decanonicalize(resp.Objects[i].Object)
+		resp.Objects[i].Object = filepath.ToSlash(resp.Objects[i].Object)
	}
	if req.Delimiter == "" {
		// unset NextMaker for recursive list
		// This element is set only if you have delimiter set.
		// If response does not include the NextMaker and it is
		// truncated, you can use the value of the last Key in the
		// response as the marker in the subsequent request to get the
		// next set of object keys.
		resp.NextMarker = ""
	}
	return resp, nil
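
The worker lifecycle above (one goroutine per in-flight listing, keyed by bucket/prefix/marker/delimiter and torn down after an idle timeout) can be reduced to the following hedged sketch. Names are simplified and the filesystem walk is replaced by an in-memory key slice so only the shape of the select loop remains; this is not the commit's exact implementation.

package main

import (
	"fmt"
	"time"
)

type result struct{ keys []string }

type workerReq struct {
	maxKeys int
	respCh  chan result
}

// startWorker streams pages of keys to callers and retires itself via the
// timeout channel if no follow-up request arrives in time.
func startWorker(keys []string, timeoutCh chan<- string, id string) chan<- workerReq {
	reqCh := make(chan workerReq)
	go func() {
		next := 0
		for {
			select {
			case <-time.After(10 * time.Second):
				// Nobody asked for the next page in time; let the
				// service forget this worker.
				timeoutCh <- id
				return
			case req := <-reqCh:
				end := next + req.maxKeys
				if end > len(keys) {
					end = len(keys)
				}
				req.respCh <- result{keys: keys[next:end]}
				next = end
			}
		}
	}()
	return reqCh
}

func main() {
	timeoutCh := make(chan string, 1)
	worker := startWorker([]string{"obj0", "obj1", "obj2"}, timeoutCh, "bucket::::")
	respCh := make(chan result)
	worker <- workerReq{maxKeys: 2, respCh: respCh}
	fmt.Println((<-respCh).keys) // [obj0 obj1]
	worker <- workerReq{maxKeys: 2, respCh: respCh}
	fmt.Println((<-respCh).keys) // [obj2]
}
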
pkg/fs/fs.go
@@ -34,7 +34,7 @@ type Filesystem struct {
	multiparts *Multiparts
	buckets *Buckets
	listServiceReqCh chan<- listServiceReq
-	timeoutReqCh chan<- ListObjectsReq
+	timeoutReqCh chan<- listObjectsReq
}

// Buckets holds acl information
@@ -94,10 +94,10 @@ func New(rootPath string) (Filesystem, *probe.Error) {
			return Filesystem{}, err.Trace()
		}
	}
-	a := Filesystem{lock: new(sync.Mutex)}
-	a.path = rootPath
-	a.multiparts = multiparts
-	a.buckets = buckets
+	fs := Filesystem{lock: new(sync.Mutex)}
+	fs.path = rootPath
+	fs.multiparts = multiparts
+	fs.buckets = buckets
	/// Defaults

	// maximum buckets to be listed from list buckets.
@@ -105,6 +105,7 @@ func New(rootPath string) (Filesystem, *probe.Error) {
	// minium free disk required for i/o operations to succeed.
	fs.minFreeDisk = 10

+	// Start list goroutine.
	err = fs.startListService()
	if err != nil {
		return Filesystem{}, err.Trace(rootPath)
@@ -159,24 +159,6 @@ func createConfigPath() *probe.Error {
	return nil
}

-// isAuthConfigFileExists is auth config file exists?
-func isConfigFileExists() bool {
-	if _, err := os.Stat(mustGetConfigFile()); err != nil {
-		if os.IsNotExist(err) {
-			return false
-		}
-		panic(err)
-	}
-	return true
-}
-
-// mustGetConfigFile always get users config file, if not panic
-func mustGetConfigFile() string {
-	configFile, err := getConfigFile()
-	fatalIf(err.Trace(), "Unable to get config file.", nil)
-	return configFile
-}
-
// getConfigFile get users config file
func getConfigFile() (string, *probe.Error) {
	configPath, err := getConfigPath()
@@ -126,16 +126,17 @@ var ignoredHeaders = map[string]bool{
}

func (s *MyAPIFSCacheSuite) newRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) {
	if method == "" {
		method = "POST"
	}
	t := time.Now().UTC()

	req, err := http.NewRequest(method, urlStr, nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("x-amz-date", t.Format(iso8601Format))
	if method == "" {
		method = "POST"
	}

	// add Content-Length
	req.ContentLength = contentLength
@@ -74,12 +74,6 @@ const (
	minioUpdateExperimentalURL = "https://dl.minio.io/server/minio/experimental/"
)

-// minioUpdates container to hold updates json.
-type minioUpdates struct {
-	BuildDate string
-	Platforms map[string]string
-}
-
// updateMessage container to hold update messages.
type updateMessage struct {
	Status string `json:"status"`