Merge pull request #1178 from harshavardhana/list-objects

list: Fix handling of maxKeys and prefixes.
This commit is contained in:
Harshavardhana 2016-03-01 17:55:19 -08:00
commit dc622674e1
3 changed files with 53 additions and 56 deletions

View File

@ -174,22 +174,20 @@ func testPaging(c *check.C, create func() Filesystem) {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
// TODO result, err = fs.ListObjects("bucket", "", "", "", 5)
//result, err = fs.ListObjects("bucket", "", "", "", 5) c.Assert(err, check.IsNil)
//c.Assert(err, check.IsNil) c.Assert(len(result.Objects), check.Equals, i+1)
//c.Assert(len(result.Objects), check.Equals, i+1) c.Assert(result.IsTruncated, check.Equals, false)
//c.Assert(result.IsTruncated, check.Equals, false)
} }
// check after paging occurs pages work // check after paging occurs pages work
for i := 6; i <= 10; i++ { for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
// TODO result, err = fs.ListObjects("bucket", "obj", "", "", 5)
//result, err = fs.ListObjects("bucket", "obj", "", "", 5) c.Assert(err, check.IsNil)
//c.Assert(err, check.IsNil) c.Assert(len(result.Objects), check.Equals, 5)
//c.Assert(len(result.Objects), check.Equals, 5) c.Assert(result.IsTruncated, check.Equals, true)
//c.Assert(result.IsTruncated, check.Equals, true)
} }
// check paging with prefix at end returns less objects // check paging with prefix at end returns less objects
{ {

View File

@ -173,22 +173,20 @@ func testPaging(c *check.C, create func() Filesystem) {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
// TODO result, err = fs.ListObjects("bucket", "", "", "", 5)
//result, err = fs.ListObjects("bucket", "", "", "", 5) c.Assert(err, check.IsNil)
//c.Assert(err, check.IsNil) c.Assert(len(result.Objects), check.Equals, i+1)
//c.Assert(len(result.Objects), check.Equals, i+1) c.Assert(result.IsTruncated, check.Equals, false)
//c.Assert(result.IsTruncated, check.Equals, false)
} }
// check after paging occurs pages work // check after paging occurs pages work
for i := 6; i <= 10; i++ { for i := 6; i <= 10; i++ {
key := "obj" + strconv.Itoa(i) key := "obj" + strconv.Itoa(i)
_, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil) _, err = fs.CreateObject("bucket", key, "", int64(len(key)), bytes.NewBufferString(key), nil)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
// TODO result, err = fs.ListObjects("bucket", "", "", "", 5)
//result, err = fs.ListObjects("bucket", "", "", "", 5) c.Assert(err, check.IsNil)
//c.Assert(err, check.IsNil) c.Assert(len(result.Objects), check.Equals, 5)
//c.Assert(len(result.Objects), check.Equals, 5) c.Assert(result.IsTruncated, check.Equals, true)
//c.Assert(result.IsTruncated, check.Equals, true)
} }
// check paging with prefix at end returns less objects // check paging with prefix at end returns less objects
{ {

View File

@ -60,7 +60,7 @@ type listWorkerReq struct {
func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKeys int) (chan<- listWorkerReq, *probe.Error) { func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKeys int) (chan<- listWorkerReq, *probe.Error) {
quitWalker := make(chan bool) quitWalker := make(chan bool)
reqCh := make(chan listWorkerReq) reqCh := make(chan listWorkerReq)
walkerCh := make(chan ObjectMetadata, 1000) walkerCh := make(chan ObjectMetadata, 2000)
go func() { go func() {
defer close(walkerCh) defer close(walkerCh)
var walkPath string var walkPath string
@ -81,6 +81,7 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
} }
} }
ioutils.FTW(walkPath, func(path string, info os.FileInfo, e error) error { ioutils.FTW(walkPath, func(path string, info os.FileInfo, e error) error {
// For any error return right here.
if e != nil { if e != nil {
return e return e
} }
@ -88,10 +89,11 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
if strings.Contains(path, "$multiparts") || strings.Contains(path, "$tmpobject") { if strings.Contains(path, "$multiparts") || strings.Contains(path, "$tmpobject") {
return nil return nil
} }
// We don't need to list the walk path. // We don't need to list the walk path if its a directory.
if path == walkPath { if path == walkPath && info.IsDir() {
return nil return nil
} }
// Skip all directories if there is no delimiter.
if info.IsDir() && delimiter == "" { if info.IsDir() && delimiter == "" {
return nil return nil
} }
@ -115,8 +117,7 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
// Returning error ends the file tree Walk(). // Returning error ends the file tree Walk().
return errors.New("Quit list worker.") return errors.New("Quit list worker.")
} }
// If delimiter is set, we stop if current path is a // If delimiter is set, we stop if current path is a directory.
// directory.
if delimiter != "" && info.IsDir() { if delimiter != "" && info.IsDir() {
return ioutils.ErrSkipDir return ioutils.ErrSkipDir
} }
@ -128,9 +129,9 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
go func() { go func() {
for { for {
select { select {
// Timeout after 1 seconds if request did not arrive for // Timeout after 30 seconds if request did not arrive for
// the given list parameters. // the given list parameters.
case <-time.After(1 * time.Second): case <-time.After(30 * time.Second):
quitWalker <- true // Quit file path walk if running. quitWalker <- true // Quit file path walk if running.
// Send back the hash for this request. // Send back the hash for this request.
fs.timeoutReqCh <- fnvSum(bucket, prefix, marker, delimiter) fs.timeoutReqCh <- fnvSum(bucket, prefix, marker, delimiter)
@ -143,7 +144,32 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
} }
resp := ListObjectsResult{} resp := ListObjectsResult{}
var count int var count int
for object := range walkerCh { for {
// We have read all the keys necessary by now. We
// cleanly break out.
if count == maxKeys {
if delimiter != "" {
// Set the next marker for the next request.
// This element is set only if you have delimiter set.
// If response does not include the NextMarker and it is
// truncated, you can use the value of the last Key in the
// response as the marker in the subsequent request to get the
// next set of object keys.
if len(resp.Objects) > 0 {
// NextMarker is only set when there
// are more than maxKeys worth of
// objects for a given prefix path.
resp.NextMarker = resp.Objects[len(resp.Objects)-1:][0].Object
}
}
resp.IsTruncated = len(walkerCh) > 0
break
}
object, walkerOK := <-walkerCh
// If the channel is closed return right here.
if !walkerOK {
break
}
// Verify if the object is lexically smaller than // Verify if the object is lexically smaller than
// the marker, we will skip those objects. // the marker, we will skip those objects.
if marker != "" { if marker != "" {
@ -162,36 +188,15 @@ func (fs Filesystem) listObjects(bucket, prefix, marker, delimiter string, maxKe
if object.Mode.IsDir() { if object.Mode.IsDir() {
resp.Prefixes = append(resp.Prefixes, object.Object) resp.Prefixes = append(resp.Prefixes, object.Object)
} else { } else {
// Rest of them are treated as files. // Rest of them are treated as objects.
resp.Objects = append(resp.Objects, object) resp.Objects = append(resp.Objects, object)
} }
} else { } else {
// In-case of no delimiters, there are no // In-case of no delimiters, there are no
// prefixes all are considered to be objects. // prefixes - all are considered to be objects.
resp.Objects = append(resp.Objects, object) resp.Objects = append(resp.Objects, object)
} }
count++ // Bump the counter count++ // Bump the number.
// Verify if we have reached the maxKeys requested.
if count == maxKeys {
if delimiter != "" {
// Set the next marker for the next request.
// This element is set only if you have delimiter set.
// If response does not include the NextMaker and it is
// truncated, you can use the value of the last Key in the
// response as the marker in the subsequent request to get the
// next set of object keys.
if len(resp.Objects) > 0 {
// NextMarker is only set when there
// are more than maxKeys worth of
// objects for a given prefix path.
resp.NextMarker = resp.Objects[len(resp.Objects)-1:][0].Object
}
}
// Set truncated boolean to indicate the
// client to send the next batch of requests.
resp.IsTruncated = true
break
}
} }
// Set the marker right here for the new set of the // Set the marker right here for the new set of the
// values coming in from the client. // values coming in from the client.
@ -271,10 +276,6 @@ func (fs *Filesystem) listObjectsService() *probe.Error {
delete(reqToListWorkerReqCh, reqHash) delete(reqToListWorkerReqCh, reqHash)
if !resp.IsTruncated { if !resp.IsTruncated {
close(listWorkerReqCh) close(listWorkerReqCh)
} else {
nextMarker := resp.NextMarker
reqHash = fnvSum(bucket, prefix, nextMarker, delimiter)
reqToListWorkerReqCh[reqHash] = listWorkerReqCh
} }
srvReq.respCh <- resp srvReq.respCh <- resp
} }