/*
 * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"path/filepath"
	"sort"
	"strings"

	"github.com/minio/minio/pkg/errors"
)
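
// listDirHealFactory - factory that returns a listDir function which merges
// directory entries from all the given disks. Unlike a regular listing,
// entries present on only a subset of disks are retained, so that objects
// missing on some disks remain visible to the heal tree walk.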
func listDirHealFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc {
	// Returns sorted merged entries from all the disks.
	listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string, delayIsLeaf bool, err error) {
		for _, disk := range disks {
			if disk == nil {
				continue
			}
			var entries []string
			var newEntries []string
			entries, err = disk.ListDir(bucket, prefixDir)
			if err != nil {
				continue
			}

			// Filter entries that have the prefix prefixEntry.
			entries = filterMatchingPrefix(entries, prefixEntry)

			// The isLeaf() check has to happen here so that the
			// trailing "/" for objects can be removed.
			for i, entry := range entries {
				if isLeaf(bucket, pathJoin(prefixDir, entry)) {
					entries[i] = strings.TrimSuffix(entry, slashSeparator)
				}
			}

			// Find elements in entries which are not in mergedEntries.
			for _, entry := range entries {
				idx := sort.SearchStrings(mergedEntries, entry)
				// If the entry is already present in mergedEntries, don't add it again.
				if idx < len(mergedEntries) && mergedEntries[idx] == entry {
					continue
				}
				newEntries = append(newEntries, entry)
			}

			if len(newEntries) > 0 {
				// Merge the new entries and keep mergedEntries sorted.
				mergedEntries = append(mergedEntries, newEntries...)
				sort.Strings(mergedEntries)
			}
		}
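		// delayIsLeaf is always false here: isLeaf() has already been
		// applied to every entry above.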
		return mergedEntries, false, nil
	}
	return listDir
}

// listObjectsHeal - wrapper function implemented over file tree walk.
func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
	// Default is recursive; if a delimiter is set, list non-recursively.
	recursive := true
	if delimiter == slashSeparator {
		recursive = false
	}

	// "heal" is true for listObjectsHeal() and false for listObjects().
	heal := true
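	// Attempt to resume a previous tree walk from the pool; a fresh
	// walk is started below when none is available.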
	walkResultCh, endWalkCh := xl.listPool.Release(listParams{bucket, recursive, marker, prefix, heal})
	if walkResultCh == nil {
		endWalkCh = make(chan struct{})
		isLeaf := xl.isObject
		listDir := listDirHealFactory(isLeaf, xl.storageDisks...)
		walkResultCh = startTreeWalk(bucket, prefix, marker, recursive, listDir, nil, endWalkCh)
	}

	var objInfos []ObjectInfo
	var eof bool
	var nextMarker string
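	// Collect up to maxKeys entries from the tree walk.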
	for i := 0; i < maxKeys; {
		walkResult, ok := <-walkResultCh
		if !ok {
			// Closed channel.
			eof = true
			break
		}
		// For any walk error, return right away.
		if walkResult.err != nil {
			return loi, toObjectErr(walkResult.err, bucket, prefix)
		}
		entry := walkResult.entry
		var objInfo ObjectInfo
		if hasSuffix(entry, slashSeparator) {
			// Object name needs to be the full path.
			objInfo.Bucket = bucket
			objInfo.Name = entry
			objInfo.IsDir = true
		} else {
			var err error
			objInfo, err = xl.getObjectInfo(bucket, entry)
			if err != nil {
				// Ignore errFileNotFound.
				if errors.Cause(err) == errFileNotFound {
					continue
				}
				return loi, toObjectErr(err, bucket, prefix)
			}
		}
		nextMarker = objInfo.Name
		objInfos = append(objInfos, objInfo)
		i++
		if walkResult.end {
			eof = true
			break
		}
	}
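
	// Save the in-progress walker back into the pool so a subsequent
	// continuation can reuse it when the listing was truncated.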
	params := listParams{bucket, recursive, nextMarker, prefix, heal}
	if !eof {
		xl.listPool.Set(params, walkResultCh, endWalkCh)
	}

	result := ListObjectsInfo{IsTruncated: !eof}
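	// Sweep the collected entries: directories become prefixes, and only
	// objects that actually need healing are returned.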
	for _, objInfo := range objInfos {
		result.NextMarker = objInfo.Name
		if objInfo.IsDir {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}

		// Check if the current object needs healing.
		objectLock := globalNSMutex.NewNSLock(bucket, objInfo.Name)
		if err := objectLock.GetRLock(globalHealingTimeout); err != nil {
			return loi, err
		}
		partsMetadata, errs := readAllXLMetadata(xl.storageDisks, bucket, objInfo.Name)
		if xlShouldHeal(xl.storageDisks, partsMetadata, errs, bucket, objInfo.Name) {
			healStat := xlHealStat(xl, partsMetadata, errs)
			result.Objects = append(result.Objects, ObjectInfo{
				Name:           objInfo.Name,
				ModTime:        objInfo.ModTime,
				Size:           objInfo.Size,
				IsDir:          false,
				HealObjectInfo: &healStat,
			})
		}
		objectLock.RUnlock()
	}
	return result, nil
}

// ListObjectsHeal - list all objects at prefix that need healing, delimited by '/'.
func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
	if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil {
		return loi, err
	}

	// With max keys of zero we have reached eof; return right here.
	if maxKeys == 0 {
		return loi, nil
	}

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to the S3 spec we stop at the 'delimiter' along
	// with the prefix. On a flat namespace with 'prefix' as '/'
	// we don't have any entries, since all the keys are of form 'keyName/...'
	if delimiter == slashSeparator && prefix == slashSeparator {
		return loi, nil
	}

	// Overflowing count - reset to maxObjectList.
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}

	// Initiate a list operation; if successful, filter and return quickly.
	listObjInfo, err := xl.listObjectsHeal(bucket, prefix, marker, delimiter, maxKeys)
	if err == nil {
		// We got the entries successfully, return.
		return listObjInfo, nil
	}

	// Return error at the end.
	return loi, toObjectErr(err, bucket, prefix)
}

// ListUploadsHeal - lists ongoing multipart uploads that require
// healing on one or more disks.
func (xl xlObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
	delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to the S3 spec we stop at the 'delimiter' along
	// with the prefix. On a flat namespace with 'prefix' as '/'
	// we don't have any entries, since all the keys are of form 'keyName/...'
	if delimiter == slashSeparator && prefix == slashSeparator {
		return lmi, nil
	}

	// Initiate a list operation.
	listMultipartInfo, err := xl.listMultipartUploadsHeal(bucket, prefix,
		marker, uploadIDMarker, delimiter, maxUploads)
	if err != nil {
		return lmi, toObjectErr(err, bucket, prefix)
	}

	// We got the entries successfully, return.
	return listMultipartInfo, nil
}

// fetchMultipartUploadIDs - fetches the list of multipart uploadIDs for the
// given bucket, keyMarker and uploadIDMarker.
func fetchMultipartUploadIDs(bucket, keyMarker, uploadIDMarker string,
	maxUploads int, disks []StorageAPI) (uploads []MultipartInfo, end bool,
	err error) {

	// Hold a read lock on keyMarker path.
	keyMarkerLock := globalNSMutex.NewNSLock(minioMetaMultipartBucket,
		pathJoin(bucket, keyMarker))
	if err = keyMarkerLock.GetRLock(globalHealingTimeout); err != nil {
		return uploads, end, err
	}
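	// Try disks one after another; move on to the next disk only when
	// the error is ignorable for metadata operations.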
	for _, disk := range disks {
		if disk == nil {
			continue
		}
		uploads, end, err = listMultipartUploadIDs(bucket, keyMarker,
			uploadIDMarker, maxUploads, disk)
		if err == nil ||
			!errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
			break
		}
	}
	keyMarkerLock.RUnlock()
	return uploads, end, err
}

// listMultipartUploadsHeal - Returns a list of incomplete multipart
// uploads that need to be healed.
func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker,
	uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
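
	// Seed the result with the request parameters; IsTruncated and the
	// markers are fixed up at the end.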
	result := ListMultipartsInfo{
		IsTruncated: true,
		MaxUploads:  maxUploads,
		KeyMarker:   keyMarker,
		Prefix:      prefix,
		Delimiter:   delimiter,
	}
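
	// A '/' delimiter means the listing is non-recursive.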
	recursive := delimiter != slashSeparator

	var uploads []MultipartInfo
	var err error
	// List all upload ids for the given keyMarker, starting from
	// uploadIDMarker.
	if uploadIDMarker != "" {
		uploads, _, err = fetchMultipartUploadIDs(bucket, keyMarker,
			uploadIDMarker, maxUploads, xl.getLoadBalancedDisks())
		if err != nil {
			return lmi, err
		}
		maxUploads = maxUploads - len(uploads)
	}

	// We can't use path.Join() as it strips off the trailing '/'.
	multipartPrefixPath := pathJoin(bucket, prefix)
	// multipartPrefixPath should have a trailing '/' when prefix = "".
	if prefix == "" {
		multipartPrefixPath += slashSeparator
	}

	multipartMarkerPath := ""
	if keyMarker != "" {
		multipartMarkerPath = pathJoin(bucket, keyMarker)
	}

	// `heal bool` is used to differentiate the listing of incomplete
	// uploads (and parts) for healing from a regular listing of
	// incomplete parts by client SDKs or mc-like commands, within
	// the tree-walk pool.
	heal := true
	// The listing is truncated if we have maxUploads entries and
	// there are more entries to be listed.
	truncated := true
	var walkerCh chan treeWalkResult
	var walkerDoneCh chan struct{}
	// Check if we have room left to send more uploads.
	if maxUploads > 0 {
		uploadsLeft := maxUploads

		walkerCh, walkerDoneCh = xl.listPool.Release(listParams{
			bucket:    minioMetaMultipartBucket,
			recursive: recursive,
			marker:    multipartMarkerPath,
			prefix:    multipartPrefixPath,
			heal:      heal,
		})
		if walkerCh == nil {
			walkerDoneCh = make(chan struct{})
			isLeaf := xl.isMultipartUpload
			listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs,
				xl.getLoadBalancedDisks()...)
			walkerCh = startTreeWalk(minioMetaMultipartBucket,
				multipartPrefixPath, multipartMarkerPath,
				recursive, listDir, isLeaf, walkerDoneCh)
		}
		// Collect uploads until the uploadsLeft limit is reached.
		for {
			walkResult, ok := <-walkerCh
			if !ok {
				truncated = false
				break
			}
			// For any error during tree walk, we should return right away.
			if walkResult.err != nil {
				return lmi, walkResult.err
			}

			entry := strings.TrimPrefix(walkResult.entry,
				retainSlash(bucket))
			// Entries ending with '/' are not object directories;
			// record them as prefixes and skip uploadID listing.
			if hasSuffix(walkResult.entry, slashSeparator) {
				uploads = append(uploads, MultipartInfo{
					Object: entry,
				})
				uploadsLeft--
				if uploadsLeft == 0 {
					break
				}
				continue
			}

			// For an object entry we get all its pending
			// uploadIDs.
			var newUploads []MultipartInfo
			var end bool
			uploadIDMarker = ""
			newUploads, end, err = fetchMultipartUploadIDs(bucket, entry, uploadIDMarker,
				uploadsLeft, xl.getLoadBalancedDisks())
			if err != nil {
				return lmi, err
			}
			uploads = append(uploads, newUploads...)
			uploadsLeft -= len(newUploads)
			if end && walkResult.end {
				truncated = false
				break
			}
			if uploadsLeft == 0 {
				break
			}
		}
	}

	// For all received uploads fill in the multiparts result.
	for _, upload := range uploads {
		var objectName string
		var uploadID string
		if hasSuffix(upload.Object, slashSeparator) {
			// All directory entries are common
			// prefixes. For common prefixes, upload ids
			// are empty.
			uploadID = ""
			objectName = upload.Object
			result.CommonPrefixes = append(result.CommonPrefixes, objectName)
		} else {
			// Check if the upload needs healing.
			uploadIDPath := filepath.Join(bucket, upload.Object, upload.UploadID)
			partsMetadata, errs := readAllXLMetadata(xl.storageDisks,
				minioMetaMultipartBucket, uploadIDPath)
			if xlShouldHeal(xl.storageDisks, partsMetadata, errs,
				minioMetaMultipartBucket, uploadIDPath) {

				healUploadInfo := xlHealStat(xl, partsMetadata, errs)
				upload.HealUploadInfo = &healUploadInfo
				result.Uploads = append(result.Uploads, upload)
			}
			uploadID = upload.UploadID
			objectName = upload.Object
		}

		result.NextKeyMarker = objectName
		result.NextUploadIDMarker = uploadID
	}

	if truncated {
		// Put back the tree-walk go-routine into the pool for
		// subsequent use.
		xl.listPool.Set(listParams{
			bucket:    bucket,
			recursive: recursive,
			marker:    result.NextKeyMarker,
			prefix:    prefix,
			heal:      heal,
		}, walkerCh, walkerDoneCh)
	}

	result.IsTruncated = truncated
	// If the result is not truncated, reset the markers.
	if !result.IsTruncated {
		result.NextKeyMarker = ""
		result.NextUploadIDMarker = ""
	}
	return result, nil
}
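
// Usage sketch: a caller can page through heal candidates with
// ListObjectsHeal roughly as below (bucket name and page size are
// illustrative):
//
//	marker := ""
//	for {
//		res, err := xl.ListObjectsHeal("mybucket", "", marker, "", 1000)
//		if err != nil {
//			break
//		}
//		for _, obj := range res.Objects {
//			// obj.HealObjectInfo describes the object's heal status.
//		}
//		if !res.IsTruncated {
//			break
//		}
//		marker = res.NextMarker
//	}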