/*
 * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"path"
	"strings"
	"sync"

	humanize "github.com/dustin/go-humanize"
	"github.com/minio/minio/cmd/logger"
)

const (
	// Block size used for all internal operations version 1.
	blockSizeV1 = 10 * humanize.MiByte

	// Staging buffer read size for all internal operations version 1.
	readSizeV1 = 1 * humanize.MiByte

	// Buckets meta prefix.
	bucketMetaPrefix = "buckets"

	// ETag (hex encoded md5sum) of empty string.
	emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
)

// Global object layer mutex, used for safely updating object layer.
var globalObjLayerMutex *sync.RWMutex

// Global object layer, only accessed by globalObjectAPI.
var globalObjectAPI ObjectLayer

// Global cacheObjects, only accessed by newCacheObjectsFn().
var globalCacheObjectAPI CacheObjectLayer

func init() {
	// Initialize this once per server initialization.
	globalObjLayerMutex = &sync.RWMutex{}
}

// Checks if the object is a directory: an object is treated as a
// directory when its size is zero and its name ends with SlashSeparator.
func isObjectDir(object string, size int64) bool {
	return hasSuffix(object, SlashSeparator) && size == 0
}

// Converts just bucket, object metadata into ObjectInfo datatype.
func dirObjectInfo(bucket, object string, size int64, metadata map[string]string) ObjectInfo {
	// This is a special case where the size is '0' and the object name
	// ends with a slash separator: we treat it as a valid operation and
	// return success.
	etag := metadata["etag"]
	delete(metadata, "etag")
	if etag == "" {
		etag = emptyETag
	}

	return ObjectInfo{
		Bucket:      bucket,
		Name:        object,
		ModTime:     UTCNow(),
		ContentType: "application/octet-stream",
		IsDir:       true,
		Size:        size,
		ETag:        etag,
		UserDefined: metadata,
	}
}

func deleteBucketMetadata(ctx context.Context, bucket string, objAPI ObjectLayer) {
	// Delete bucket access policy, if present - ignore any errors.
	removePolicyConfig(ctx, objAPI, bucket)

	// Delete notification config, if present - ignore any errors.
	removeNotificationConfig(ctx, objAPI, bucket)

	// Delete listener config, if present - ignore any errors.
	removeListenerConfig(ctx, objAPI, bucket)
}

// Depending on the disk type, network or local, initialize the storage API.
func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
	if endpoint.IsLocal {
		storage, err := newPosix(endpoint.Path)
		if err != nil {
			return nil, err
		}
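
		// posixDiskIDCheck wraps the local storage so calls can first
		// verify the disk ID, guarding against a disk that was swapped
		// or went offline behind the server.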
		return &posixDiskIDCheck{storage: storage}, nil
	}

	return newStorageRESTClient(endpoint), nil
}

// Cleanup a directory recursively.
func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) error {
	var delFunc func(string) error
	// Function to delete entries recursively.
	delFunc = func(entryPath string) error {
		if !hasSuffix(entryPath, SlashSeparator) {
			// Delete the file entry.
			err := storage.DeleteFile(volume, entryPath)
			logger.LogIf(ctx, err)
			return err
		}

		// If it's a directory, list and call delFunc() for each entry.
		entries, err := storage.ListDir(volume, entryPath, -1, "")
		// If entryPath prefix never existed, safe to ignore.
		if err == errFileNotFound {
			return nil
		} else if err != nil { // For any other errors fail.
			logger.LogIf(ctx, err)
			return err
		}

		// The directory is empty, just delete it.
		if len(entries) == 0 {
			err = storage.DeleteFile(volume, entryPath)
			logger.LogIf(ctx, err)
			return err
		}

		// Recurse and delete all other entries.
		for _, entry := range entries {
			if err = delFunc(pathJoin(entryPath, entry)); err != nil {
				return err
			}
		}
		return nil
	}
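
	// Kick off the deletion from dirPath; retainSlash keeps the trailing
	// slash so delFunc treats the path as a directory.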
	err := delFunc(retainSlash(pathJoin(dirPath)))
	return err
}

// Cleanup objects in bulk and recursively: each object expands to the list
// of its sub-files, which are deleted in the backend.
func cleanupObjectsBulk(storage StorageAPI, volume string, objsPaths []string, errs []error) ([]error, error) {
	// The list of files in disk to delete
	var filesToDelete []string
	// Map each file to delete back to the index of its object in objsPaths
	var filesToDeleteObjsIndexes []int

	// Traverse and return the list of sub entries
	var traverse func(string) ([]string, error)
	traverse = func(entryPath string) ([]string, error) {
		var output = make([]string, 0)
		if !hasSuffix(entryPath, SlashSeparator) {
			output = append(output, entryPath)
			return output, nil
		}
		entries, err := storage.ListDir(volume, entryPath, -1, "")
		if err != nil {
			if err == errFileNotFound {
				return nil, nil
			}
			return nil, err
		}

		for _, entry := range entries {
			subEntries, err := traverse(pathJoin(entryPath, entry))
			if err != nil {
				return nil, err
			}
			output = append(output, subEntries...)
		}
		return output, nil
	}

	// Find and collect the list of files to remove associated
	// with the passed objects paths
	for idx, objPath := range objsPaths {
		if errs[idx] != nil {
			continue
		}
		output, err := traverse(retainSlash(pathJoin(objPath)))
		if err != nil {
			errs[idx] = err
			continue
		}
		errs[idx] = nil
		filesToDelete = append(filesToDelete, output...)
		for i := 0; i < len(output); i++ {
			filesToDeleteObjsIndexes = append(filesToDeleteObjsIndexes, idx)
		}
	}

	// Reverse the list so remove can succeed.
	reverseStringSlice(filesToDelete)

	dErrs, err := storage.DeleteFileBulk(volume, filesToDelete)
	if err != nil {
		return nil, err
	}

	// Map file deletion errors back to their corresponding objects; each
	// object's error slot was set to nil above, so record the first
	// deletion error seen for it.
	for i := range dErrs {
		if dErrs[i] != nil {
			if errs[filesToDeleteObjsIndexes[i]] == nil {
				errs[filesToDeleteObjsIndexes[i]] = dErrs[i]
			}
		}
	}

	return errs, nil
}

// Removes notification.xml for a given bucket, only used during DeleteBucket.
func removeNotificationConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
	// Verify bucket is valid.
	if !IsValidBucketName(bucket) {
		return BucketNameInvalid{Bucket: bucket}
	}

	ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig)
	return objAPI.DeleteObject(ctx, minioMetaBucket, ncPath)
}

// Remove listener configuration from storage layer. Used when a bucket is deleted.
func removeListenerConfig(ctx context.Context, objAPI ObjectLayer, bucket string) error {
	// Compose the path to the listener config of the given bucket.
	lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig)
	return objAPI.DeleteObject(ctx, minioMetaBucket, lcPath)
}
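
// listObjectsNonSlash lists objects for delimiters other than SlashSeparator.
// Because the tree walk follows the slash-separated on-disk layout, the walk
// runs fully recursive here and the arbitrary delimiter is applied to the
// walked entries afterwards to synthesize the common prefixes.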
func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
	endWalkCh := make(chan struct{})
	defer close(endWalkCh)
	recursive := true
	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", recursive, listDir, endWalkCh)

	var objInfos []ObjectInfo
	var eof bool
	var prevPrefix string

	for {
		if len(objInfos) == maxKeys {
			break
		}
		result, ok := <-walkResultCh
		if !ok {
			eof = true
			break
		}

		var objInfo ObjectInfo
		var err error

		index := strings.Index(strings.TrimPrefix(result.entry, prefix), delimiter)
		if index == -1 {
			objInfo, err = getObjInfo(ctx, bucket, result.entry)
			if err != nil {
				// Ignore errFileNotFound as the object might have been
				// deleted between the listing and the getObjInfo() call;
				// ignore quorum errors as the entry might come from an
				// outdated disk.
				if IsErrIgnored(err, []error{
					errFileNotFound,
					errXLReadQuorum,
				}...) {
					continue
				}
				return loi, toObjectErr(err, bucket, prefix)
			}
		} else {
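			// The entry contains the delimiter past the prefix: cut the
			// entry at the end of the first delimiter occurrence to form
			// the common prefix reported to the caller.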
			index = len(prefix) + index + len(delimiter)
			currPrefix := result.entry[:index]
			if currPrefix == prevPrefix {
				continue
			}
			prevPrefix = currPrefix

			objInfo = ObjectInfo{
				Bucket: bucket,
				Name:   currPrefix,
				IsDir:  true,
			}
		}
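
		// Skip entries up to and including the marker; they belong to a
		// previous page of this listing.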
		if objInfo.Name <= marker {
			continue
		}

		objInfos = append(objInfos, objInfo)
		if result.end {
			eof = true
			break
		}
	}

	result := ListObjectsInfo{}
	for _, objInfo := range objInfos {
		if objInfo.IsDir {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}

	if !eof {
		result.IsTruncated = true
		if len(objInfos) > 0 {
			result.NextMarker = objInfos[len(objInfos)-1].Name
		}
	}

	return result, nil
}
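
// listObjects is the tree-walk based ListObjects implementation shared by
// the object layers: it validates the arguments, reuses or starts a tree
// walk goroutine, and accumulates up to maxKeys entries for the response.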
func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
	if delimiter != SlashSeparator && delimiter != "" {
		return listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, getObjInfo, getObjectInfoDirs...)
	}

	if err := checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, obj); err != nil {
		return loi, err
	}

	// If marker is set, validate the pre-condition.
	if marker != "" {
		// A marker that does not share the prefix is not implemented,
		// send an empty response.
		if !hasPrefix(marker, prefix) {
			return loi, nil
		}
	}

	// With max keys of zero we have reached eof, return right here.
	if maxKeys == 0 {
		return loi, nil
	}

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to the S3 spec we stop at the 'delimiter'
	// along with the prefix. On a flat namespace with 'prefix'
	// as '/' we don't have any entries, since all the keys are
	// of the form 'keyName/...'
	if delimiter == SlashSeparator && prefix == SlashSeparator {
		return loi, nil
	}

	// Overflowing count - reset to maxObjectList.
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}

	// Default is recursive, if delimiter is set then list non recursive.
	recursive := true
	if delimiter == SlashSeparator {
		recursive = false
	}
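
	// Try to reuse a tree walk goroutine saved by a previous page of this
	// listing; if none is available, start a fresh walk.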
	walkResultCh, endWalkCh := tpool.Release(listParams{bucket, recursive, marker, prefix, false})
	if walkResultCh == nil {
		endWalkCh = make(chan struct{})
		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, endWalkCh)
	}

	var objInfos []ObjectInfo
	var eof bool
	var nextMarker string

	// List until maxKeys requested.
	for i := 0; i < maxKeys; {
		walkResult, ok := <-walkResultCh
		if !ok {
			// Closed channel.
			eof = true
			break
		}

		var objInfo ObjectInfo
		var err error
		if hasSuffix(walkResult.entry, SlashSeparator) {
			for _, getObjectInfoDir := range getObjectInfoDirs {
				objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry)
				if err == nil {
					break
				}
				if err == errFileNotFound {
					err = nil
					objInfo = ObjectInfo{
						Bucket: bucket,
						Name:   walkResult.entry,
						IsDir:  true,
					}
				}
			}
		} else {
			objInfo, err = getObjInfo(ctx, bucket, walkResult.entry)
		}
		if err != nil {
			// Ignore errFileNotFound as the object might have been
			// deleted between the listing and the getObjInfo() call;
			// ignore quorum errors as the entry might come from an
			// outdated disk.
			if IsErrIgnored(err, []error{
				errFileNotFound,
				errXLReadQuorum,
			}...) {
				continue
			}
			return loi, toObjectErr(err, bucket, prefix)
		}
		nextMarker = objInfo.Name
		objInfos = append(objInfos, objInfo)
		if walkResult.end {
			eof = true
			break
		}
		i++
	}

	// Save list routine for the next marker if we haven't reached EOF.
	params := listParams{bucket, recursive, nextMarker, prefix, false}
	if !eof {
		tpool.Set(params, walkResultCh, endWalkCh)
	}

	result := ListObjectsInfo{}
	for _, objInfo := range objInfos {
		if objInfo.IsDir && delimiter == SlashSeparator {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}

	if !eof {
		result.IsTruncated = true
		if len(objInfos) > 0 {
			result.NextMarker = objInfos[len(objInfos)-1].Name
		}
	}

	// Success.
	return result, nil
}