// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"strings"
	"sync"

	"github.com/dustin/go-humanize"
	"github.com/minio/minio/internal/sync/errgroup"
)

const (
	// Block size used for all internal operations version 1.
	//
	// TL;DR: not used anymore; xl.meta captures the right blockSize,
	// so blockSizeV2 should be used for all future purposes. This
	// value is kept here only to calculate the max API requests
	// based on RAM size for existing content.
	blockSizeV1 = 10 * humanize.MiByte

	// Block size used in erasure coding version 2.
	//
	// Reduced from the 10MiB of version 1 to avoid large read
	// amplification on tiny, random range GETs (#11721).
	blockSizeV2 = 1 * humanize.MiByte

	// Buckets meta prefix.
	bucketMetaPrefix = "buckets"

	// ETag (hex encoded md5sum) of empty string.
	emptyETag = "d41d8cd98f00b204e9800998ecf8427e"
)

// Global object layer mutex, used for safely updating object layer.
var globalObjLayerMutex sync.RWMutex

// Global object layer, only accessed by newObjectLayerFn().
var globalObjectAPI ObjectLayer

// Global cacheObjects, only accessed by newCacheObjectsFn().
var globalCacheObjectAPI CacheObjectLayer
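
// A minimal sketch of the accessor pattern these globals assume (the real
// helpers, e.g. newObjectLayerFn(), live elsewhere in this package):
//
//	globalObjLayerMutex.RLock()
//	objAPI := globalObjectAPI
//	globalObjLayerMutex.RUnlock()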

// isObjectDir returns true if the object is a directory entry: its
// size is zero and its name ends with SlashSeparator.
func isObjectDir(object string, size int64) bool {
	return HasSuffix(object, SlashSeparator) && size == 0
}
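
// newStorageAPIWithoutHealthCheck initializes the storage API for the given
// endpoint like newStorageAPI below, but the remote storage client is
// created without active health checking.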
func newStorageAPIWithoutHealthCheck(endpoint Endpoint) (storage StorageAPI, err error) {
	if endpoint.IsLocal {
		storage, err := newXLStorage(endpoint)
		if err != nil {
			return nil, err
		}
		return newXLStorageDiskIDCheck(storage), nil
	}

	return newStorageRESTClient(endpoint, false), nil
}

// Depending on the disk type, network or local, initialize the storage API.
func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
	if endpoint.IsLocal {
		storage, err := newXLStorage(endpoint)
		if err != nil {
			return nil, err
		}
		return newXLStorageDiskIDCheck(storage), nil
	}

	return newStorageRESTClient(endpoint, true), nil
}
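
// listObjectsNonSlash handles listing for delimiters other than "/": it
// walks the tree recursively and synthesizes common prefixes by scanning
// each entry for the requested delimiter.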
func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
	endWalkCh := make(chan struct{})
	defer close(endWalkCh)
	recursive := true
	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", recursive, listDir, isLeaf, isLeafDir, endWalkCh)

	var objInfos []ObjectInfo
	var eof bool
	var prevPrefix string

	for {
		if len(objInfos) == maxKeys {
			break
		}
		result, ok := <-walkResultCh
		if !ok {
			eof = true
			break
		}

		var objInfo ObjectInfo
		var err error
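
		// If the delimiter does not appear in the entry name beyond the
		// prefix, the entry is a leaf object; otherwise everything up to
		// and including the delimiter collapses into a common prefix.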
		index := strings.Index(strings.TrimPrefix(result.entry, prefix), delimiter)
		if index == -1 {
			objInfo, err = getObjInfo(ctx, bucket, result.entry)
			if err != nil {
				// Ignore errFileNotFound as the object might have been
				// deleted in the interim period between listing and getObjectInfo(),
				// ignore quorum error as it might be an entry from an outdated disk.
				if IsErrIgnored(err, []error{
					errFileNotFound,
					errErasureReadQuorum,
				}...) {
					continue
				}
				return loi, toObjectErr(err, bucket, prefix)
			}
		} else {
			index = len(prefix) + index + len(delimiter)
			currPrefix := result.entry[:index]
			if currPrefix == prevPrefix {
				continue
			}
			prevPrefix = currPrefix

			objInfo = ObjectInfo{
				Bucket: bucket,
				Name:   currPrefix,
				IsDir:  true,
			}
		}

		if objInfo.Name <= marker {
			continue
		}

		objInfos = append(objInfos, objInfo)
		if result.end {
			eof = true
			break
		}
	}

	result := ListObjectsInfo{}
	for _, objInfo := range objInfos {
		if objInfo.IsDir {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}

	if !eof {
		result.IsTruncated = true
		if len(objInfos) > 0 {
			result.NextMarker = objInfos[len(objInfos)-1].Name
		}
	}

	return result, nil
}

// fsWalk walks a bucket, optionally under a prefix, recursively, until all
// content has been sent to the ObjectInfo channel. It is the caller's
// responsibility to allocate the receive channel for ObjectInfo; upon any
// unhandled error the walker returns the error. If context.Done() is
// received, the walker stops.
func fsWalk(ctx context.Context, obj ObjectLayer, bucket, prefix string, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, results chan<- ObjectInfo, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) error {
	if err := checkListObjsArgs(ctx, bucket, prefix, "", obj); err != nil {
		// Upon error close the channel.
		close(results)
		return err
	}

	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", true, listDir, isLeaf, isLeafDir, ctx.Done())
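
	// Convert walk results to ObjectInfo in the background; the results
	// channel is closed once the walk is exhausted or ends.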
	go func() {
		defer close(results)

		for {
			walkResult, ok := <-walkResultCh
			if !ok {
				break
			}

			var objInfo ObjectInfo
			var err error
			if HasSuffix(walkResult.entry, SlashSeparator) {
				for _, getObjectInfoDir := range getObjectInfoDirs {
					objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry)
					if err == nil {
						break
					}
					if err == errFileNotFound {
						err = nil
						objInfo = ObjectInfo{
							Bucket: bucket,
							Name:   walkResult.entry,
							IsDir:  true,
						}
					}
				}
			} else {
				objInfo, err = getObjInfo(ctx, bucket, walkResult.entry)
			}
			if err != nil {
				continue
			}
			results <- objInfo
			if walkResult.end {
				break
			}
		}
	}()
	return nil
}
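
// listObjects is the tree-walk based ListObjects implementation, honoring
// S3 semantics for prefix, marker, delimiter and maxKeys.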
func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
	if delimiter != SlashSeparator && delimiter != "" {
		return listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, isLeaf, isLeafDir, getObjInfo, getObjectInfoDirs...)
	}

	if err := checkListObjsArgs(ctx, bucket, prefix, marker, obj); err != nil {
		return loi, err
	}

	// Marker is set; validate the pre-condition.
	if marker != "" {
		// A marker not sharing the prefix is not implemented. Send an empty response.
		if !HasPrefix(marker, prefix) {
			return loi, nil
		}
	}

	// With max keys of zero we have reached eof, return right here.
	if maxKeys == 0 {
		return loi, nil
	}

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to s3 spec we stop at the 'delimiter'
	// along with the prefix. On a flat namespace with 'prefix'
	// as '/' we don't have any entries, since all the keys are
	// of form 'keyName/...'
	if delimiter == SlashSeparator && prefix == SlashSeparator {
		return loi, nil
	}

	// Overflowing count - reset to maxObjectList.
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}

	// Default is recursive, if delimiter is set then list non recursive.
	recursive := true
	if delimiter == SlashSeparator {
		recursive = false
	}
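
	// Resume a saved tree walk for these listing parameters if one exists
	// in the pool; otherwise start a fresh walk.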
	walkResultCh, endWalkCh := tpool.Release(listParams{bucket, recursive, marker, prefix})
	if walkResultCh == nil {
		endWalkCh = make(chan struct{})
		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh)
	}

	var eof bool
	var nextMarker string
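
	// Resolve up to maxKeys entries concurrently, capping concurrency at
	// a tenth of maxKeys (or at maxKeys itself for small requests).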
	maxConcurrent := maxKeys / 10
	if maxConcurrent == 0 {
		maxConcurrent = maxKeys
	}

	// List until maxKeys requested.
	g := errgroup.WithNErrs(maxKeys).WithConcurrency(maxConcurrent)
	ctx, cancel := g.WithCancelOnError(ctx)
	defer cancel()

	objInfoFound := make([]*ObjectInfo, maxKeys)
	var i int
	for i = 0; i < maxKeys; i++ {
		i := i
		walkResult, ok := <-walkResultCh
		if !ok {
			// Closed channel.
			eof = true
			break
		}
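
		// Directory entries are resolved through getObjectInfoDirs,
		// regular entries through getObjInfo; both run inside the
		// errgroup so that lookups overlap.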
		if HasSuffix(walkResult.entry, SlashSeparator) {
			g.Go(func() error {
				for _, getObjectInfoDir := range getObjectInfoDirs {
					objInfo, err := getObjectInfoDir(ctx, bucket, walkResult.entry)
					if err == nil {
						objInfoFound[i] = &objInfo
						// Done...
						return nil
					}

					// Add a temporary entry; it may be overridden by a later lookup.
					if err == errFileNotFound {
						objInfoFound[i] = &ObjectInfo{
							Bucket: bucket,
							Name:   walkResult.entry,
							IsDir:  true,
						}
						continue
					}
					return toObjectErr(err, bucket, prefix)
				}
				return nil
			}, i)
		} else {
			g.Go(func() error {
				objInfo, err := getObjInfo(ctx, bucket, walkResult.entry)
				if err != nil {
					// Ignore errFileNotFound as the object might have been
					// deleted in the interim period between listing and getObjectInfo(),
					// ignore quorum error as it might be an entry from an outdated disk.
					if IsErrIgnored(err, []error{
						errFileNotFound,
						errErasureReadQuorum,
					}...) {
						return nil
					}
					return toObjectErr(err, bucket, prefix)
				}
				objInfoFound[i] = &objInfo
				return nil
			}, i)
		}

		if walkResult.end {
			eof = true
			break
		}
	}
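
	// Wait for all outstanding lookups; the first error cancels the group
	// context and is returned to the caller.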
	if err := g.WaitErr(); err != nil {
		return loi, err
	}

	// Copy found objects.
	objInfos := make([]ObjectInfo, 0, i+1)
	for _, objInfo := range objInfoFound {
		if objInfo == nil {
			continue
		}
		objInfos = append(objInfos, *objInfo)
		nextMarker = objInfo.Name
	}

	// Save list routine for the next marker if we haven't reached EOF.
	params := listParams{bucket, recursive, nextMarker, prefix}
	if !eof {
		tpool.Set(params, walkResultCh, endWalkCh)
	}

	result := ListObjectsInfo{}
	for _, objInfo := range objInfos {
		if objInfo.IsDir && delimiter == SlashSeparator {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}

	if !eof {
		result.IsTruncated = true
		if len(objInfos) > 0 {
			result.NextMarker = objInfos[len(objInfos)-1].Name
		}
	}

	// Success.
	return result, nil
}