mirror of https://github.com/minio/minio.git
fix: walk missing entries with opts.Marker set (#19661)
`opts.Marker` was causing many missed entries when set, since results are returned unsorted and pools are listed serially. Switch to fully concurrent listing, merging across pools, so that entries are returned sorted. Returning errors on listings is impossible with the current API, so document that. Return an error at once if no drives are found, instead of returning an empty listing and no error.
parent 1526e7ece3
commit 4afb59e63f
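Why an unsorted stream breaks marker resumption, shown as a minimal self-contained sketch (plain Go, not MinIO code): sorted pagination lets a client resume from the largest name it has seen, which assumes every entry at or below the marker was already delivered; an unsorted stream violates that assumption.

package main

import "fmt"

func main() {
	// Entries arrive unsorted, e.g. interleaved from serially listed pools.
	stream := []string{"c", "a", "d", "b"}

	// Suppose page 1 delivered "c" and "a" in arrival order. A client of a
	// sorted listing resumes with the marker set to the largest name seen.
	marker := "c"

	// Resume: forward past everything at or before the marker.
	var page2 []string
	for _, e := range stream {
		if e > marker {
			page2 = append(page2, e)
		}
	}

	fmt.Println(page2) // [d] -- "b" is silently skipped
}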
@@ -2034,44 +2034,38 @@ func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts
 }
 
 // Walk a bucket, optionally prefix recursively, until we have returned
-// all the content to objectInfo channel, it is callers responsibility
-// to allocate a receive channel for ObjectInfo, upon any unhandled
-// error walker returns error. Optionally if context.Done() is received
-// then Walk() stops the walker.
+// all the contents of the provided bucket+prefix.
+// TODO: Note that most errors will result in a truncated listing.
 func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error {
 	if err := checkListObjsArgs(ctx, bucket, prefix, ""); err != nil {
-		// Upon error close the channel.
 		xioutil.SafeClose(results)
 		return err
 	}
 
-	vcfg, _ := globalBucketVersioningSys.Get(bucket)
-
 	ctx, cancel := context.WithCancel(ctx)
-	go func() {
-		defer cancel()
-		defer xioutil.SafeClose(results)
+	var entries []chan metaCacheEntry
 
-		for _, erasureSet := range z.serverPools {
-			var wg sync.WaitGroup
-			for _, set := range erasureSet.sets {
-				set := set
-				wg.Add(1)
-				go func() {
-					defer wg.Done()
-
-					disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)
-					if len(disks) == 0 {
-						cancel()
-						return
-					}
-
-					send := func(objInfo ObjectInfo) bool {
-						select {
-						case <-ctx.Done():
-							return false
-						case results <- objInfo:
-							return true
-						}
-					}
+	for poolIdx, erasureSet := range z.serverPools {
+		for setIdx, set := range erasureSet.sets {
+			set := set
+			listOut := make(chan metaCacheEntry, 1)
+			entries = append(entries, listOut)
+			disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)
+			if len(disks) == 0 {
+				xioutil.SafeClose(results)
+				cancel()
+				return fmt.Errorf("Walk: no online disks found in pool %d, set %d", setIdx, poolIdx)
+			}
+			go func() {
+				defer xioutil.SafeClose(listOut)
+				send := func(e metaCacheEntry) {
+					if e.isDir() {
+						// Ignore directories.
+						return
+					}
+					select {
+					case listOut <- e:
+					case <-ctx.Done():
+					}
+				}
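The rewritten doc comment above ("most errors will result in a truncated listing") defines the channel contract: Walk reports only up-front failures, the callee owns closing the results channel, and later internal errors simply end the stream early. A self-contained sketch of that contract, with walkStub and Object as hypothetical stand-ins for the real Walk and ObjectInfo:

package main

import (
	"context"
	"fmt"
)

type Object struct{ Name string }

// walkStub mirrors the contract: it returns an error only for up-front
// failures, and otherwise streams asynchronously and closes results itself.
func walkStub(ctx context.Context, results chan<- Object) error {
	go func() {
		defer close(results) // callee closes, even when truncating
		for _, name := range []string{"a", "b", "c"} {
			select {
			case results <- Object{Name: name}:
			case <-ctx.Done():
				return // truncated listing; no error reaches the caller
			}
		}
	}()
	return nil
}

func main() {
	results := make(chan Object)
	if err := walkStub(context.Background(), results); err != nil {
		fmt.Println("up-front failure:", err)
		return
	}
	for o := range results {
		fmt.Println(o.Name)
	}
}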
@@ -2108,56 +2102,6 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
 				if opts.LatestOnly {
 					requestedVersions = 1
 				}
-				loadEntry := func(entry metaCacheEntry) {
-					if entry.isDir() {
-						return
-					}
-
-					if opts.LatestOnly {
-						fi, err := entry.fileInfo(bucket)
-						if err != nil {
-							cancel()
-							return
-						}
-						if opts.Filter != nil {
-							if opts.Filter(fi) {
-								if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
-									return
-								}
-							}
-						} else {
-							if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
-								return
-							}
-						}
-
-					} else {
-						fivs, err := entry.fileInfoVersions(bucket)
-						if err != nil {
-							cancel()
-							return
-						}
-
-						// Note: entry.fileInfoVersions returns versions sorted in reverse chronological order based on ModTime
-						if opts.VersionsSort == WalkVersionsSortAsc {
-							versionsSorter(fivs.Versions).reverse()
-						}
-
-						for _, version := range fivs.Versions {
-							if opts.Filter != nil {
-								if opts.Filter(version) {
-									if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
-										return
-									}
-								}
-							} else {
-								if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
-									return
-								}
-							}
-						}
-					}
-				}
-
 				// However many we ask, versions must exist on ~50%
 				listingQuorum := (askDisks + 1) / 2
@@ -2184,13 +2128,13 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
 					filterPrefix:   filterPrefix,
 					recursive:      true,
 					forwardTo:      opts.Marker,
-					minDisks:       1,
+					minDisks:       listingQuorum,
 					reportNotFound: false,
-					agreed:         loadEntry,
+					agreed:         send,
 					partial: func(entries metaCacheEntries, _ []error) {
 						entry, ok := entries.resolve(&resolver)
 						if ok {
-							loadEntry(*entry)
+							send(*entry)
 						}
 					},
 					finished: nil,
@@ -2203,8 +2147,71 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re
 				}
 			}()
 		}
-		wg.Wait()
 	}
 
+	// Convert and filter merged entries.
+	merged := make(chan metaCacheEntry, 100)
+	vcfg, _ := globalBucketVersioningSys.Get(bucket)
+	go func() {
+		defer cancel()
+		defer xioutil.SafeClose(results)
+		send := func(oi ObjectInfo) bool {
+			select {
+			case results <- oi:
+				return true
+			case <-ctx.Done():
+				return false
+			}
+		}
+		for entry := range merged {
+			if opts.LatestOnly {
+				fi, err := entry.fileInfo(bucket)
+				if err != nil {
+					return
+				}
+				if opts.Filter != nil {
+					if opts.Filter(fi) {
+						if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
+							return
+						}
+					}
+				} else {
+					if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
+						return
+					}
+				}
+				continue
+			}
+			fivs, err := entry.fileInfoVersions(bucket)
+			if err != nil {
+				return
+			}
+
+			// Note: entry.fileInfoVersions returns versions sorted in reverse chronological order based on ModTime
+			if opts.VersionsSort == WalkVersionsSortAsc {
+				versionsSorter(fivs.Versions).reverse()
+			}
+
+			for _, version := range fivs.Versions {
+				if opts.Filter != nil {
+					if opts.Filter(version) {
+						if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
+							return
+						}
+					}
+				} else {
+					if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
+						return
+					}
+				}
+			}
+		}
+	}()
+	go func() {
+		// Merge all entries from all disks.
+		// We leave quorum at 1, since entries are already resolved to have the desired quorum.
+		// mergeEntryChannels will close 'merged' channel upon completion or cancellation.
+		storageLogIf(ctx, mergeEntryChannels(ctx, entries, merged, 1))
 	}()
 
 	return nil
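mergeEntryChannels itself is not part of this diff; conceptually it is a k-way merge: each per-set channel already yields entries in sorted order, so repeatedly taking the smallest head across channels produces a globally sorted stream. A simplified, self-contained sketch of that technique using container/heap (string channels stand in for metaCacheEntry; the real function additionally handles quorum and duplicate resolution):

package main

import (
	"container/heap"
	"fmt"
)

// head is the current smallest unconsumed value of one input channel.
type head struct {
	val string
	src <-chan string
}

type minHeap []head

func (h minHeap) Len() int           { return len(h) }
func (h minHeap) Less(i, j int) bool { return h[i].val < h[j].val }
func (h minHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *minHeap) Push(x any)        { *h = append(*h, x.(head)) }
func (h *minHeap) Pop() any {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// mergeSorted merges k individually sorted channels into one sorted output.
func mergeSorted(ins []<-chan string, out chan<- string) {
	defer close(out)
	h := &minHeap{}
	for _, in := range ins {
		if v, ok := <-in; ok {
			heap.Push(h, head{v, in})
		}
	}
	for h.Len() > 0 {
		top := heap.Pop(h).(head)
		out <- top.val
		if v, ok := <-top.src; ok { // refill from the channel just consumed
			heap.Push(h, head{v, top.src})
		}
	}
}

func main() {
	mk := func(vals ...string) <-chan string {
		c := make(chan string, len(vals))
		for _, v := range vals {
			c <- v
		}
		close(c)
		return c
	}
	out := make(chan string)
	go mergeSorted([]<-chan string{mk("a", "d"), mk("b", "c")}, out)
	for v := range out {
		fmt.Print(v, " ") // a b c d
	}
	fmt.Println()
}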
@@ -217,6 +217,27 @@ func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, err
 		return nil, nil, err
 	}
 
+	// Wait up to 10 seconds for disks to come online.
+	pools := obj.(*erasureServerPools)
+	t := time.Now()
+	for _, pool := range pools.serverPools {
+		for _, sets := range pool.erasureDisks {
+			for _, s := range sets {
+				if !s.IsLocal() {
+					for {
+						if s.IsOnline() {
+							break
+						}
+						time.Sleep(100 * time.Millisecond)
+						if time.Since(t) > 10*time.Second {
+							return nil, nil, errors.New("timeout waiting for disk to come online")
+						}
+					}
+				}
+			}
+		}
+	}
+
 	return obj, fsDirs, nil
 }