Fix listing fallback re-using disks (#14576)

When more than two disks are unavailable for listing, the same disk will be handed out as the fallback for more than one of them.

This makes quorum calculations incorrect since the same disk will have multiple entries.

This PR keeps track of which fallback disks have been handed out and only ever returns a disk once.
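A rough standalone sketch of the idea, using hypothetical names (Disk, nextFallback) rather than the actual listPathRaw identifiers: a mutex-guarded closure drains a local copy of the fallback slice, so each disk is handed out at most once and nil is returned once the pool is exhausted.

package main

import (
	"fmt"
	"sync"
)

// Disk stands in for a storage endpoint (StorageAPI in MinIO).
type Disk struct{ name string }

func main() {
	// nil entries model offline fallback disks.
	fds := []*Disk{nil, {name: "disk-3"}, {name: "disk-4"}}

	var fdMu sync.Mutex
	// nextFallback returns each usable fallback disk at most once.
	nextFallback := func() *Disk {
		fdMu.Lock()
		defer fdMu.Unlock()
		for len(fds) > 0 {
			fd := fds[0]
			fds = fds[1:]
			if fd != nil {
				return fd
			}
		}
		return nil
	}

	// Three failed listing slots: the first two get distinct disks, the third gets none.
	for i := 0; i < 3; i++ {
		if fd := nextFallback(); fd != nil {
			fmt.Println("slot", i, "falls back to", fd.name)
		} else {
			fmt.Println("slot", i, "has no fallback left")
		}
	}
}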
Klaus Post authored on 2022-03-18 11:35:27 -07:00; committed by GitHub
parent 43eb5a001c
commit 61eb9d4e29

@@ -789,14 +789,30 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
-	fallback := func(err error) bool {
+	// Keep track of fallback disks
+	var fdMu sync.Mutex
+	fds := opts.fallbackDisks
+	fallback := func(err error) StorageAPI {
 		switch err.(type) {
 		case StorageErr:
-			// all supported disk errors
-			// attempt a fallback.
-			return true
+			// Attempt to grab a fallback disk
+			fdMu.Lock()
+			defer fdMu.Unlock()
+			if len(fds) == 0 {
+				return nil
+			}
+			fdsCopy := fds
+			for _, fd := range fdsCopy {
+				// Grab a fallback disk
+				fds = fds[1:]
+				if fd != nil {
+					return fd
+				}
+			}
 		}
-		return false
+		// Either no more disks for fallback or
+		// not a storage error.
+		return nil
 	}
 	askDisks := len(disks)
 	readers := make([]*metacacheReader, askDisks)
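For intuition on the fdMu mutex introduced above: several per-disk listing goroutines can fail at roughly the same time and race to grab a fallback. A minimal sketch, with hypothetical names rather than the MinIO code, showing that a mutex-guarded pool still hands out each disk exactly once under concurrent callers:

package main

import (
	"fmt"
	"sync"
)

func main() {
	pool := []string{"fd-1", "fd-2", "fd-3"}

	var mu sync.Mutex
	take := func() string {
		mu.Lock()
		defer mu.Unlock()
		if len(pool) == 0 {
			return "" // exhausted
		}
		fd := pool[0]
		pool = pool[1:]
		return fd
	}

	// Five concurrent "failed listers" compete for three fallback disks.
	var wg sync.WaitGroup
	results := make(chan string, 5)
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			results <- take()
		}()
	}
	wg.Wait()
	close(results)

	seen := map[string]int{}
	for fd := range results {
		seen[fd]++
	}
	// Each disk appears at most once; "" counts callers that found the pool empty.
	fmt.Println(seen)
}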
@@ -825,14 +841,10 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 			}
 			// fallback only when set.
-			if len(opts.fallbackDisks) > 0 && fallback(werr) {
+			for fd := fallback(werr); fd != nil; {
 				// This fallback is only set when
 				// askDisks is less than total
 				// number of disks per set.
-				for _, fd := range opts.fallbackDisks {
-					if fd == nil {
-						continue
-					}
 				werr = fd.WalkDir(ctx, WalkDirOptions{
 					Bucket:  opts.bucket,
 					BaseDir: opts.path,
@@ -845,7 +857,6 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 					break
 				}
 			}
-		}
 		w.CloseWithError(werr)
 		if werr != io.EOF && werr != nil &&