fix: make sure to avoid calling RenameData() on disconnected disks. (#14094)

Large clusters with multiple sets, or multi-pool setups at times might
fail and report unexpected "file not found" errors. This can become
a problem during startup sequence when some files need to be created
at multiple locations.

- This PR ensures that we nil the erasure writers such that they
  are skipped in RenameData() call.

- RenameData() doesn't need to make "Access()" calls for `.minio.sys`
  folders; they always exist.

- Make sure PutObject() never returns ObjectNotFound{} for any
  errors; it always returns "WriteQuorum" when renameData()
  fails with ObjectNotFound{}. Return appropriate errors for all
  other cases.
This commit is contained in:
Harshavardhana 2022-01-12 18:49:01 -08:00 committed by GitHub
parent 04e669a6be
commit 38ccc4f672
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 59 additions and 25 deletions

View File

@ -119,8 +119,10 @@ func newBitrotReader(disk StorageAPI, data []byte, bucket string, filePath strin
// Close all the readers. // Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) { func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs { for _, r := range rs {
if br, ok := r.(io.Closer); ok { if r != nil {
br.Close() if br, ok := r.(io.Closer); ok {
br.Close()
}
} }
} }
} }
@ -128,8 +130,10 @@ func closeBitrotReaders(rs []io.ReaderAt) {
// Close all the writers. // Close all the writers.
func closeBitrotWriters(ws []io.Writer) { func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws { for _, w := range ws {
if bw, ok := w.(io.Closer); ok { if w != nil {
bw.Close() if bw, ok := w.(io.Closer); ok {
bw.Close()
}
} }
} }
} }

View File

@ -322,8 +322,8 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
errs: make([]error, len(writers)), errs: make([]error, len(writers)),
} }
err = w.Write(ctx, bufs) if err = w.Write(ctx, bufs); err != nil {
if err != nil { logger.LogIf(ctx, err)
return err return err
} }
} }

View File

@ -52,7 +52,10 @@ func (p *parallelWriter) Write(ctx context.Context, blocks [][]byte) error {
if p.errs[i] == nil { if p.errs[i] == nil {
if n != len(blocks[i]) { if n != len(blocks[i]) {
p.errs[i] = io.ErrShortWrite p.errs[i] = io.ErrShortWrite
p.writers[i] = nil
} }
} else {
p.writers[i] = nil
} }
}(i) }(i)
} }
@ -93,6 +96,7 @@ func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []io.Writer
blocks, err = e.EncodeData(ctx, buf[:n]) blocks, err = e.EncodeData(ctx, buf[:n])
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return 0, err return 0, err
} }

View File

@ -540,7 +540,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
// If all disks are having errors, we give up. // If all disks are having errors, we give up.
if disksToHealCount == 0 { if disksToHealCount == 0 {
return result, fmt.Errorf("all disks had write errors, unable to heal") return result, fmt.Errorf("all disks had write errors, unable to heal %s/%s", bucket, object)
} }
} }

View File

@ -701,9 +701,10 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
if disk == nil { if disk == nil {
continue continue
} }
if disk.IsOnline() {
inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize)) inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize))
writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize()) writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
}
} }
n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum) n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum)
@ -720,6 +721,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
for i, w := range writers { for i, w := range writers {
if w == nil { if w == nil {
// Make sure to avoid writing to disks which we couldn't complete in erasure.Encode()
onlineDisks[i] = nil onlineDisks[i] = nil
continue continue
} }
@ -930,6 +932,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
continue continue
} }
if !disk.IsOnline() {
continue
}
if len(inlineBuffers) > 0 { if len(inlineBuffers) > 0 {
sz := shardFileSize sz := shardFileSize
if sz < 0 { if sz < 0 {
@ -939,6 +945,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize()) writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
continue continue
} }
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize()) writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize())
} }
@ -1012,6 +1019,9 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
// Rename the successfully written temporary object to final location. // Rename the successfully written temporary object to final location.
if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil { if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum); err != nil {
if errors.Is(err, errFileNotFound) {
return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object)
}
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return ObjectInfo{}, toObjectErr(err, bucket, object) return ObjectInfo{}, toObjectErr(err, bucket, object)
} }

View File

@ -291,6 +291,7 @@ func configRetriableErrors(err error) bool {
errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
errors.As(err, &rquorum) || errors.As(err, &rquorum) ||
errors.As(err, &wquorum) || errors.As(err, &wquorum) ||
isErrObjectNotFound(err) ||
isErrBucketNotFound(err) || isErrBucketNotFound(err) ||
errors.Is(err, os.ErrDeadlineExceeded) errors.Is(err, os.ErrDeadlineExceeded)
} }

View File

@ -1984,6 +1984,14 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, recu
return s.deleteFile(volumeDir, filePath, recursive) return s.deleteFile(volumeDir, filePath, recursive)
} }
func skipAccessChecks(volume string) bool {
switch volume {
case minioMetaTmpBucket, minioMetaBucket, minioMetaMultipartBucket:
return true
}
return false
}
// RenameData - rename source path to destination path atomically, metadata and data directory. // RenameData - rename source path to destination path atomically, metadata and data directory.
func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (err error) { func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string) (err error) {
defer func() { defer func() {
@ -2010,23 +2018,27 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
return err return err
} }
// Stat a volume entry. if !skipAccessChecks(srcVolume) {
if err = Access(srcVolumeDir); err != nil { // Stat a volume entry.
if osIsNotExist(err) { if err = Access(srcVolumeDir); err != nil {
return errVolumeNotFound if osIsNotExist(err) {
} else if isSysErrIO(err) { return errVolumeNotFound
return errFaultyDisk } else if isSysErrIO(err) {
return errFaultyDisk
}
return err
} }
return err
} }
if err = Access(dstVolumeDir); err != nil { if !skipAccessChecks(dstVolume) {
if osIsNotExist(err) { if err = Access(dstVolumeDir); err != nil {
return errVolumeNotFound if osIsNotExist(err) {
} else if isSysErrIO(err) { return errVolumeNotFound
return errFaultyDisk } else if isSysErrIO(err) {
return errFaultyDisk
}
return err
} }
return err
} }
srcFilePath := pathutil.Join(srcVolumeDir, pathJoin(srcPath, xlStorageFormatFile)) srcFilePath := pathutil.Join(srcVolumeDir, pathJoin(srcPath, xlStorageFormatFile))
@ -2237,7 +2249,8 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
s.deleteFile(dstVolumeDir, legacyDataPath, true) s.deleteFile(dstVolumeDir, legacyDataPath, true)
} }
s.deleteFile(dstVolumeDir, dstDataPath, false) s.deleteFile(dstVolumeDir, dstDataPath, false)
if err != errFileNotFound { // Looks like srcFilePath is missing usually at .minio.sys/ ignore it.
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
} }
return osErrToFileErr(err) return osErrToFileErr(err)
@ -2252,9 +2265,11 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
} }
s.deleteFile(dstVolumeDir, dstFilePath, false) s.deleteFile(dstVolumeDir, dstFilePath, false)
if err != errFileNotFound { // Looks like srcFilePath is missing usually at .minio.sys/ ignore it.
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
} }
return osErrToFileErr(err) return osErrToFileErr(err)
} }