From 6160188bf32aa67b818d12527b7a795e87c17ee4 Mon Sep 17 00:00:00 2001
From: Harshavardhana <harsha@minio.io>
Date: Mon, 15 Mar 2021 20:03:13 -0700
Subject: [PATCH] fix: erasure index based reading based on actual
 ParityBlocks (#11792)

in some setups with ordering issues in drive configuration,
we should rely on expected parityBlocks instead of `len(disks)/2`
---
 cmd/erasure-decode.go         | 4 ++--
 cmd/erasure-metadata-utils.go | 5 +++--
 cmd/erasure-multipart.go      | 2 +-
 cmd/erasure-object.go         | 6 +++---
 cmd/erasure-utils.go          | 5 +++--
 5 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/cmd/erasure-decode.go b/cmd/erasure-decode.go
index 5200d2441..287fa291d 100644
--- a/cmd/erasure-decode.go
+++ b/cmd/erasure-decode.go
@@ -161,7 +161,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
 			// For the last shard, the shardsize might be less than previous shard sizes.
 			// Hence the following statement ensures that the buffer size is reset to the right size.
 			p.buf[bufIdx] = p.buf[bufIdx][:p.shardSize]
-			_, err := rr.ReadAt(p.buf[bufIdx], p.offset)
+			n, err := rr.ReadAt(p.buf[bufIdx], p.offset)
 			if err != nil {
 				if errors.Is(err, errFileNotFound) {
 					atomic.StoreInt32(&missingPartsHeal, 1)
@@ -179,7 +179,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
 				return
 			}
 			newBufLK.Lock()
-			newBuf[bufIdx] = p.buf[bufIdx]
+			newBuf[bufIdx] = p.buf[bufIdx][:n]
 			newBufLK.Unlock()
 			// Since ReadAt returned success, there is no need to trigger another read.
 			readTriggerCh <- false
diff --git a/cmd/erasure-metadata-utils.go b/cmd/erasure-metadata-utils.go
index 054e4e1c9..84b13f595 100644
--- a/cmd/erasure-metadata-utils.go
+++ b/cmd/erasure-metadata-utils.go
@@ -145,10 +145,11 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
 	return metadataArray, g.Wait()
 }
 
-func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo, distribution []int) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
+func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo, fi FileInfo) (shuffledDisks []StorageAPI, shuffledPartsMetadata []FileInfo) {
 	shuffledDisks = make([]StorageAPI, len(disks))
 	shuffledPartsMetadata = make([]FileInfo, len(disks))
 	var inconsistent int
+	distribution := fi.Erasure.Distribution
 	for i, meta := range metaArr {
 		if disks[i] == nil {
 			// Assuming offline drives as inconsistent,
@@ -171,7 +172,7 @@ func shuffleDisksAndPartsMetadataByIndex(disks []StorageAPI, metaArr []FileInfo,
 
 	// Inconsistent meta info is with in the limit of
 	// expected quorum, proceed with EcIndex based
 	// disk order.
-	if inconsistent < len(disks)/2 {
+	if inconsistent < fi.Erasure.ParityBlocks {
 		return shuffledDisks, shuffledPartsMetadata
 	}
diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go
index 1e4cd6e82..11a38393e 100644
--- a/cmd/erasure-multipart.go
+++ b/cmd/erasure-multipart.go
@@ -783,7 +783,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 
 	// Order online disks in accordance with distribution order.
 	// Order parts metadata in accordance with distribution order.
-	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi.Erasure.Distribution)
+	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadataByIndex(onlineDisks, partsMetadata, fi)
 
 	// Save current erasure metadata for validation.
 	var currentFI = fi
diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go
index 4881e417c..b2c8140ae 100644
--- a/cmd/erasure-object.go
+++ b/cmd/erasure-object.go
@@ -85,7 +85,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 		return fi.ToObjectInfo(srcBucket, srcObject), toObjectErr(errMethodNotAllowed, srcBucket, srcObject)
 	}
 
-	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
+	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
 
 	versionID := srcInfo.VersionID
 	if srcInfo.versionOnly {
@@ -238,7 +238,7 @@ func (er erasureObjects) GetObject(ctx context.Context, bucket, object string, s
 func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
 	// Reorder online disks based on erasure distribution order.
 	// Reorder parts metadata based on erasure distribution order.
-	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
+	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
 
 	// For negative length read everything.
 	if length < 0 {
@@ -1190,7 +1190,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 		return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object)
 	}
 
-	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi.Erasure.Distribution)
+	onlineDisks, metaArr = shuffleDisksAndPartsMetadataByIndex(onlineDisks, metaArr, fi)
 	for i, metaFi := range metaArr {
 		if metaFi.IsValid() {
 			// clean fi.Meta of tag key, before updating the new tags
diff --git a/cmd/erasure-utils.go b/cmd/erasure-utils.go
index 1c1c88f16..569ea7821 100644
--- a/cmd/erasure-utils.go
+++ b/cmd/erasure-utils.go
@@ -19,6 +19,7 @@ package cmd
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"io"
 
 	"github.com/klauspost/reedsolomon"
@@ -46,13 +47,13 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
 
 	// Do we have enough blocks?
 	if len(enBlocks) < dataBlocks {
-		logger.LogIf(ctx, reedsolomon.ErrTooFewShards)
+		logger.LogIf(ctx, fmt.Errorf("diskBlocks(%d)/dataBlocks(%d) - %w", len(enBlocks), dataBlocks, reedsolomon.ErrTooFewShards))
 		return 0, reedsolomon.ErrTooFewShards
 	}
 
 	// Do we have enough data?
 	if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
-		logger.LogIf(ctx, reedsolomon.ErrShortData)
+		logger.LogIf(ctx, fmt.Errorf("getDataBlockLen(enBlocks, dataBlocks)(%d)/length(%d) - %w", getDataBlockLen(enBlocks, dataBlocks), length, reedsolomon.ErrShortData))
 		return 0, reedsolomon.ErrShortData
 	}
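
Note on the quorum check this patch changes: `len(disks)/2` only matches the
tolerable failure count when parity equals half the drives; with lower parity
(e.g. EC:4 on 16 drives) it overestimates how many inconsistent drives the
EcIndex-based ordering can absorb. Below is a minimal, self-contained sketch of
the before/after decision. The `FileInfo`/`ErasureInfo` structs are stripped-down
stand-ins for MinIO's types, not the real definitions, and the two helper
functions are hypothetical names for the two sides of the changed condition.

package main

import "fmt"

// Hypothetical, stripped-down stand-ins for MinIO's FileInfo/ErasureInfo.
type ErasureInfo struct {
	DataBlocks   int
	ParityBlocks int
}

type FileInfo struct {
	Erasure ErasureInfo
}

// Before the patch: trust EcIndex ordering while fewer than half the
// drives disagree, regardless of the erasure configuration.
func trustIndexOrderOld(inconsistent, totalDisks int) bool {
	return inconsistent < totalDisks / 2
}

// After the patch: only as many drives may disagree as there are
// parity blocks, since that is all the decoder can compensate for.
func trustIndexOrderNew(inconsistent int, fi FileInfo) bool {
	return inconsistent < fi.Erasure.ParityBlocks
}

func main() {
	// 16 drives with EC:4 -- parity is well below len(disks)/2.
	fi := FileInfo{Erasure: ErasureInfo{DataBlocks: 12, ParityBlocks: 4}}
	for _, inconsistent := range []int{3, 4, 7} {
		fmt.Printf("inconsistent=%d old(len/2)=%v new(parity)=%v\n",
			inconsistent,
			trustIndexOrderOld(inconsistent, 16),
			trustIndexOrderNew(inconsistent, fi))
	}
	// inconsistent=3 old(len/2)=true new(parity)=true
	// inconsistent=4 old(len/2)=true new(parity)=false
	// inconsistent=7 old(len/2)=true new(parity)=false
}

With 4 and 7 inconsistent drives the old check still trusted the index-based
order even though only 4 parity blocks are available; the new check gives up on
index ordering as soon as parity is exhausted and falls back to the
distribution-based shuffle instead.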
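
The `cmd/erasure-decode.go` hunks are related plumbing: they capture the byte
count returned by `ReadAt` and truncate the reused shard buffer to `[:n]`,
because a buffer sized for a full shard can otherwise carry stale bytes past
the end of a short final shard. A standalone illustration of that failure mode
using only the standard library (MinIO's own reader type may report short
reads differently; this is just the general ReaderAt behavior):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Pretend the last shard holds only 3 bytes, while the reused buffer
	// is sized for a full 8-byte shard and still holds old contents.
	rr := strings.NewReader("abc")
	buf := []byte("XXXXXXXX")

	n, err := rr.ReadAt(buf, 0)
	fmt.Printf("full buffer: %q (n=%d, err=%v)\n", buf, n, err) // stale "XXXXX" tail survives
	fmt.Printf("buf[:n]:     %q\n", buf[:n])                    // only the bytes actually read
}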