diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go index 73b215bb6..24ab1ea98 100644 --- a/cmd/erasure-healing.go +++ b/cmd/erasure-healing.go @@ -443,7 +443,10 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s } erasureInfo := latestMeta.Erasure - + bp := er.bp + if erasureInfo.BlockSize == blockSizeV1 { + bp = er.bpOld + } for partIndex := 0; partIndex < len(latestMeta.Parts); partIndex++ { partSize := latestMeta.Parts[partIndex].Size partActualSize := latestMeta.Parts[partIndex].ActualSize @@ -457,7 +460,8 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s } checksumInfo := copyPartsMetadata[i].Erasure.GetChecksumInfo(partNumber) partPath := pathJoin(object, srcDataDir, fmt.Sprintf("part.%d", partNumber)) - readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize()) + readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo, + checksumInfo.Hash, erasure.ShardSize()) } writers := make([]io.Writer, len(outDatedDisks)) for i, disk := range outDatedDisks { @@ -473,7 +477,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } } - err = erasure.Heal(ctx, readers, writers, partSize, er.bp) + err = erasure.Heal(ctx, readers, writers, partSize, bp) closeBitrotReaders(readers) closeBitrotWriters(writers) if err != nil { diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go index 9968b73c0..5e79beb6d 100644 --- a/cmd/erasure-sets.go +++ b/cmd/erasure-sets.go @@ -391,6 +391,14 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto // setCount * setDriveCount with each memory upto blockSizeV2. 
bp := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2) + // Initialize a separate byte pool for legacy objects, with each + // buffer of size up to blockSizeV1 + // + // Number of buffers, max 10GiB + m := (10 * humanize.GiByte) / (blockSizeV1 * 2) + + bpOld := bpool.NewBytePoolCap(m, blockSizeV1, blockSizeV1*2) + for i := 0; i < setCount; i++ { s.erasureDisks[i] = make([]StorageAPI, setDriveCount) } @@ -440,6 +448,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto deletedCleanupSleeper: newDynamicSleeper(10, 2*time.Second), nsMutex: mutex, bp: bp, + bpOld: bpOld, mrfOpCh: make(chan partialOperation, 10000), } } diff --git a/cmd/erasure.go b/cmd/erasure.go index b798f8646..92a8d9781 100644 --- a/cmd/erasure.go +++ b/cmd/erasure.go @@ -74,6 +74,10 @@ type erasureObjects struct { // Byte pools used for temporary i/o buffers. bp *bpool.BytePoolCap + // Byte pools used for temporary i/o buffers, + // legacy objects. + bpOld *bpool.BytePoolCap + mrfOpCh chan partialOperation deletedCleanupSleeper *dynamicSleeper