do not use large buffers if not necessary (#11220)

Without this change, there is a performance
regression for small-object GETs; this restores
overall speed to what it was before commit
'59d363'.
Harshavardhana
2021-01-04 18:51:52 -08:00
committed by GitHub
parent cb7fc99368
commit d0027c3c41
24 changed files with 92 additions and 196 deletions
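
For context, a minimal Go sketch of the idea behind the fix; this is not MinIO's code, and largeBufSize and readObject are hypothetical names. The point is simply to size the read buffer to the object instead of always taking a large fixed buffer that small-object GETs never fill:

package main

import (
	"bytes"
	"fmt"
	"io"
)

const largeBufSize = 2 << 20 // hypothetical 2 MiB default buffer

// readObject copies an object of known size, allocating only as much
// buffer as the object actually needs (capped at largeBufSize).
func readObject(r io.Reader, size int64) ([]byte, error) {
	bufSize := int64(largeBufSize)
	if size < bufSize {
		bufSize = size // small object: skip the large allocation
	}
	if bufSize < 1 {
		bufSize = 1 // io.CopyBuffer panics on a zero-length buffer
	}
	var out bytes.Buffer
	_, err := io.CopyBuffer(&out, io.LimitReader(r, size), make([]byte, bufSize))
	return out.Bytes(), err
}

func main() {
	obj := bytes.NewReader([]byte("tiny object"))
	data, err := readObject(obj, 11)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", data)
}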

@@ -395,7 +395,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
if latestMeta.XLV1 {
partPath = pathJoin(object, fmt.Sprintf("part.%d", partNumber))
}
-readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
+readers[i] = newBitrotReader(disk, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
}
writers := make([]io.Writer, len(outDatedDisks))
for i, disk := range outDatedDisks {
@@ -811,7 +811,7 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
storageEndpoints := er.getEndpoints()
// Read metadata files from all the disks
-partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID, false)
+partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID)
if isAllNotFound(errs) {
err = toObjectErr(errFileNotFound, bucket, object)
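
Taken together, the two hunks remove the same buffer in two places: newBitrotReader no longer receives partsMetadata[i].Data, and readAllFileInfo loses its trailing boolean. A hedged sketch of that caller-side difference, with simplified, hypothetical signatures (the FileInfo fields and the readData flag name are assumptions, not the real MinIO API):

package main

import "fmt"

// FileInfo is a stand-in for MinIO's per-disk metadata record; the
// Data field models an optional in-memory copy of the object's bytes.
type FileInfo struct {
	Name string
	Data []byte // populated only when data is read eagerly
}

// Assumed pre-change shape: a readData flag makes the call also pull
// object bytes into FileInfo.Data, keeping large buffers alive even
// when the caller only needs metadata.
func readAllFileInfoOld(bucket, object, versionID string, readData bool) []FileInfo {
	fi := FileInfo{Name: object}
	if readData {
		fi.Data = make([]byte, 1<<20) // simulated eager 1 MiB read
	}
	return []FileInfo{fi}
}

// Post-change shape: metadata only; object bytes are streamed later
// through bitrot readers sized by the erasure shard size.
func readAllFileInfoNew(bucket, object, versionID string) []FileInfo {
	return []FileInfo{{Name: object}}
}

func main() {
	old := readAllFileInfoOld("bucket", "obj", "v1", true)
	cur := readAllFileInfoNew("bucket", "obj", "v1")
	fmt.Println(len(old[0].Data), len(cur[0].Data)) // 1048576 0
}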