do not use large buffers if not necessary (#11220)

Without this change, there is a performance
regression for small-object GETs; this restores
the overall speed to pre-'59d363' commit levels.
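
A rough sketch of the idea (hypothetical names, not the actual MinIO code): size the
read buffer to the object instead of always allocating a large one, so small-object
GETs skip the big allocation:

package main

import (
	"io"
	"os"
	"strings"
)

// defaultBufSize is a hypothetical "large" streaming buffer size.
const defaultBufSize = 2 * 1024 * 1024 // 2 MiB

// copyObject copies object data using a buffer no larger than needed:
// small objects get a right-sized buffer instead of a 2 MiB allocation.
func copyObject(dst io.Writer, src io.Reader, objectSize int64) (int64, error) {
	bufSize := int64(defaultBufSize)
	if objectSize > 0 && objectSize < bufSize {
		bufSize = objectSize // right-size for small objects
	}
	return io.CopyBuffer(dst, src, make([]byte, bufSize))
}

func main() {
	obj := "hello, small object"
	// A 19-byte GET allocates a 19-byte buffer, not 2 MiB.
	if _, err := copyObject(os.Stdout, strings.NewReader(obj), int64(len(obj))); err != nil {
		panic(err)
	}
}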
Author: Harshavardhana
Date: 2021-01-04 18:51:52 -08:00
Committed by: GitHub
Parent: cb7fc99368
Commit: d0027c3c41
24 changed files with 92 additions and 196 deletions


@@ -186,7 +186,7 @@ func TestListOnlineDisks(t *testing.T) {
 		t.Fatalf("Failed to putObject %v", err)
 	}
-	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
+	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
 	fi, err := getLatestFileInfo(ctx, partsMetadata, errs)
 	if err != nil {
 		t.Fatalf("Failed to getLatestFileInfo %v", err)
@@ -287,7 +287,7 @@ func TestDisksWithAllParts(t *testing.T) {
 		t.Fatalf("Failed to putObject %v", err)
 	}
-	_, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
+	_, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
 	readQuorum := len(erasureDisks) / 2
 	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
 		t.Fatalf("Failed to read xl meta data %v", reducedErr)
@@ -295,7 +295,7 @@ func TestDisksWithAllParts(t *testing.T) {
 	// Test that all disks are returned without any failures with
 	// unmodified meta data
-	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false)
+	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
 	if err != nil {
 		t.Fatalf("Failed to read xl meta data %v", err)
 	}