avoid repeated large allocations for large parts (#17968)

objects with 10,000 parts, when there are many of them, can
cause a large memory spike which can potentially
lead to OOM due to a lack of timely GC.

with the previous PR (#17963) having reduced memory usage
significantly, this PR reduces it further by ~80% under
repeated calls.

The scanner sub-system has no use for the slice of Parts();
it is better left empty.

```
benchmark                            old ns/op     new ns/op     delta
BenchmarkToFileInfo/ToFileInfo-8     295658        188143        -36.36%

benchmark                            old allocs     new allocs     delta
BenchmarkToFileInfo/ToFileInfo-8     61             60             -1.64%

benchmark                            old bytes     new bytes     delta
BenchmarkToFileInfo/ToFileInfo-8     1097210       227255        -79.29%
```
This commit is contained in:
Harshavardhana
2023-09-02 07:49:24 -07:00
committed by GitHub
parent 8208bcb896
commit 3995355150
9 changed files with 92 additions and 51 deletions

View File

@@ -485,7 +485,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) {
b.Fatal(err)
}
// List...
_, err = xl.ToFileInfo("volume", "path", ids[rng.Intn(size)], false)
_, err = xl.ToFileInfo("volume", "path", ids[rng.Intn(size)], false, true)
if err != nil {
b.Fatal(err)
}
@@ -503,7 +503,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) {
b.Fatal(err)
}
// List...
_, err = xl.ListVersions("volume", "path")
_, err = xl.ListVersions("volume", "path", true)
if err != nil {
b.Fatal(err)
}
@@ -518,7 +518,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) {
if buf == nil {
b.Fatal("buf == nil")
}
_, err = buf.ToFileInfo("volume", "path", ids[rng.Intn(size)])
_, err = buf.ToFileInfo("volume", "path", ids[rng.Intn(size)], true)
if err != nil {
b.Fatal(err)
}
@@ -533,7 +533,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) {
if buf == nil {
b.Fatal("buf == nil")
}
_, err = buf.ListVersions("volume", "path")
_, err = buf.ListVersions("volume", "path", true)
if err != nil {
b.Fatal(err)
}