reduce allocations on multi-disk clusters (#12311)

multi-disk clusters initialize a buffer pool per disk, which is
needlessly expensive and of little benefit to a running server
instance, since it prevents buffers from being re-used across
erasure sets.

this change ensures that buffers can be re-used across sets at the
drive level, which can save a significant amount of memory on
setups with many drives.
Harshavardhana authored on 2021-05-17 17:49:48 -07:00, committed by GitHub
parent d610578d84
commit 2daba018d6
3 changed files with 40 additions and 37 deletions
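
The buffer re-use described in the message amounts to one process-wide
pool shared by all drives instead of a pool per disk. Below is a minimal
sketch of that pattern, assuming a pool shaped like the xlPoolSmall
referenced in the diff; the real definition and size constant live
outside these hunks.

// Sketch only: one process-wide pool shared by every drive, replacing a
// pool allocated per disk. The name mirrors xlPoolSmall from the diff;
// the buffer size is an illustrative stand-in, not the real constant.
package pool

import "sync"

const blockSizeSmall = 32 * 1024 // illustrative buffer size

var xlPoolSmall = sync.Pool{
    New: func() interface{} {
        b := make([]byte, blockSizeSmall)
        return &b // a *[]byte, so Get/Put avoid re-boxing the slice header
    },
}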


@@ -164,7 +164,6 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
     h := algo.New()
     hashBuf := make([]byte, h.Size())
-    buf := make([]byte, shardSize)
     left := wantSize
     // Calculate the size of the bitrot file and compare
@@ -173,6 +172,9 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
         return errFileCorrupt
     }
+    bufp := xlPoolSmall.Get().(*[]byte)
+    defer xlPoolSmall.Put(bufp)
+
     for left > 0 {
         // Read expected hash...
         h.Reset()
@@ -186,13 +188,15 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
         if left < shardSize {
             shardSize = left
         }
-        read, err := io.CopyBuffer(h, io.LimitReader(r, shardSize), buf)
+        read, err := io.CopyBuffer(h, io.LimitReader(r, shardSize), *bufp)
         if err != nil {
             // Read's failed for object with right size, at different offsets.
-            return err
+            return errFileCorrupt
         }
         left -= read
-        if !bytes.Equal(h.Sum(nil), hashBuf) {
+        if !bytes.Equal(h.Sum(nil), hashBuf[:n]) {
             return errFileCorrupt
         }
     }
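
Usage note: the changed call site follows the standard borrow/use/return
pattern for a pooled buffer; the dereference is needed because the pool
hands out *[]byte while io.CopyBuffer takes a plain []byte. A
self-contained sketch of the pattern, with an assumed pool definition
standing in for xlPoolSmall:

package pool

import (
    "io"
    "sync"
)

var bufPool = sync.Pool{ // assumed stand-in for xlPoolSmall
    New: func() interface{} {
        b := make([]byte, 32*1024)
        return &b
    },
}

// copyWithPooledBuffer streams up to limit bytes from src to dst, reusing
// a pooled scratch buffer instead of allocating one per call.
func copyWithPooledBuffer(dst io.Writer, src io.Reader, limit int64) (int64, error) {
    bufp := bufPool.Get().(*[]byte) // borrow
    defer bufPool.Put(bufp)         // return for the next caller
    return io.CopyBuffer(dst, io.LimitReader(src, limit), *bufp)
}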