reduce allocations on multi-disk clusters (#12311)

multi-disk clusters initialize buffer pools
per disk, which is expensive and of little benefit
for a running server instance, and it prevents
buffers from being re-used across erasure sets.

this change hoists the pools to the package level
so buffers are re-used across sets at the drive
level, which can save a significant amount of
memory on setups with many drives.
Harshavardhana 2021-05-17 17:49:48 -07:00 committed by GitHub
parent d610578d84
commit 2daba018d6
3 changed files with 40 additions and 37 deletions
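
In a minimal sketch (simplified types; the pool size is illustrative, the real pools allocate aligned blocks), the change amounts to hoisting the sync.Pool values from per-drive struct fields to package-level variables:

package main

import "sync"

// Before: each xlStorage instance (one per drive) owned its own pools,
// so a server with N drives kept N independent sets of buffers alive.
type xlStorageBefore struct {
	poolSmall sync.Pool
}

// After: a single process-wide pool shared by every drive and erasure set.
var xlPoolSmall = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024) // illustrative; MinIO allocates aligned blocks
		return &b                  // storing *[]byte avoids an allocation on Put
	},
}

func main() {
	bufp := xlPoolSmall.Get().(*[]byte)
	defer xlPoolSmall.Put(bufp)
	_ = *bufp // all drives now draw from (and refill) the same pool
}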


@@ -164,7 +164,6 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
 	h := algo.New()
 	hashBuf := make([]byte, h.Size())
-	buf := make([]byte, shardSize)
 	left := wantSize
 	// Calculate the size of the bitrot file and compare
@@ -173,6 +172,9 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
 		return errFileCorrupt
 	}
+	bufp := xlPoolSmall.Get().(*[]byte)
+	defer xlPoolSmall.Put(bufp)
 	for left > 0 {
 		// Read expected hash...
 		h.Reset()
@@ -186,13 +188,15 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w
 		if left < shardSize {
 			shardSize = left
 		}
-		read, err := io.CopyBuffer(h, io.LimitReader(r, shardSize), buf)
+		read, err := io.CopyBuffer(h, io.LimitReader(r, shardSize), *bufp)
 		if err != nil {
 			// Read's failed for object with right size, at different offsets.
-			return err
+			return errFileCorrupt
 		}
 		left -= read
-		if !bytes.Equal(h.Sum(nil), hashBuf) {
+		if !bytes.Equal(h.Sum(nil), hashBuf[:n]) {
 			return errFileCorrupt
 		}
 	}
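
The loop above follows the standard pooled-buffer pattern: Get a *[]byte from the pool, defer the Put so the buffer is returned on every exit path, and hand the dereferenced slice to io.CopyBuffer so the copy loop allocates nothing per call. A self-contained sketch of that pattern (pool and sizes are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024) // illustrative size
		return &b                  // store *[]byte so Get/Put don't allocate
	},
}

// copyPooled copies src into dst using a pooled staging buffer.
func copyPooled(dst io.Writer, src io.Reader) (int64, error) {
	bufp := bufPool.Get().(*[]byte)
	defer bufPool.Put(bufp) // returned to the pool even on error paths
	return io.CopyBuffer(dst, src, *bufp)
}

func main() {
	var sink bytes.Buffer
	n, err := copyPooled(&sink, strings.NewReader("shard bytes"))
	fmt.Println(n, err) // 11 <nil>
}

The diff also tightens two details in the same loop: the comparison now uses hashBuf[:n], bounding the check to the bytes actually read by io.ReadFull, and a read failure on a right-sized object now maps to errFileCorrupt instead of surfacing the raw I/O error.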


@@ -261,7 +261,7 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad
 		// Always check data, if we got it.
 		if (len(meta.Data) > 0 || meta.Size == 0) && len(meta.Parts) > 0 {
 			checksumInfo := meta.Erasure.GetChecksumInfo(meta.Parts[0].Number)
-			dataErrs[i] = bitrotVerify(bytes.NewBuffer(meta.Data),
+			dataErrs[i] = bitrotVerify(bytes.NewReader(meta.Data),
 				int64(len(meta.Data)),
 				meta.Erasure.ShardFileSize(meta.Size),
 				checksumInfo.Algorithm,
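
The bytes.NewBuffer to bytes.NewReader switch is a small but real improvement: bytes.Buffer adopts the slice as mutable contents and exists to support writes, while bytes.Reader is a lighter read-only view (which also implements io.Seeker and io.ReaderAt), and read-only is all bitrotVerify needs. A tiny illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	data := []byte("inline part data")

	// Read-only view over the existing slice: no copy, no write API.
	r := bytes.NewReader(data)
	n, _ := io.Copy(io.Discard, r)
	fmt.Println(n) // 16
}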


@@ -98,6 +98,27 @@ func isValidVolname(volname string) bool {
 	return true
 }
+
+var (
+	xlPoolReallyLarge = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(blockSizeReallyLarge)
+			return &b
+		},
+	}
+	xlPoolLarge = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(blockSizeLarge)
+			return &b
+		},
+	}
+	xlPoolSmall = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(blockSizeSmall)
+			return &b
+		},
+	}
+)

 // xlStorage - implements StorageAPI interface.
 type xlStorage struct {
 	diskPath string
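
These pools hand out disk.AlignedBlock buffers because the O_DIRECT code paths below require page-aligned memory. As an illustration only — this is not MinIO's implementation of disk.AlignedBlock — such a helper can over-allocate and slice at the first aligned offset:

package main

import (
	"fmt"
	"unsafe"
)

const alignment = 4096 // O_DIRECT typically wants page-aligned buffers

// alignedBlock is an illustrative stand-in for disk.AlignedBlock:
// allocate extra, then slice at the first alignment-multiple address.
func alignedBlock(size int) []byte {
	buf := make([]byte, size+alignment)
	off := int(uintptr(unsafe.Pointer(&buf[0])) & (alignment - 1))
	if off != 0 {
		off = alignment - off
	}
	return buf[off : off+size]
}

func main() {
	b := alignedBlock(128 * 1024)
	fmt.Println(uintptr(unsafe.Pointer(&b[0])) % alignment) // 0
}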
@@ -105,10 +126,6 @@ type xlStorage struct {
 	globalSync bool
-	poolReallyLarge sync.Pool
-	poolLarge       sync.Pool
-	poolSmall       sync.Pool
 	rootDisk bool
 	diskID string
@@ -256,24 +273,6 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
 	p := &xlStorage{
 		diskPath: path,
 		endpoint: ep,
-		poolReallyLarge: sync.Pool{
-			New: func() interface{} {
-				b := disk.AlignedBlock(blockSizeReallyLarge)
-				return &b
-			},
-		},
-		poolLarge: sync.Pool{
-			New: func() interface{} {
-				b := disk.AlignedBlock(blockSizeLarge)
-				return &b
-			},
-		},
-		poolSmall: sync.Pool{
-			New: func() interface{} {
-				b := disk.AlignedBlock(blockSizeSmall)
-				return &b
-			},
-		},
 		globalSync: env.Get(config.EnvFSOSync, config.EnableOff) == config.EnableOn,
 		ctx:        GlobalContext,
 		rootDisk:   rootDisk,
@@ -1323,9 +1322,9 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
 	}
 	if o.buf == nil {
 		if o.smallFile {
-			o.bufp = o.s.poolSmall.Get().(*[]byte)
+			o.bufp = xlPoolSmall.Get().(*[]byte)
 		} else {
-			o.bufp = o.s.poolLarge.Get().(*[]byte)
+			o.bufp = xlPoolLarge.Get().(*[]byte)
 		}
 	}
 	if o.freshRead {
@@ -1367,9 +1366,9 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
 // Close - Release the buffer and close the file.
 func (o *odirectReader) Close() error {
 	if o.smallFile {
-		o.s.poolSmall.Put(o.bufp)
+		xlPoolSmall.Put(o.bufp)
 	} else {
-		o.s.poolLarge.Put(o.bufp)
+		xlPoolLarge.Put(o.bufp)
 	}
 	return o.f.Close()
 }
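
odirectReader borrows its buffer lazily on the first Read and returns it in Close, so a reader that is opened but never read pins no pooled memory. A simplified sketch of that lifecycle (pooledReader and its fields are hypothetical names, not the real odirectReader):

package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

var pool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 64*1024) // illustrative size
		return &b
	},
}

type pooledReader struct {
	src  io.Reader
	bufp *[]byte // borrowed from the pool on first Read
	rem  []byte  // unread tail of the current fill
}

func (r *pooledReader) Read(p []byte) (int, error) {
	if r.bufp == nil {
		r.bufp = pool.Get().(*[]byte) // lazy borrow, as in odirectReader
	}
	if len(r.rem) == 0 {
		n, err := r.src.Read(*r.bufp)
		if n == 0 {
			return 0, err
		}
		r.rem = (*r.bufp)[:n]
	}
	n := copy(p, r.rem)
	r.rem = r.rem[n:]
	return n, nil
}

func (r *pooledReader) Close() error {
	if r.bufp != nil {
		pool.Put(r.bufp) // hand the buffer back, as in odirectReader.Close
		r.bufp = nil
	}
	return nil
}

func main() {
	r := &pooledReader{src: strings.NewReader("hello")}
	defer r.Close()
	io.Copy(os.Stdout, r)
}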
@@ -1554,11 +1553,11 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
 	var bufp *[]byte
 	if fileSize > 0 && fileSize >= reallyLargeFileThreshold {
 		// use a larger 4MiB buffer for really large streams.
-		bufp = s.poolReallyLarge.Get().(*[]byte)
-		defer s.poolReallyLarge.Put(bufp)
+		bufp = xlPoolReallyLarge.Get().(*[]byte)
+		defer xlPoolReallyLarge.Put(bufp)
 	} else {
-		bufp = s.poolLarge.Get().(*[]byte)
-		defer s.poolLarge.Put(bufp)
+		bufp = xlPoolLarge.Get().(*[]byte)
+		defer xlPoolLarge.Put(bufp)
 	}
 	written, err := xioutil.CopyAligned(w, r, *bufp, fileSize)
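
CreateFile thus picks a pool by size class: streams at or above reallyLargeFileThreshold use the 4 MiB buffers, everything else the default large ones, and either way the buffer returns to the shared pool when the write finishes. The same branch could be factored into a helper; getWriteBuffer below is hypothetical (it does not exist in the codebase) and assumes the identifiers introduced in this diff:

// getWriteBuffer is a hypothetical refactor of the branch above:
// select the pool by size class and pair the buffer with its release.
func getWriteBuffer(fileSize int64) (bufp *[]byte, release func()) {
	if fileSize > 0 && fileSize >= reallyLargeFileThreshold {
		bufp = xlPoolReallyLarge.Get().(*[]byte)
		return bufp, func() { xlPoolReallyLarge.Put(bufp) }
	}
	bufp = xlPoolLarge.Get().(*[]byte)
	return bufp, func() { xlPoolLarge.Put(bufp) }
}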