reduce allocations on multi-disk clusters (#12311)
Multi-disk clusters initialize buffer pools per disk, which is expensive and of little use for a running server instance, since it prevents buffers from being reused across sets. This change shares the pools across all drives so that buffers can be reused across sets, which can save a significant amount of memory on setups with many drives.
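The underlying pattern is to hoist per-instance sync.Pool fields into package-level variables so every drive in the process draws from the same pools. Below is a minimal, self-contained sketch of that pattern; the names storage, poolLarge, and readSomething, the /mnt/disk* paths, and the plain make allocation are illustrative stand-ins, not the actual minio identifiers (the real code allocates via disk.AlignedBlock for O_DIRECT I/O).

package main

import (
    "fmt"
    "sync"
)

// Illustrative buffer size; the real code sizes its pools via the
// blockSizeSmall/Large/ReallyLarge constants.
const blockSizeLarge = 2 << 20

// A single package-level pool shared by every storage instance in the
// process. Before this change each xlStorage value carried its own
// pools, so an N-drive server could hold N independent sets of idle
// buffers that were never shared.
var poolLarge = sync.Pool{
    New: func() interface{} {
        b := make([]byte, blockSizeLarge)
        return &b
    },
}

// storage is a stand-in for xlStorage.
type storage struct{ diskPath string }

func (s *storage) readSomething() {
    // Borrow a buffer from the shared pool and return it when done;
    // any other drive can then reuse the same allocation.
    bufp := poolLarge.Get().(*[]byte)
    defer poolLarge.Put(bufp)
    fmt.Printf("%s: borrowed a %d-byte buffer\n", s.diskPath, len(*bufp))
}

func main() {
    for _, s := range []*storage{{"/mnt/disk1"}, {"/mnt/disk2"}} {
        s.readSomething() // both drives draw from the same pool
    }
}

Note that the pools store *[]byte rather than []byte: putting a pointer into the interface{} that sync.Pool holds avoids an extra allocation that boxing the slice header itself would incur on every Put.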
@@ -98,6 +98,27 @@ func isValidVolname(volname string) bool {
 	return true
 }
 
+var (
+	xlPoolReallyLarge = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(blockSizeReallyLarge)
+			return &b
+		},
+	}
+	xlPoolLarge = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(blockSizeLarge)
+			return &b
+		},
+	}
+	xlPoolSmall = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(blockSizeSmall)
+			return &b
+		},
+	}
+)
+
 // xlStorage - implements StorageAPI interface.
 type xlStorage struct {
 	diskPath string
@@ -105,10 +126,6 @@ type xlStorage struct {
 
 	globalSync bool
 
-	poolReallyLarge sync.Pool
-	poolLarge       sync.Pool
-	poolSmall       sync.Pool
-
 	rootDisk bool
 
 	diskID string
@@ -254,26 +271,8 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
 	}
 
 	p := &xlStorage{
-		diskPath: path,
-		endpoint: ep,
-		poolReallyLarge: sync.Pool{
-			New: func() interface{} {
-				b := disk.AlignedBlock(blockSizeReallyLarge)
-				return &b
-			},
-		},
-		poolLarge: sync.Pool{
-			New: func() interface{} {
-				b := disk.AlignedBlock(blockSizeLarge)
-				return &b
-			},
-		},
-		poolSmall: sync.Pool{
-			New: func() interface{} {
-				b := disk.AlignedBlock(blockSizeSmall)
-				return &b
-			},
-		},
+		diskPath: path,
+		endpoint: ep,
 		globalSync: env.Get(config.EnvFSOSync, config.EnableOff) == config.EnableOn,
 		ctx:        GlobalContext,
 		rootDisk:   rootDisk,
@@ -1323,9 +1322,9 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
 	}
 	if o.buf == nil {
 		if o.smallFile {
-			o.bufp = o.s.poolSmall.Get().(*[]byte)
+			o.bufp = xlPoolSmall.Get().(*[]byte)
 		} else {
-			o.bufp = o.s.poolLarge.Get().(*[]byte)
+			o.bufp = xlPoolLarge.Get().(*[]byte)
 		}
 	}
 	if o.freshRead {
@@ -1367,9 +1366,9 @@ func (o *odirectReader) Read(buf []byte) (n int, err error) {
 // Close - Release the buffer and close the file.
 func (o *odirectReader) Close() error {
 	if o.smallFile {
-		o.s.poolSmall.Put(o.bufp)
+		xlPoolSmall.Put(o.bufp)
 	} else {
-		o.s.poolLarge.Put(o.bufp)
+		xlPoolLarge.Put(o.bufp)
 	}
 	return o.f.Close()
 }
@@ -1554,11 +1553,11 @@ func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSiz
 	var bufp *[]byte
 	if fileSize > 0 && fileSize >= reallyLargeFileThreshold {
 		// use a larger 4MiB buffer for really large streams.
-		bufp = s.poolReallyLarge.Get().(*[]byte)
-		defer s.poolReallyLarge.Put(bufp)
+		bufp = xlPoolReallyLarge.Get().(*[]byte)
+		defer xlPoolReallyLarge.Put(bufp)
 	} else {
-		bufp = s.poolLarge.Get().(*[]byte)
-		defer s.poolLarge.Put(bufp)
+		bufp = xlPoolLarge.Get().(*[]byte)
+		defer xlPoolLarge.Put(bufp)
 	}
 
 	written, err := xioutil.CopyAligned(w, r, *bufp, fileSize)