Mirror of https://github.com/minio/minio.git
Reduce parallelReader allocs (#19558)
This commit is contained in:
  parent 5f774951b1
  commit ec816f3840

The change cuts per-request allocations on the erasure-coded read path: parallelReader stashes a single pooled buffer and slices it into per-reader shard buffers, the global byte pool becomes an atomic.Pointer so it can be published and read without locking, and the pool's methods now tolerate a nil receiver.
@@ -38,6 +38,7 @@ type parallelReader struct {
 	shardFileSize int64
 	buf           [][]byte
 	readerToBuf   []int
+	stashBuffer   []byte
 }
 
 // newParallelReader returns parallelReader.
@@ -46,6 +47,21 @@ func newParallelReader(readers []io.ReaderAt, e Erasure, offset, totalLength int
 	for i := range r2b {
 		r2b[i] = i
 	}
+	bufs := make([][]byte, len(readers))
+	// Fill buffers
+	b := globalBytePoolCap.Load().Get()
+	shardSize := int(e.ShardSize())
+	if cap(b) < len(readers)*shardSize {
+		// We should always have enough capacity, but older objects may be bigger.
+		globalBytePoolCap.Load().Put(b)
+		b = nil
+	} else {
+		// Seed the buffers.
+		for i := range bufs {
+			bufs[i] = b[i*shardSize : (i+1)*shardSize]
+		}
+	}
+
 	return &parallelReader{
 		readers:    readers,
 		orgReaders: readers,
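This hunk is the heart of the change: instead of allocating one buffer per reader, a single pooled buffer is sliced into per-reader shard windows, and the slicing is skipped when the pooled buffer is too small (older objects may have larger shards). A minimal, self-contained sketch of the seeding pattern; seedBuffers and the sizes are illustrative, not MinIO code:

package main

import "fmt"

// seedBuffers slices one backing buffer into n shard-sized windows.
// Leaving the windows nil mirrors newParallelReader's fallback when
// the backing buffer lacks capacity.
func seedBuffers(backing []byte, n, shardSize int) [][]byte {
	bufs := make([][]byte, n)
	if cap(backing) < n*shardSize {
		// Not enough capacity: leave entries nil so callers allocate lazily.
		return bufs
	}
	for i := range bufs {
		bufs[i] = backing[i*shardSize : (i+1)*shardSize]
	}
	return bufs
}

func main() {
	backing := make([]byte, 4*1024) // stand-in for a pooled buffer
	bufs := seedBuffers(backing, 4, 1024)
	fmt.Println(len(bufs), len(bufs[0])) // 4 1024
}

Seeding this way turns N small allocations per read into at most one pool round-trip.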
@@ -55,6 +71,15 @@ func newParallelReader(readers []io.ReaderAt, e Erasure, offset, totalLength int
 		shardFileSize: e.ShardFileSize(totalLength),
 		buf:           make([][]byte, len(readers)),
 		readerToBuf:   r2b,
+		stashBuffer:   b,
+	}
+}
+
+// Done will release any resources used by the parallelReader.
+func (p *parallelReader) Done() {
+	if p.stashBuffer != nil {
+		globalBytePoolCap.Load().Put(p.stashBuffer)
+		p.stashBuffer = nil
 	}
 }
 
@@ -224,6 +249,7 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.Read
 	if len(prefer) == len(readers) {
 		reader.preferReaders(prefer)
 	}
+	defer reader.Done()
 
 	startBlock := offset / e.blockSize
 	endBlock := (offset + length) / e.blockSize
@@ -294,6 +320,7 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
 	if len(readers) == len(prefer) {
 		reader.preferReaders(prefer)
 	}
+	defer reader.Done()
 
 	startBlock := int64(0)
 	endBlock := totalLength / e.blockSize
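Because the stashed buffer now outlives newParallelReader, Decode and Heal pair every reader with defer reader.Done(), so the buffer flows back to the pool on every exit path, and nil-ing the field makes a second Done call a no-op. A hedged sketch of that lifecycle; bytePool and shardReader are hypothetical stand-ins, not MinIO types:

package main

import "fmt"

// bytePool is a stand-in for MinIO's capped byte pool.
type bytePool struct{ c chan []byte }

func (p *bytePool) Get() []byte {
	select {
	case b := <-p.c:
		return b // reuse a pooled buffer
	default:
		return make([]byte, 1024) // pool empty: allocate
	}
}

func (p *bytePool) Put(b []byte) {
	select {
	case p.c <- b:
	default: // pool full: drop the buffer
	}
}

type shardReader struct {
	stash []byte
	pool  *bytePool
}

// Done releases the stashed buffer. Clearing the field makes repeated
// calls safe, as in parallelReader.Done.
func (r *shardReader) Done() {
	if r.stash != nil {
		r.pool.Put(r.stash)
		r.stash = nil
	}
}

func main() {
	p := &bytePool{c: make(chan []byte, 4)}
	r := &shardReader{stash: p.Get(), pool: p}
	defer r.Done() // runs on every exit path
	fmt.Println(len(r.stash))
	r.Done() // explicit call; the deferred one becomes a no-op
}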
@@ -679,12 +679,12 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 			// Account for padding and forced compression overhead and encryption.
 			buffer = make([]byte, data.ActualSize()+256+32+32, data.ActualSize()*2+512)
 		} else {
-			buffer = globalBytePoolCap.Get()
-			defer globalBytePoolCap.Put(buffer)
+			buffer = globalBytePoolCap.Load().Get()
+			defer globalBytePoolCap.Load().Put(buffer)
 		}
 	case size >= fi.Erasure.BlockSize:
-		buffer = globalBytePoolCap.Get()
-		defer globalBytePoolCap.Put(buffer)
+		buffer = globalBytePoolCap.Load().Get()
+		defer globalBytePoolCap.Load().Put(buffer)
 	case size < fi.Erasure.BlockSize:
 		// No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smaller.
 		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
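The write-path hunks here and below are mechanical: every pool access gains a .Load(). The sizing strategy around them is unchanged and worth seeing in isolation: pooled block-size buffers for large or unknown-size writes, an exact-length allocation with spare capacity for small ones. A sketch of that choice; pickBuffer and the 16-byte headroom are illustrative, the real code derives headroom from the erasure block counts:

package main

import "fmt"

const blockSize = 1 << 20 // stands in for fi.Erasure.BlockSize

// pickBuffer mirrors the switch in PutObjectPart: small payloads get an
// exact-length buffer with spare capacity; block-sized or unknown (-1)
// payloads come from the shared pool.
func pickBuffer(size int64, fromPool func() []byte) []byte {
	switch {
	case size == 0:
		return make([]byte, 1) // at least one byte to reach EOF
	case size >= blockSize || size == -1:
		return fromPool()
	default:
		return make([]byte, size, 2*size+16)
	}
}

func main() {
	pool := func() []byte { return make([]byte, blockSize) }
	b := pickBuffer(4096, pool)
	fmt.Println(len(b), cap(b)) // 4096 8208
}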
@@ -705,10 +705,11 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	if data.Size() > bigFileThreshold {
 		// Add input readahead.
 		// We use 2 buffers, so we always have a full buffer of input.
-		bufA := globalBytePoolCap.Get()
-		bufB := globalBytePoolCap.Get()
-		defer globalBytePoolCap.Put(bufA)
-		defer globalBytePoolCap.Put(bufB)
+		pool := globalBytePoolCap.Load()
+		bufA := pool.Get()
+		bufB := pool.Get()
+		defer pool.Put(bufA)
+		defer pool.Put(bufB)
 		ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]})
 		if err == nil {
 			toEncode = ra
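Note the second micro-optimization in this hunk: the pool pointer is loaded once into a local, so both buffers come from, and return to, the same pool instance even if the global is swapped mid-function, and the three extra atomic loads disappear. In miniature; bufPool and encode are hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
)

type bufPool struct{ size int }

func (p *bufPool) Get() []byte  { return make([]byte, p.size) }
func (p *bufPool) Put(b []byte) {}

var globalPool atomic.Pointer[bufPool]

func encode() {
	// One atomic load; bufA and bufB are guaranteed to use the same
	// pool even if another goroutine swaps globalPool between Gets.
	p := globalPool.Load()
	bufA := p.Get()
	bufB := p.Get()
	defer p.Put(bufA)
	defer p.Put(bufB)
	fmt.Println(len(bufA), len(bufB))
}

func main() {
	globalPool.Store(&bufPool{size: 64})
	encode()
}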
@@ -1138,8 +1138,8 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 	case size == 0:
 		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
 	case size >= fi.Erasure.BlockSize:
-		buffer = globalBytePoolCap.Get()
-		defer globalBytePoolCap.Put(buffer)
+		buffer = globalBytePoolCap.Load().Get()
+		defer globalBytePoolCap.Load().Put(buffer)
 	case size < fi.Erasure.BlockSize:
 		// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
 		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
@@ -1388,8 +1388,8 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	case size == 0:
 		buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
 	case size >= fi.Erasure.BlockSize || size == -1:
-		buffer = globalBytePoolCap.Get()
-		defer globalBytePoolCap.Put(buffer)
+		buffer = globalBytePoolCap.Load().Get()
+		defer globalBytePoolCap.Load().Put(buffer)
 	case size < fi.Erasure.BlockSize:
 		// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller.
 		buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
@@ -1451,10 +1451,11 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	toEncode := io.Reader(data)
 	if data.Size() >= bigFileThreshold {
 		// We use 2 buffers, so we always have a full buffer of input.
-		bufA := globalBytePoolCap.Get()
-		bufB := globalBytePoolCap.Get()
-		defer globalBytePoolCap.Put(bufA)
-		defer globalBytePoolCap.Put(bufB)
+		pool := globalBytePoolCap.Load()
+		bufA := pool.Get()
+		bufB := pool.Get()
+		defer pool.Put(bufA)
+		defer pool.Put(bufB)
 		ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]})
 		if err == nil {
 			toEncode = ra
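Both big-file paths hand two caller-owned buffers to klauspost/readahead, which fills one in the background while the other is consumed, so the encoder always has a full buffer of input. A sketch of that usage, assuming readahead's buffered reader exposes Close as documented; sizes are illustrative:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/readahead"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<16))

	// Two caller-owned buffers: one is read from while the other fills.
	bufA := make([]byte, 32<<10)
	bufB := make([]byte, 32<<10)

	ra, err := readahead.NewReaderBuffer(src, [][]byte{bufA, bufB})
	if err != nil {
		// Fall back to the plain reader, as the MinIO code does.
		fmt.Println("readahead unavailable:", err)
		return
	}
	defer ra.Close()

	n, _ := io.Copy(io.Discard, ra)
	fmt.Println("copied", n, "bytes")
}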
@@ -103,8 +103,9 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
 
 	// Initialize byte pool once for all sets, bpool size is set to
 	// setCount * setDriveCount with each memory upto blockSizeV2.
-	globalBytePoolCap = bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)
-	globalBytePoolCap.Populate()
+	buffers := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)
+	buffers.Populate()
+	globalBytePoolCap.Store(buffers)
 
 	var localDrives []StorageAPI
 	local := endpointServerPools.FirstLocal()
@@ -241,7 +241,7 @@ var (
 	globalBucketMonitor *bandwidth.Monitor
 	globalPolicySys     *PolicySys
 	globalIAMSys        *IAMSys
-	globalBytePoolCap *bpool.BytePoolCap
+	globalBytePoolCap atomic.Pointer[bpool.BytePoolCap]
 
 	globalLifecycleSys       *LifecycleSys
 	globalBucketSSEConfigSys *BucketSSEConfigSys
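Turning the global into an atomic.Pointer[bpool.BytePoolCap] makes initialization a single atomic publish (build, Populate, then Store) and every read lock-free; the trade-off is a window before Store where Load returns nil. The publish/consume pattern in miniature; bytePool is a stand-in:

package main

import (
	"fmt"
	"sync/atomic"
)

type bytePool struct{ width int }

var globalPool atomic.Pointer[bytePool]

func main() {
	// Before Store, Load returns nil: readers must tolerate that window.
	fmt.Println(globalPool.Load() == nil) // true

	// Build and populate fully, then publish in one atomic step; no
	// mutex is needed for any subsequent reads.
	p := &bytePool{width: 1 << 20}
	globalPool.Store(p)

	fmt.Println(globalPool.Load().width)
}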
@@ -52,6 +52,9 @@ func (bp *BytePoolCap) Populate() {
 // Get gets a []byte from the BytePool, or creates a new one if none are
 // available in the pool.
 func (bp *BytePoolCap) Get() (b []byte) {
+	if bp == nil {
+		return nil
+	}
 	select {
 	case b = <-bp.c:
 		// reuse existing buffer
@@ -68,6 +71,9 @@ func (bp *BytePoolCap) Get() (b []byte) {
 
 // Put returns the given Buffer to the BytePool.
 func (bp *BytePoolCap) Put(b []byte) {
+	if bp == nil {
+		return
+	}
 	select {
 	case bp.c <- b:
 		// buffer went back into pool
@@ -78,10 +84,16 @@ func (bp *BytePoolCap) Put(b []byte) {
 
 // Width returns the width of the byte arrays in this pool.
 func (bp *BytePoolCap) Width() (n int) {
+	if bp == nil {
+		return 0
+	}
 	return bp.w
 }
 
 // WidthCap returns the cap width of the byte arrays in this pool.
 func (bp *BytePoolCap) WidthCap() (n int) {
+	if bp == nil {
+		return 0
+	}
 	return bp.wcap
 }
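The nil checks added to Get, Put, Width, and WidthCap close the pre-Store window: Go permits calling a method on a nil pointer receiver as long as the method never dereferences it, so globalBytePoolCap.Load().Get() degrades gracefully into a plain allocation before the pool is published. A minimal demonstration; bytePool is a stand-in for bpool.BytePoolCap:

package main

import "fmt"

type bytePool struct {
	c    chan []byte
	w    int
	wcap int
}

// Get returns nil on a nil pool, so callers treat an unpublished pool
// as "allocate yourself".
func (bp *bytePool) Get() []byte {
	if bp == nil {
		return nil
	}
	select {
	case b := <-bp.c:
		return b
	default:
		return make([]byte, bp.w, bp.wcap)
	}
}

// Width is safe on a nil receiver and reports zero.
func (bp *bytePool) Width() int {
	if bp == nil {
		return 0
	}
	return bp.w
}

func main() {
	var bp *bytePool // nil: simulates Load() before Store()
	fmt.Println(bp.Get() == nil, bp.Width()) // true 0
}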