Mirror of https://github.com/minio/minio.git, synced 2024-12-25 06:35:56 -05:00
fix: increase the tiering part size to 128MiB (#19424)

also introduce an 8MiB buffer to read from for bigger parts
This commit is contained in:
parent 04101d472f
commit c957e0d426
@@ -142,7 +142,7 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 		// total_ram / ram_per_request
 		// ram_per_request is (2MiB+128KiB) * driveCount \
 		//    + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
-		blockSize := xioutil.BlockSizeLarge + xioutil.BlockSizeSmall
+		blockSize := xioutil.LargeBlock + xioutil.SmallBlock
 		apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
 		if globalIsDistErasure {
 			logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode)
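For a rough sense of what this sizing formula yields, here is a back-of-the-envelope sketch with hypothetical inputs (16 GiB of usable memory, 16 drives per erasure set). It is not MinIO code; the real init() derives maxMem and maxSetDrives from the running system.

package main

import "fmt"

func main() {
	const (
		kib = 1 << 10
		mib = 1 << 20
		gib = 1 << 30

		largeBlock  = 1 * mib  // xioutil.LargeBlock
		smallBlock  = 32 * kib // xioutil.SmallBlock
		blockSizeV1 = 10 * mib // default erasure block size v1
		blockSizeV2 = 1 * mib  // default erasure block size v2
	)

	maxMem := uint64(16 * gib) // hypothetical available memory
	maxSetDrives := 16         // hypothetical drives per erasure set

	blockSize := largeBlock + smallBlock
	perRequest := uint64(maxSetDrives*blockSize) + blockSizeV1*2 + blockSizeV2*2
	fmt.Println(maxMem / perRequest) // prints 425: roughly 425 concurrent requests per node
}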
@@ -450,7 +450,7 @@ func (r *RenameDataInlineHandlerParams) Recycle() {
 	if r == nil {
 		return
 	}
-	if cap(r.FI.Data) >= xioutil.BlockSizeSmall {
+	if cap(r.FI.Data) >= xioutil.SmallBlock {
 		grid.PutByteBuffer(r.FI.Data)
 		r.FI.Data = nil
 	}
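The guard above only returns buffers with at least SmallBlock (32 KiB) of capacity to the pool. The snippet below is a generic sketch of that pattern with a plain sync.Pool standing in for grid.PutByteBuffer, whose internals are outside this diff; smaller buffers are simply left for the garbage collector.

package sketch

import "sync"

const smallBlock = 32 << 10 // 32 KiB, mirrors xioutil.SmallBlock

var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, smallBlock)
		return &b
	},
}

// recycle returns the buffer to the pool only when it is large enough to be
// worth reusing; anything smaller is dropped and reclaimed by the GC.
func recycle(data []byte) {
	if cap(data) >= smallBlock {
		bufPool.Put(&data)
	}
}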
@@ -595,6 +595,8 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
 	// Windows can lock up with this optimization, so we fall back to regular copy.
 	sr, ok := rc.(*sendFileReader)
 	if ok {
+		// Sendfile sends in 4MiB chunks per sendfile syscall which is more than enough
+		// for most setups.
 		_, err = rf.ReadFrom(sr.Reader)
 		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
 			storageLogIf(r.Context(), err)
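The fast path above hands the file reader to the response writer's ReadFrom so the kernel can stream it with sendfile(2). The sketch below shows the general shape of that optimization in plain net/http terms; it is illustrative only and not the MinIO handler, which also carries the Windows fallback mentioned in the comment.

package sketch

import (
	"io"
	"net/http"
	"os"
)

// serveFile streams f to the client. When the ResponseWriter implements
// io.ReaderFrom and the source is an *os.File, ReadFrom can use sendfile(2)
// on supported platforms instead of copying through userspace buffers.
func serveFile(w http.ResponseWriter, f *os.File) error {
	if rf, ok := w.(io.ReaderFrom); ok {
		_, err := rf.ReadFrom(f)
		return err
	}
	_, err := io.Copy(w, f) // regular userspace copy fallback
	return err
}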
@@ -42,7 +42,7 @@ const (
 	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
 	maxPartsCount             = 10000
 	maxPartSize               = 1024 * 1024 * 1024 * 5
-	minPartSize               = 1024 * 1024 * 64 // chosen by us to be optimal for HDDs
+	minPartSize               = 1024 * 1024 * 128 // chosen by us to be optimal for HDDs
 )

 // optimalPartInfo - calculate the optimal part info for a given
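With minPartSize raised from 64 MiB to 128 MiB, any tiered object up to 128 MiB x 10000 parts = 1.25 TiB can be uploaded at the minimum part size; larger objects need proportionally bigger parts, up to maxPartSize. The snippet below is a simplified sketch of the kind of calculation a helper like optimalPartInfo performs; the real rounding and return values may differ.

package sketch

const (
	maxPartsCount = 10000
	minPartSize   = 1024 * 1024 * 128 // 128 MiB after this change
)

// partSizeFor picks a part size that keeps objectSize within maxPartsCount
// parts, but never goes below minPartSize.
func partSizeFor(objectSize int64) int64 {
	// ceil(objectSize / maxPartsCount)
	size := (objectSize + maxPartsCount - 1) / maxPartsCount
	// round up to the next multiple of minPartSize
	return ((size + minPartSize - 1) / minPartSize) * minPartSize
}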
@@ -2131,11 +2131,15 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz

 	var bufp *[]byte
 	switch {
-	case fileSize > 0 && fileSize >= xioutil.BlockSizeReallyLarge:
+	case fileSize > 0 && fileSize >= xioutil.XXLargeBlock*2:
+		// use a larger 8MiB buffer for really, really large streams.
+		bufp = xioutil.ODirectPoolXXLarge.Get().(*[]byte)
+		defer xioutil.ODirectPoolXXLarge.Put(bufp)
+	case fileSize > 0 && fileSize >= xioutil.XLargeBlock:
 		// use a larger 4MiB buffer for really large streams.
 		bufp = xioutil.ODirectPoolXLarge.Get().(*[]byte)
 		defer xioutil.ODirectPoolXLarge.Put(bufp)
-	case fileSize <= xioutil.BlockSizeSmall:
+	case fileSize <= xioutil.SmallBlock:
 		bufp = xioutil.ODirectPoolSmall.Get().(*[]byte)
 		defer xioutil.ODirectPoolSmall.Put(bufp)
 	default:
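Read against the renamed constants, the switch now has three explicit tiers: files of 16 MiB (2 x XXLargeBlock) and up borrow an 8 MiB O_DIRECT buffer, files of at least 4 MiB a 4 MiB buffer, and files of at most 32 KiB a small buffer, with everything else falling through to the default pool outside this hunk. The sketch below restates that selection as a standalone function for illustration only; the default branch returns 0 because it is not shown in the diff.

package sketch

const (
	smallBlock   = 32 << 10 // xioutil.SmallBlock
	xLargeBlock  = 4 << 20  // xioutil.XLargeBlock
	xxLargeBlock = 8 << 20  // xioutil.XXLargeBlock
)

// bufferSizeFor mirrors the tier selection above: the returned value is the
// O_DIRECT buffer size that would be borrowed for a write of fileSize bytes.
// 0 stands for the default pool, whose branch is outside the hunk.
func bufferSizeFor(fileSize int64) int64 {
	switch {
	case fileSize > 0 && fileSize >= xxLargeBlock*2: // 16 MiB and up
		return xxLargeBlock // 8 MiB buffer
	case fileSize > 0 && fileSize >= xLargeBlock: // 4 MiB and up
		return xLargeBlock // 4 MiB buffer
	case fileSize <= smallBlock: // 32 KiB and below
		return smallBlock // 32 KiB buffer
	default:
		return 0
	}
}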
@@ -34,28 +34,35 @@ import (

 // Block sizes constant.
 const (
-	BlockSizeSmall       = 32 * humanize.KiByte // Default r/w block size for smaller objects.
-	BlockSizeLarge       = 1 * humanize.MiByte  // Default r/w block size for normal objects.
-	BlockSizeReallyLarge = 4 * humanize.MiByte  // Default r/w block size for very large objects.
+	SmallBlock   = 32 * humanize.KiByte // Default r/w block size for smaller objects.
+	LargeBlock   = 1 * humanize.MiByte  // Default r/w block size for normal objects.
+	XLargeBlock  = 4 * humanize.MiByte  // Default r/w block size for very large objects.
+	XXLargeBlock = 8 * humanize.MiByte  // Default r/w block size for very very large objects.
 )

 // aligned sync.Pool's
 var (
+	ODirectPoolXXLarge = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(XXLargeBlock)
+			return &b
+		},
+	}
 	ODirectPoolXLarge = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeReallyLarge)
+			b := disk.AlignedBlock(XLargeBlock)
 			return &b
 		},
 	}
 	ODirectPoolLarge = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeLarge)
+			b := disk.AlignedBlock(LargeBlock)
 			return &b
 		},
 	}
 	ODirectPoolSmall = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeSmall)
+			b := disk.AlignedBlock(SmallBlock)
 			return &b
 		},
 	}
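The new ODirectPoolXXLarge follows the same pattern as the existing pools: a sync.Pool hands out pointers to disk-aligned byte slices so O_DIRECT writes can reuse 8 MiB allocations instead of allocating per request. Below is a minimal sketch of how such a pool is typically consumed at a call site; the alignedBlock helper merely stands in for disk.AlignedBlock, and copyWithPooledBuffer is a hypothetical helper, not a MinIO function.

package sketch

import (
	"io"
	"os"
	"sync"
)

// alignedBlock stands in for disk.AlignedBlock; the real helper returns a
// slice aligned for O_DIRECT I/O.
func alignedBlock(n int) []byte { return make([]byte, n) }

var xxLargePool = sync.Pool{
	New: func() interface{} {
		b := alignedBlock(8 << 20) // 8 MiB, analogous to XXLargeBlock
		return &b
	},
}

// copyWithPooledBuffer borrows an 8 MiB buffer, uses it for the copy, and
// returns it to the pool afterwards.
func copyWithPooledBuffer(dst *os.File, src io.Reader) (int64, error) {
	bufp := xxLargePool.Get().(*[]byte)
	defer xxLargePool.Put(bufp)
	return io.CopyBuffer(dst, src, *bufp)
}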