Mirror of https://github.com/minio/minio.git, synced 2025-04-25 12:34:03 -04:00
fix: increase the tiering part size to 128MiB (#19424)
also introduce an 8MiB read buffer for bigger parts
This commit is contained in: parent 04101d472f, commit c957e0d426
@@ -142,7 +142,7 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 		// total_ram / ram_per_request
 		// ram_per_request is (2MiB+128KiB) * driveCount \
 		//    + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
-		blockSize := xioutil.BlockSizeLarge + xioutil.BlockSizeSmall
+		blockSize := xioutil.LargeBlock + xioutil.SmallBlock
 		apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
 		if globalIsDistErasure {
 			logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode)
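To make the sizing concrete, here is a hedged back-of-the-envelope sketch of the per-request memory formula in this hunk. The inputs (32GiB of RAM, 16 drives per erasure set) are made up for illustration, and blockSizeV1/blockSizeV2 are taken as the 10MiB/1MiB defaults named in the source comment:

package main

import "fmt"

const (
	KiB = 1 << 10
	MiB = 1 << 20
	GiB = 1 << 30

	// Mirrors of the constants referenced in the hunk above.
	largeBlock  = 1 * MiB
	smallBlock  = 32 * KiB
	blockSizeV1 = 10 * MiB // default erasure block size v1
	blockSizeV2 = 1 * MiB  // default erasure block size v2
)

func main() {
	maxMem := uint64(32 * GiB) // assumed available memory
	maxSetDrives := 16         // assumed largest erasure set

	// ram_per_request = driveCount*(LargeBlock+SmallBlock) + 2*blockSizeV1 + 2*blockSizeV2
	blockSize := largeBlock + smallBlock
	perRequest := uint64(maxSetDrives*blockSize + 2*blockSizeV1 + 2*blockSizeV2)

	fmt.Println("requests per node:", maxMem/perRequest) // ~851 for these inputs
}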
@@ -450,7 +450,7 @@ func (r *RenameDataInlineHandlerParams) Recycle() {
 	if r == nil {
 		return
 	}
-	if cap(r.FI.Data) >= xioutil.BlockSizeSmall {
+	if cap(r.FI.Data) >= xioutil.SmallBlock {
 		grid.PutByteBuffer(r.FI.Data)
 		r.FI.Data = nil
 	}
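The capacity guard here is the usual way to keep undersized buffers from poisoning a shared pool: only slices at least as large as the pool's block size are handed back. A minimal standalone sketch of that pattern, with the pool and sizes invented for illustration (the internals of the real grid.PutByteBuffer are not shown in this commit):

package main

import "sync"

const smallBlock = 32 << 10 // 32KiB, mirroring xioutil.SmallBlock

// bufPool hands out smallBlock-sized byte slices.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, smallBlock)
		return &b
	},
}

// recycle returns buf to the pool only if it is big enough to be
// useful to the next caller; smaller slices are left for the GC.
func recycle(buf []byte) {
	if cap(buf) >= smallBlock {
		buf = buf[:cap(buf)]
		bufPool.Put(&buf)
	}
}

func main() {
	bp := bufPool.Get().(*[]byte)
	// ... use *bp ...
	recycle(*bp)
}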
@@ -595,6 +595,8 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
 	// Windows can lock up with this optimization, so we fall back to regular copy.
 	sr, ok := rc.(*sendFileReader)
 	if ok {
+		// Sendfile sends in 4MiB chunks per sendfile syscall which is more than enough
+		// for most setups.
 		_, err = rf.ReadFrom(sr.Reader)
 		if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients
 			storageLogIf(r.Context(), err)
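For context, the fast path above leans on Go's io.ReaderFrom: when the writer's underlying connection implements ReadFrom, the runtime can move bytes with sendfile instead of a userspace copy loop. A hedged sketch of that type-assertion pattern, with the handler and file path invented for illustration:

package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

// serveFile streams f to the client. If the ResponseWriter supports
// io.ReaderFrom (the default net/http server's does), ReadFrom lets the
// kernel use sendfile where the platform supports it.
func serveFile(w http.ResponseWriter, f *os.File) error {
	if rf, ok := w.(io.ReaderFrom); ok {
		_, err := rf.ReadFrom(f) // zero-copy path
		return err
	}
	_, err := io.Copy(w, f) // portable fallback
	return err
}

func main() {
	http.HandleFunc("/file", func(w http.ResponseWriter, r *http.Request) {
		f, err := os.Open("/tmp/example.dat") // hypothetical path
		if err != nil {
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		defer f.Close()
		serveFile(w, f)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}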
@@ -42,7 +42,7 @@ const (
 	maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
 	maxPartsCount             = 10000
 	maxPartSize               = 1024 * 1024 * 1024 * 5
-	minPartSize               = 1024 * 1024 * 64 // chosen by us to be optimal for HDDs
+	minPartSize               = 1024 * 1024 * 128 // chosen by us to be optimal for HDDs
 )
 
 // optimalPartInfo - calculate the optimal part info for a given
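This is the change the commit title refers to: the part-size floor for tiering moves from 64MiB to 128MiB. A hedged sketch of the arithmetic these constants feed into (the real optimalPartInfo may differ in detail): size each part as the ceiling of objectSize/maxPartsCount, rounded up to a multiple of minPartSize.

package main

import "fmt"

const (
	maxPartsCount = 10000
	minPartSize   = 128 << 20 // 128MiB after this change
)

// optimalPartSize picks the smallest part size that is a multiple of
// minPartSize and still fits the object into maxPartsCount parts.
func optimalPartSize(objectSize int64) int64 {
	perPart := (objectSize + maxPartsCount - 1) / maxPartsCount // ceil division
	multiples := (perPart + minPartSize - 1) / minPartSize      // round up to a minPartSize multiple
	if multiples < 1 {
		multiples = 1
	}
	return multiples * minPartSize
}

func main() {
	const TiB = int64(1) << 40
	fmt.Println(optimalPartSize(5 * TiB)) // 640MiB parts for a 5TiB object
	fmt.Println(optimalPartSize(1 << 30)) // 128MiB floor for a 1GiB object
}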
@@ -2131,11 +2131,15 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz
 
 	var bufp *[]byte
 	switch {
-	case fileSize > 0 && fileSize >= xioutil.BlockSizeReallyLarge:
+	case fileSize > 0 && fileSize >= xioutil.XXLargeBlock*2:
+		// use a larger 8MiB buffer for really, really large streams.
+		bufp = xioutil.ODirectPoolXXLarge.Get().(*[]byte)
+		defer xioutil.ODirectPoolXXLarge.Put(bufp)
+	case fileSize > 0 && fileSize >= xioutil.XLargeBlock:
 		// use a larger 4MiB buffer for really large streams.
 		bufp = xioutil.ODirectPoolXLarge.Get().(*[]byte)
 		defer xioutil.ODirectPoolXLarge.Put(bufp)
-	case fileSize <= xioutil.BlockSizeSmall:
+	case fileSize <= xioutil.SmallBlock:
 		bufp = xioutil.ODirectPoolSmall.Get().(*[]byte)
 		defer xioutil.ODirectPoolSmall.Put(bufp)
 	default:
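Note the new threshold is XXLargeBlock*2, not XXLargeBlock: the 8MiB buffer is only taken when the stream will fill it at least twice. A hedged standalone sketch of this size-tiered selection, with names invented and the tiers reduced to plain numbers so it runs outside MinIO (the default tier is assumed to be the 1MiB large block, since the hunk cuts off before that branch's body):

package main

import "fmt"

const (
	KiB = 1 << 10
	MiB = 1 << 20

	smallBlock   = 32 * KiB
	largeBlock   = 1 * MiB
	xlargeBlock  = 4 * MiB
	xxlargeBlock = 8 * MiB
)

// pickBufSize mirrors the switch above: big streams get big buffers,
// tiny writes get the 32KiB buffer, everything else the default tier.
func pickBufSize(fileSize int64) int {
	switch {
	case fileSize > 0 && fileSize >= xxlargeBlock*2:
		return xxlargeBlock
	case fileSize > 0 && fileSize >= xlargeBlock:
		return xlargeBlock
	case fileSize <= smallBlock:
		return smallBlock
	default:
		return largeBlock // assumption; not shown in this hunk
	}
}

func main() {
	for _, sz := range []int64{8 * KiB, 6 * MiB, 64 * MiB} {
		fmt.Println(sz, "->", pickBufSize(sz))
	}
}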
@@ -34,28 +34,35 @@ import (
 
 // Block sizes constant.
 const (
-	BlockSizeSmall       = 32 * humanize.KiByte // Default r/w block size for smaller objects.
-	BlockSizeLarge       = 1 * humanize.MiByte  // Default r/w block size for normal objects.
-	BlockSizeReallyLarge = 4 * humanize.MiByte  // Default r/w block size for very large objects.
+	SmallBlock   = 32 * humanize.KiByte // Default r/w block size for smaller objects.
+	LargeBlock   = 1 * humanize.MiByte  // Default r/w block size for normal objects.
+	XLargeBlock  = 4 * humanize.MiByte  // Default r/w block size for very large objects.
+	XXLargeBlock = 8 * humanize.MiByte  // Default r/w block size for very very large objects.
 )
 
 // aligned sync.Pool's
 var (
+	ODirectPoolXXLarge = sync.Pool{
+		New: func() interface{} {
+			b := disk.AlignedBlock(XXLargeBlock)
+			return &b
+		},
+	}
 	ODirectPoolXLarge = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeReallyLarge)
+			b := disk.AlignedBlock(XLargeBlock)
 			return &b
 		},
 	}
 	ODirectPoolLarge = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeLarge)
+			b := disk.AlignedBlock(LargeBlock)
 			return &b
 		},
 	}
 	ODirectPoolSmall = sync.Pool{
 		New: func() interface{} {
-			b := disk.AlignedBlock(BlockSizeSmall)
+			b := disk.AlignedBlock(SmallBlock)
 			return &b
 		},
 	}
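One detail worth calling out: the pools traffic in *[]byte rather than []byte, because storing a bare slice in a sync.Pool boxes the slice header into an interface value and allocates on every Put. A minimal runnable sketch of the same shape, using make in place of disk.AlignedBlock (the real helper returns an O_DIRECT-aligned block):

package main

import (
	"fmt"
	"sync"
)

const xxLargeBlock = 8 << 20 // 8MiB, as added by this commit

// A pointer to a slice fits in an interface value without allocating,
// so Get/Put stay allocation-free in the steady state.
var pool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, xxLargeBlock) // stand-in for disk.AlignedBlock(XXLargeBlock)
		return &b
	},
}

func main() {
	bufp := pool.Get().(*[]byte)
	defer pool.Put(bufp)
	fmt.Println("buffer size:", len(*bufp)) // 8388608
}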