Mirror of https://github.com/minio/minio.git (synced 2025-03-31 17:53:43 -04:00)
avoid using 10MiB EC buffers in maxAPI calculations (#19665)
The max-requests-per-node value is overly conservative, causing premature serialization of calls; avoid the 10MiB erasure block size v1 buffers in the calculation for newer deployments.
Parent: 4afb59e63f
Commit: da3e7747ca
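A rough sketch (not part of the commit) of the arithmetic behind this change, using the buffer sizes from the comments in the diff below; the 32 GiB memory budget and the 16-drive erasure set are hypothetical numbers chosen only to illustrate the effect:

package main

import "fmt"

const (
	kib = 1 << 10
	mib = 1 << 20

	perDriveBuffer = 1*mib + 32*kib // (1MiB+32KiB) per drive, per request
	blockSizeV1    = 10 * mib       // default erasure block size v1 (legacy only)
	blockSizeV2    = 1 * mib        // default erasure block size v2
)

func main() {
	// Hypothetical node: 32 GiB usable memory and 16 drives per erasure set.
	maxMem := uint64(32 << 30)
	maxSetDrives := 16

	// Legacy (CRCMOD) deployments keep reserving the two 10MiB v1 buffers per request.
	legacyPerReq := uint64(maxSetDrives*perDriveBuffer + 2*blockSizeV1 + 2*blockSizeV2)

	// Newer deployments only reserve the two 1MiB v2 buffers.
	newPerReq := uint64(maxSetDrives*perDriveBuffer + 2*blockSizeV2)

	fmt.Println("legacy requests/node:", maxMem/legacyPerReq) // prints 851
	fmt.Println("newer  requests/node:", maxMem/newPerReq)    // prints 1771
}

Dropping the v1 buffers roughly doubles the computed request budget per node in this example, which is why the old value serialized calls prematurely.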
@@ -575,7 +575,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
             configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
         }
 
-        globalAPIConfig.init(apiConfig, setDriveCounts)
+        globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
         autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
         setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
     case config.CompressionSubSys:
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2024 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -644,6 +644,15 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
     return nil
 }
 
+// Legacy returns 'true' if distribution algo is CRCMOD
+func (z *erasureServerPools) Legacy() (ok bool) {
+    ok = true
+    for _, set := range z.serverPools {
+        ok = ok && set.Legacy()
+    }
+    return ok
+}
+
 func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
     b.Type = madmin.Erasure
 
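A note on the aggregation above (an observation, not from the commit): the result is AND-ed across every set in every pool, so a deployment that mixes a CRCMOD pool with newer pools reports Legacy() == false and therefore gets the newer, smaller per-request memory estimate. A minimal sketch of that semantics, with hypothetical per-set results standing in for set.Legacy():

package main

import "fmt"

func main() {
	// Hypothetical per-set results standing in for set.Legacy().
	setResults := []bool{true, false, true}

	ok := true
	for _, legacy := range setResults {
		ok = ok && legacy // a single non-CRCMOD set flips the aggregate to false
	}
	fmt.Println(ok) // false
}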
@@ -194,6 +194,11 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
     return -1, -1, fmt.Errorf("DriveID: %s not found", format.Erasure.This)
 }
 
+// Legacy returns 'true' if distribution algo is CRCMOD
+func (s *erasureSets) Legacy() (ok bool) {
+    return s.distributionAlgo == formatErasureVersionV2DistributionAlgoV1
+}
+
 // connectDisks - attempt to connect all the endpoints, loads format
 // and re-arranges the disks in proper position.
 func (s *erasureSets) connectDisks() {
@@ -22,6 +22,7 @@ import (
     "net/http"
     "os"
     "runtime"
+    "slices"
     "strconv"
     "strings"
     "sync"
@@ -38,13 +39,11 @@ import (
 type apiConfig struct {
     mu sync.RWMutex
 
     requestsDeadline time.Duration
     requestsPool     chan struct{}
     clusterDeadline  time.Duration
     listQuorum       string
     corsAllowOrigins []string
-    // total drives per erasure set across pools.
-    totalDriveCount       int
     replicationPriority   string
     replicationMaxWorkers int
     transitionWorkers     int
@@ -110,7 +109,7 @@ func availableMemory() (available uint64) {
     return
 }
 
-func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
+func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) {
     t.mu.Lock()
     defer t.mu.Unlock()
 
@@ -125,27 +124,24 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
     }
     t.corsAllowOrigins = corsAllowOrigin
 
-    maxSetDrives := 0
-    for _, setDriveCount := range setDriveCounts {
-        t.totalDriveCount += setDriveCount
-        if setDriveCount > maxSetDrives {
-            maxSetDrives = setDriveCount
-        }
-    }
-
     var apiRequestsMaxPerNode int
     if cfg.RequestsMax <= 0 {
+        maxSetDrives := slices.Max(setDriveCounts)
+
         // Returns 75% of max memory allowed
         maxMem := availableMemory()
 
         // max requests per node is calculated as
         // total_ram / ram_per_request
-        // ram_per_request is (2MiB+128KiB) * driveCount \
-        //     + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
         blockSize := xioutil.LargeBlock + xioutil.SmallBlock
-        apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
-        if globalIsDistErasure {
-            logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode)
+        if legacy {
+            // ram_per_request is (1MiB+32KiB) * driveCount \
+            //     + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
+            apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
+        } else {
+            // ram_per_request is (1MiB+32KiB) * driveCount \
+            //     + 2 * 1MiB (default erasure block size v2)
+            apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV2*2)))
         }
     } else {
         apiRequestsMaxPerNode = cfg.RequestsMax
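One detail worth noting about the hunk above (an observation, not from the commit): slices.Max from the standard library panics on an empty slice, so this path relies on the erasure backend always reporting at least one set drive count. A small sketch of a guarded variant, using a hypothetical input:

package main

import (
	"fmt"
	"slices"
)

func main() {
	setDriveCounts := []int{12, 16, 8} // hypothetical per-pool erasure set sizes

	// slices.Max panics when the slice is empty, so guard it where that can happen.
	maxSetDrives := 0
	if len(setDriveCounts) > 0 {
		maxSetDrives = slices.Max(setDriveCounts)
	}
	fmt.Println(maxSetDrives) // 16
}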
@@ -154,6 +150,10 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
         }
     }
 
+    if globalIsDistErasure {
+        logger.Info("Configured max API requests per node based on available memory: %d", apiRequestsMaxPerNode)
+    }
+
     if cap(t.requestsPool) != apiRequestsMaxPerNode {
         // Only replace if needed.
         // Existing requests will use the previous limit,
@@ -244,6 +244,7 @@ type ObjectLayer interface {
     Shutdown(context.Context) error
     NSScanner(ctx context.Context, updates chan<- DataUsageInfo, wantCycle uint32, scanMode madmin.HealScanMode) error
     BackendInfo() madmin.BackendInfo
+    Legacy() bool // Only returns true for deployments which use CRCMOD as its object distribution algorithm.
     StorageInfo(ctx context.Context, metrics bool) StorageInfo
     LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo
 
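Because Legacy() is now part of the ObjectLayer interface, every other implementation has to provide it as well; a minimal hypothetical stub (the type name is illustrative, not from the MinIO tree) would simply report false and keep the newer per-request memory estimate:

package main

// objectLayerStub is a hypothetical, non-erasure ObjectLayer implementation.
type objectLayerStub struct{}

// Legacy reports false: only CRCMOD-based erasure deployments count as legacy.
func (objectLayerStub) Legacy() bool { return false }

func main() {}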