optimize speedtest for smaller setups (#15414)

This has been observed in multiple environments: on small setups
`speedtest` naturally fails within the default duration of '10s',
because the default concurrency of '32' is too large for such
clusters.

Choose a smaller starting value, i.e. equal to the number of drives
in such clusters, and let 'autotune' increase the concurrency
instead.
Harshavardhana 2022-07-27 14:41:59 -07:00 committed by GitHub
parent 5e763b71dc
commit cbd70d26b5
4 changed files with 29 additions and 1226 deletions
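A minimal sketch of the clamping idea described above, with hypothetical names (pickConcurrency and driveCount are stand-ins, not MinIO APIs; the real handlers use globalEndpoints.NEndpoints()):

package main

import (
	"fmt"
	"runtime"
)

// pickConcurrency is a hypothetical helper mirroring the handler logic:
// fall back to GOMAXPROCS when unset, then clamp the default of 32 down
// to the drive count so small clusters are not oversubscribed; autotune
// can raise concurrency again later if throughput keeps improving.
func pickConcurrency(requested, driveCount int) int {
	concurrent := requested
	if concurrent <= 0 {
		concurrent = runtime.GOMAXPROCS(0)
	}
	if driveCount < concurrent {
		concurrent = driveCount
	}
	return concurrent
}

func main() {
	fmt.Println(pickConcurrency(32, 4)) // 4 on a 4-drive setup, not 32
}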


@@ -1210,12 +1210,22 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
 		concurrent = runtime.GOMAXPROCS(0)
 	}
 
+	// if we have less drives than concurrency then choose
+	// only the concurrency to be number of drives to start
+	// with - since default '32' might be big and may not
+	// complete in total time of 10s.
+	if globalEndpoints.NEndpoints() < concurrent {
+		concurrent = globalEndpoints.NEndpoints()
+	}
+
 	duration, err := time.ParseDuration(durationStr)
 	if err != nil {
 		duration = time.Second * 10
 	}
 
-	sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)
+	storageInfo, _ := objectAPI.StorageInfo(ctx)
+
+	sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune)
 	if !sufficientCapacity {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
 			Code: "XMinioSpeedtestInsufficientCapacity",
@@ -1306,8 +1316,7 @@ func deleteObjectPerfBucket(objectAPI ObjectLayer) {
 	})
 }
 
-func validateObjPerfOptions(ctx context.Context, objectAPI ObjectLayer, concurrent int, size int, autotune bool) (sufficientCapacity bool, canAutotune bool, capacityErrMsg string) {
-	storageInfo, _ := objectAPI.StorageInfo(ctx)
+func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo, concurrent int, size int, autotune bool) (bool, bool, string) {
 	capacityNeeded := uint64(concurrent * size)
 	capacity := GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo)
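The refactor makes validateObjPerfOptions a pure function of the StorageInfo snapshot its callers now fetch once. A simplified sketch of the capacity check's shape; the 2x autotune headroom rule below is an illustrative assumption, not the exact MinIO policy:

package main

import "fmt"

// validatePerfOptions is a hypothetical stand-in: needed capacity is
// concurrency times object size, and the test is viable only when the
// cluster's usable free capacity exceeds it.
func validatePerfOptions(freeCapacity uint64, concurrent, size int, autotune bool) (sufficient, canAutotune bool) {
	needed := uint64(concurrent * size)
	sufficient = freeCapacity > needed
	canAutotune = autotune && freeCapacity > 2*needed // assumed headroom rule
	return sufficient, canAutotune
}

func main() {
	// 4 concurrent uploads of 64 MiB need 256 MiB; 1 GiB free passes both checks.
	ok, tune := validatePerfOptions(1<<30, 4, 64<<20, true)
	fmt.Println(ok, tune) // true true
}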
@@ -2220,11 +2229,20 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 		concurrent = runtime.GOMAXPROCS(0)
 	}
 
+	// if we have less drives than concurrency then choose
+	// only the concurrency to be number of drives to start
+	// with - since default '32' might be big and may not
+	// complete in total time of 10s.
+	if globalEndpoints.NEndpoints() < concurrent {
+		concurrent = globalEndpoints.NEndpoints()
+	}
+
+	storageInfo, _ := objectAPI.StorageInfo(ctx)
+
 	size := 64 * humanize.MiByte
 	autotune := true
 
-	sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)
+	sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune)
 	if !sufficientCapacity {
 		healthInfo.Perf.Error = capacityErrMsg
 		partialWrite(healthInfo)


@@ -533,9 +533,11 @@ func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
 	rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
 
 	// Data blocks can vary per pool, but parity is same.
-	for _, setDriveCount := range z.SetDriveCounts() {
+	for i, setDriveCount := range z.SetDriveCounts() {
 		b.StandardSCData = append(b.StandardSCData, setDriveCount-scParity)
 		b.RRSCData = append(b.RRSCData, setDriveCount-rrSCParity)
+		b.DrivesPerSet = append(b.DrivesPerSet, setDriveCount)
+		b.TotalSets = append(b.TotalSets, z.serverPools[i].setCount)
 	}
 
 	b.StandardSCParity = scParity
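With DrivesPerSet and TotalSets now exposed per pool, a caller can derive drive totals directly. A small consumer sketch (hypothetical program; only the two field names come from this diff):

package main

import "fmt"

func main() {
	// Shapes mirror madmin.BackendInfo's new slices: one entry per pool.
	totalSets := []int{2, 4}     // erasure sets in each pool
	drivesPerSet := []int{16, 8} // drives per set in each pool

	totalDrives := 0
	for i := range drivesPerSet {
		totalDrives += totalSets[i] * drivesPerSet[i]
	}
	fmt.Println("total drives:", totalDrives) // 2*16 + 4*8 = 64
}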

go.mod

@@ -48,7 +48,7 @@ require (
 	github.com/minio/dperf v0.4.2
 	github.com/minio/highwayhash v1.0.2
 	github.com/minio/kes v0.20.0
-	github.com/minio/madmin-go v1.4.6
+	github.com/minio/madmin-go v1.4.9
 	github.com/minio/minio-go/v7 v7.0.32
 	github.com/minio/pkg v1.1.26
 	github.com/minio/selfupdate v0.5.0
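This bump presumably picks up the madmin.BackendInfo fields (DrivesPerSet, TotalSets) populated above; which madmin-go release introduced them is not stated in this diff.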

go.sum

File diff suppressed because it is too large