fix: totalDrives reported in speedTest for multiple-pools (#13770)

The totalDrives value reported in speedTest results was wrong
for deployments with multiple pools; this PR fixes that.
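
For context, the pre-fix sendResult() computed the drive count by looping over the server pools but assigning (rather than accumulating) per pool, so only the last pool's drives were counted; the fix reports globalEndpoints.NEndpoints() instead (see the diff below). A toy, self-contained sketch of the difference, using hypothetical pool numbers:

package main

import "fmt"

// Hypothetical stand-in for the per-pool set layout; the real code walks
// erasureServerPools.serverPools.
type pool struct{ setCount, setDriveCount int }

func main() {
	pools := []pool{{setCount: 4, setDriveCount: 16}, {setCount: 2, setDriveCount: 16}}

	// Old logic: '=' inside the loop overwrites the running total, so a
	// 2-pool cluster reports only the last pool's drives (32 here).
	oldDisks := 0
	for _, set := range pools {
		oldDisks = set.setCount * set.setDriveCount
	}

	// Intended total across all pools (96 here); the PR sidesteps the loop
	// entirely and reports globalEndpoints.NEndpoints().
	totalDisks := 0
	for _, set := range pools {
		totalDisks += set.setCount * set.setDriveCount
	}

	fmt.Println(oldDisks, totalDisks) // 32 96
}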

Bonus: add support for a configurable storage-class, which
allows testing with REDUCED_REDUNDANCY to measure the maximum
achievable throughput across the cluster.
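
A minimal sketch of the query parameters the admin SpeedtestHandler now reads; the form keys ("size", "concurrent", "duration", "autotune", "storage-class") come from the diff below, while the example values, endpoint path, and request signing are assumptions and omitted here:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	v := make(url.Values)
	v.Set("size", "67108864")                    // object size in bytes (64 MiB, assumed)
	v.Set("concurrent", "32")                    // starting concurrency (assumed)
	v.Set("duration", "10s")                     // per-round test duration (assumed)
	v.Set("autotune", "true")                    // keep raising concurrency while throughput improves
	v.Set("storage-class", "REDUCED_REDUNDANCY") // new in this PR; applied as x-amz-storage-class on the test PUTs

	fmt.Println("?" + v.Encode())
}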
Harshavardhana 2021-11-29 09:05:46 -08:00 committed by GitHub
parent 4c0f48c548
commit 99d87c5ca2
8 changed files with 47 additions and 34 deletions


@@ -957,6 +957,7 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 durationStr := r.Form.Get(peerRESTDuration)
 concurrentStr := r.Form.Get(peerRESTConcurrent)
 autotune := r.Form.Get("autotune") == "true"
+storageClass := r.Form.Get("storage-class")
 size, err := strconv.Atoi(sizeStr)
 if err != nil {
@@ -991,7 +992,7 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 defer keepAliveTicker.Stop()
 enc := json.NewEncoder(w)
-ch := speedTest(ctx, size, concurrent, duration, autotune)
+ch := speedTest(ctx, speedTestOpts{size, concurrent, duration, autotune, storageClass})
 for {
 select {
 case <-ctx.Done():


@@ -1534,7 +1534,8 @@ func (sys *NotificationSys) ServiceFreeze(ctx context.Context, freeze bool) []No
 // Speedtest run GET/PUT tests at input concurrency for requested object size,
 // optionally you can extend the tests longer with time.Duration.
-func (sys *NotificationSys) Speedtest(ctx context.Context, size int, concurrent int, duration time.Duration) []SpeedtestResult {
+func (sys *NotificationSys) Speedtest(ctx context.Context, size int,
+concurrent int, duration time.Duration, storageClass string) []SpeedtestResult {
 length := len(sys.allPeerClients)
 if length == 0 {
 // For single node erasure setup.
@@ -1555,7 +1556,8 @@ func (sys *NotificationSys) Speedtest(ctx context.Context, size int, concurrent
 wg.Add(1)
 go func(index int) {
 defer wg.Done()
-r, err := sys.peerClients[index].Speedtest(ctx, size, concurrent, duration)
+r, err := sys.peerClients[index].Speedtest(ctx, size,
+concurrent, duration, storageClass)
 u := &url.URL{
 Scheme: scheme,
 Host: sys.peerClients[index].host.String(),
@@ -1572,7 +1574,7 @@ func (sys *NotificationSys) Speedtest(ctx context.Context, size int, concurrent
 wg.Add(1)
 go func() {
 defer wg.Done()
-r, err := selfSpeedtest(ctx, size, concurrent, duration)
+r, err := selfSpeedtest(ctx, size, concurrent, duration, storageClass)
 u := &url.URL{
 Scheme: scheme,
 Host: globalLocalNodeName,


@@ -1013,11 +1013,13 @@ func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric
 return ch, nil
 }
-func (client *peerRESTClient) Speedtest(ctx context.Context, size, concurrent int, duration time.Duration) (SpeedtestResult, error) {
+func (client *peerRESTClient) Speedtest(ctx context.Context, size,
+concurrent int, duration time.Duration, storageClass string) (SpeedtestResult, error) {
 values := make(url.Values)
 values.Set(peerRESTSize, strconv.Itoa(size))
 values.Set(peerRESTConcurrent, strconv.Itoa(concurrent))
 values.Set(peerRESTDuration, duration.String())
+values.Set(peerRESTStorageClass, storageClass)
 respBody, err := client.callWithContext(context.Background(), peerRESTMethodSpeedtest, values, nil, -1)
 if err != nil {


@@ -18,7 +18,7 @@
 package cmd
 const (
-peerRESTVersion = "v16" // Add new ServiceSignals.
+peerRESTVersion = "v17" // Add "storage-class" option for SpeedTest
 peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
 peerRESTPrefix = minioReservedBucketPath + "/peer"
 peerRESTPath = peerRESTPrefix + peerRESTVersionPrefix
@@ -89,6 +89,7 @@ const (
 peerRESTSize = "size"
 peerRESTConcurrent = "concurrent"
 peerRESTDuration = "duration"
+peerRESTStorageClass = "storage-class"
 peerRESTListenBucket = "bucket"
 peerRESTListenPrefix = "prefix"


@@ -38,6 +38,7 @@ import (
 b "github.com/minio/minio/internal/bucket/bandwidth"
 "github.com/minio/minio/internal/event"
 "github.com/minio/minio/internal/hash"
+xhttp "github.com/minio/minio/internal/http"
 "github.com/minio/minio/internal/logger"
 "github.com/minio/pkg/randreader"
 "github.com/tinylib/msgp/msgp"
@@ -1134,7 +1135,7 @@ func newRandomReader(size int) io.Reader {
 }
 // Runs the speedtest on local MinIO process.
-func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Duration) (SpeedtestResult, error) {
+func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Duration, storageClass string) (SpeedtestResult, error) {
 objAPI := newObjectLayerFn()
 if objAPI == nil {
 return SpeedtestResult{}, errServerNotInitialized
@@ -1173,7 +1174,11 @@
 }
 reader := NewPutObjReader(hashReader)
 objInfo, err := objAPI.PutObject(uploadsCtx, bucket, fmt.Sprintf("%s.%d.%d",
-objNamePrefix, i, objCountPerThread[i]), reader, ObjectOptions{})
+objNamePrefix, i, objCountPerThread[i]), reader, ObjectOptions{
+UserDefined: map[string]string{
+xhttp.AmzStorageClass: storageClass,
+},
+})
 if err != nil && !uploadsStopped {
 retError = err.Error()
 logger.LogIf(ctx, err)
@@ -1257,6 +1262,7 @@ func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request
 sizeStr := r.Form.Get(peerRESTSize)
 durationStr := r.Form.Get(peerRESTDuration)
 concurrentStr := r.Form.Get(peerRESTConcurrent)
+storageClass := r.Form.Get(peerRESTStorageClass)
 size, err := strconv.Atoi(sizeStr)
 if err != nil {
@@ -1275,7 +1281,7 @@ func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request
 done := keepHTTPResponseAlive(w)
-result, err := selfSpeedtest(r.Context(), size, concurrent, duration)
+result, err := selfSpeedtest(r.Context(), size, concurrent, duration, storageClass)
 if err != nil {
 result.Error = err.Error()
 }


@@ -972,18 +972,21 @@ func auditLogInternal(ctx context.Context, bucket, object string, opts AuditLogO
 logger.AuditLog(ctx, nil, nil, nil)
 }
+type speedTestOpts struct {
+throughputSize int
+concurrencyStart int
+duration time.Duration
+autotune bool
+storageClass string
+}
 // Get the max throughput and iops numbers.
-func speedTest(ctx context.Context, throughputSize, concurrencyStart int, duration time.Duration, autotune bool) chan madmin.SpeedTestResult {
+func speedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedTestResult {
 ch := make(chan madmin.SpeedTestResult, 1)
 go func() {
 defer close(ch)
 objAPI := newObjectLayerFn()
 if objAPI == nil {
 return
 }
-concurrency := concurrencyStart
+concurrency := opts.concurrencyStart
 throughputHighestGet := uint64(0)
 throughputHighestPut := uint64(0)
@@ -992,12 +995,12 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 sendResult := func() {
 var result madmin.SpeedTestResult
-durationSecs := duration.Seconds()
+durationSecs := opts.duration.Seconds()
 result.GETStats.ThroughputPerSec = throughputHighestGet / uint64(durationSecs)
-result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(throughputSize) / uint64(durationSecs)
+result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(opts.throughputSize) / uint64(durationSecs)
 result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
-result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(throughputSize) / uint64(durationSecs)
+result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(opts.throughputSize) / uint64(durationSecs)
 for i := 0; i < len(throughputHighestResults); i++ {
 errStr := ""
 if throughputHighestResults[i].Error != "" {
@@ -1006,27 +1009,21 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
 Endpoint: throughputHighestResults[i].Endpoint,
 ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
-ObjectsPerSec: throughputHighestResults[i].Uploads / uint64(throughputSize) / uint64(durationSecs),
+ObjectsPerSec: throughputHighestResults[i].Uploads / uint64(opts.throughputSize) / uint64(durationSecs),
 Err: errStr,
 })
 result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
 Endpoint: throughputHighestResults[i].Endpoint,
 ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
-ObjectsPerSec: throughputHighestResults[i].Downloads / uint64(throughputSize) / uint64(durationSecs),
+ObjectsPerSec: throughputHighestResults[i].Downloads / uint64(opts.throughputSize) / uint64(durationSecs),
 Err: errStr,
 })
 }
-numDisks := 0
-if pools, ok := objAPI.(*erasureServerPools); ok {
-for _, set := range pools.serverPools {
-numDisks = set.setCount * set.setDriveCount
-}
-}
-result.Disks = numDisks
+result.Size = opts.throughputSize
+result.Disks = globalEndpoints.NEndpoints()
 result.Servers = len(globalNotificationSys.peerClients) + 1
 result.Version = Version
-result.Size = throughputSize
 result.Concurrent = concurrency
 ch <- result
@@ -1040,10 +1037,13 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 default:
 }
-results := globalNotificationSys.Speedtest(ctx, throughputSize, concurrency, duration)
+results := globalNotificationSys.Speedtest(ctx,
+opts.throughputSize, concurrency,
+opts.duration, opts.storageClass)
 sort.Slice(results, func(i, j int) bool {
 return results[i].Endpoint < results[j].Endpoint
 })
 totalPut := uint64(0)
 totalGet := uint64(0)
 for _, result := range results {
@@ -1085,10 +1085,11 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 break
 }
-if !autotune {
+if !opts.autotune {
+sendResult()
 break
 }
 sendResult()
 // Try with a higher concurrency to see if we get better throughput
 concurrency += (concurrency + 1) / 2

go.mod

@@ -50,7 +50,7 @@ require (
 github.com/minio/kes v0.14.0
 github.com/minio/madmin-go v1.1.16
 github.com/minio/mc v0.0.0-20211118223026-df75eed32e9e // indirect
-github.com/minio/minio-go/v7 v7.0.16-0.20211117164632-e517704ccb36
+github.com/minio/minio-go/v7 v7.0.16
 github.com/minio/parquet-go v1.1.0
 github.com/minio/pkg v1.1.9
 github.com/minio/selfupdate v0.3.1

go.sum

@@ -1100,8 +1100,8 @@ github.com/minio/minio-go/v7 v7.0.11-0.20210302210017-6ae69c73ce78/go.mod h1:mTh
 github.com/minio/minio-go/v7 v7.0.15-0.20211004160302-3b57c1e369ca/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
 github.com/minio/minio-go/v7 v7.0.15/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
 github.com/minio/minio-go/v7 v7.0.16-0.20211108161804-a7a36ee131df/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
-github.com/minio/minio-go/v7 v7.0.16-0.20211117164632-e517704ccb36 h1:amnEPz1PuZxUUSKQvQn7E4Pd+B7tIqmqiFeuc9yy2r4=
-github.com/minio/minio-go/v7 v7.0.16-0.20211117164632-e517704ccb36/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
+github.com/minio/minio-go/v7 v7.0.16 h1:GspaSBS8lOuEUCAqMe0W3UxSoyOA4b4F8PTspRVI+k4=
+github.com/minio/minio-go/v7 v7.0.16/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
 github.com/minio/operator v0.0.0-20211011212245-31460bbbc4b7 h1:dkfuMNslMjGoJ4ArAMSoQhidYNdm3SgzLBP+f96O3/E=
 github.com/minio/operator v0.0.0-20211011212245-31460bbbc4b7/go.mod h1:lDpuz8nwsfhKlfiBaA3Z8AW019fWEAjO2gltfLbdorE=
 github.com/minio/operator/logsearchapi v0.0.0-20211011212245-31460bbbc4b7 h1:vFtQqCt67ETp0JAkOKRWTKkgwFv14Vc1jJSxmQ8wJE0=