fix: totalDrives reported in speedTest for multiple-pools (#13770)
totalDrives reported in the speedTest result was wrong for deployments with multiple pools; this PR fixes that. Bonus: add support for a configurable storage class, which allows testing REDUCED_REDUNDANCY to see the maximum achievable throughput across the cluster.
parent 4c0f48c548
commit 99d87c5ca2
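The drive-count bug described above is easiest to see in isolation: the old code looped over the server pools but assigned, rather than accumulated, each pool's drive count, so only the last pool was reported. Below is a minimal, self-contained Go sketch with simplified stand-in types (not MinIO's actual ones); the commit itself fixes the issue by reporting the total endpoint count instead, as shown in the cmd/utils.go hunks further down.

package main

import "fmt"

// pool is a simplified stand-in for a server pool's erasure-set layout.
type pool struct {
    setCount      int
    setDriveCount int
}

func main() {
    pools := []pool{
        {setCount: 4, setDriveCount: 16}, // pool 1: 64 drives
        {setCount: 2, setDriveCount: 8},  // pool 2: 16 drives
    }

    // Old logic: assignment inside the loop overwrites earlier pools,
    // so totalDrives comes out as 16 instead of 80.
    buggy := 0
    for _, p := range pools {
        buggy = p.setCount * p.setDriveCount
    }

    // Fixed logic: count drives across every pool (the commit gets the same
    // effect by asking the endpoint registry for the total endpoint count).
    fixed := 0
    for _, p := range pools {
        fixed += p.setCount * p.setDriveCount
    }

    fmt.Println("old totalDrives:", buggy, "fixed totalDrives:", fixed)
}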
@@ -957,6 +957,7 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 	durationStr := r.Form.Get(peerRESTDuration)
 	concurrentStr := r.Form.Get(peerRESTConcurrent)
 	autotune := r.Form.Get("autotune") == "true"
+	storageClass := r.Form.Get("storage-class")

 	size, err := strconv.Atoi(sizeStr)
 	if err != nil {
@@ -991,7 +992,7 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 	defer keepAliveTicker.Stop()

 	enc := json.NewEncoder(w)
-	ch := speedTest(ctx, size, concurrent, duration, autotune)
+	ch := speedTest(ctx, speedTestOpts{size, concurrent, duration, autotune, storageClass})
 	for {
 		select {
 		case <-ctx.Done():
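For readability, the positional struct literal in the new call maps onto the speedTestOpts fields (defined in the cmd/utils.go hunk below) like this; the named-field form is equivalent and shown only as a clarifying sketch, it is not part of the commit:

ch := speedTest(ctx, speedTestOpts{
    throughputSize:   size,
    concurrencyStart: concurrent,
    duration:         duration,
    autotune:         autotune,
    storageClass:     storageClass,
})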
@@ -1534,7 +1534,8 @@ func (sys *NotificationSys) ServiceFreeze(ctx context.Context, freeze bool) []No

 // Speedtest run GET/PUT tests at input concurrency for requested object size,
 // optionally you can extend the tests longer with time.Duration.
-func (sys *NotificationSys) Speedtest(ctx context.Context, size int, concurrent int, duration time.Duration) []SpeedtestResult {
+func (sys *NotificationSys) Speedtest(ctx context.Context, size int,
+	concurrent int, duration time.Duration, storageClass string) []SpeedtestResult {
 	length := len(sys.allPeerClients)
 	if length == 0 {
 		// For single node erasure setup.
@@ -1555,7 +1556,8 @@ func (sys *NotificationSys) Speedtest(ctx context.Context, size int, concurrent
 		wg.Add(1)
 		go func(index int) {
 			defer wg.Done()
-			r, err := sys.peerClients[index].Speedtest(ctx, size, concurrent, duration)
+			r, err := sys.peerClients[index].Speedtest(ctx, size,
+				concurrent, duration, storageClass)
 			u := &url.URL{
 				Scheme: scheme,
 				Host:   sys.peerClients[index].host.String(),
@@ -1572,7 +1574,7 @@ func (sys *NotificationSys) Speedtest(ctx context.Context, size int, concurrent
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		r, err := selfSpeedtest(ctx, size, concurrent, duration)
+		r, err := selfSpeedtest(ctx, size, concurrent, duration, storageClass)
 		u := &url.URL{
 			Scheme: scheme,
 			Host:   globalLocalNodeName,
@@ -1013,11 +1013,13 @@ func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric
 	return ch, nil
 }

-func (client *peerRESTClient) Speedtest(ctx context.Context, size, concurrent int, duration time.Duration) (SpeedtestResult, error) {
+func (client *peerRESTClient) Speedtest(ctx context.Context, size,
+	concurrent int, duration time.Duration, storageClass string) (SpeedtestResult, error) {
 	values := make(url.Values)
 	values.Set(peerRESTSize, strconv.Itoa(size))
 	values.Set(peerRESTConcurrent, strconv.Itoa(concurrent))
 	values.Set(peerRESTDuration, duration.String())
+	values.Set(peerRESTStorageClass, storageClass)

 	respBody, err := client.callWithContext(context.Background(), peerRESTMethodSpeedtest, values, nil, -1)
 	if err != nil {
@@ -18,7 +18,7 @@
 package cmd

 const (
-	peerRESTVersion       = "v16" // Add new ServiceSignals.
+	peerRESTVersion       = "v17" // Add "storage-class" option for SpeedTest
 	peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
 	peerRESTPrefix        = minioReservedBucketPath + "/peer"
 	peerRESTPath          = peerRESTPrefix + peerRESTVersionPrefix
@@ -89,6 +89,7 @@ const (
 	peerRESTSize         = "size"
 	peerRESTConcurrent   = "concurrent"
 	peerRESTDuration     = "duration"
+	peerRESTStorageClass = "storage-class"

 	peerRESTListenBucket = "bucket"
 	peerRESTListenPrefix = "prefix"
@@ -38,6 +38,7 @@ import (
 	b "github.com/minio/minio/internal/bucket/bandwidth"
 	"github.com/minio/minio/internal/event"
 	"github.com/minio/minio/internal/hash"
+	xhttp "github.com/minio/minio/internal/http"
 	"github.com/minio/minio/internal/logger"
 	"github.com/minio/pkg/randreader"
 	"github.com/tinylib/msgp/msgp"
@@ -1134,7 +1135,7 @@ func newRandomReader(size int) io.Reader {
 }

 // Runs the speedtest on local MinIO process.
-func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Duration) (SpeedtestResult, error) {
+func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Duration, storageClass string) (SpeedtestResult, error) {
 	objAPI := newObjectLayerFn()
 	if objAPI == nil {
 		return SpeedtestResult{}, errServerNotInitialized
@@ -1173,7 +1174,11 @@ func selfSpeedtest(ctx context.Context, size, concurrent int, duration time.Dura
 			}
 			reader := NewPutObjReader(hashReader)
 			objInfo, err := objAPI.PutObject(uploadsCtx, bucket, fmt.Sprintf("%s.%d.%d",
-				objNamePrefix, i, objCountPerThread[i]), reader, ObjectOptions{})
+				objNamePrefix, i, objCountPerThread[i]), reader, ObjectOptions{
+				UserDefined: map[string]string{
+					xhttp.AmzStorageClass: storageClass,
+				},
+			})
 			if err != nil && !uploadsStopped {
 				retError = err.Error()
 				logger.LogIf(ctx, err)
@@ -1257,6 +1262,7 @@ func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request
 	sizeStr := r.Form.Get(peerRESTSize)
 	durationStr := r.Form.Get(peerRESTDuration)
 	concurrentStr := r.Form.Get(peerRESTConcurrent)
+	storageClass := r.Form.Get(peerRESTStorageClass)

 	size, err := strconv.Atoi(sizeStr)
 	if err != nil {
@@ -1275,7 +1281,7 @@ func (s *peerRESTServer) SpeedtestHandler(w http.ResponseWriter, r *http.Request

 	done := keepHTTPResponseAlive(w)

-	result, err := selfSpeedtest(r.Context(), size, concurrent, duration)
+	result, err := selfSpeedtest(r.Context(), size, concurrent, duration, storageClass)
 	if err != nil {
 		result.Error = err.Error()
 	}
cmd/utils.go (45 changed lines)
@@ -972,18 +972,21 @@ func auditLogInternal(ctx context.Context, bucket, object string, opts AuditLogO
 	logger.AuditLog(ctx, nil, nil, nil)
 }

+type speedTestOpts struct {
+	throughputSize   int
+	concurrencyStart int
+	duration         time.Duration
+	autotune         bool
+	storageClass     string
+}
+
 // Get the max throughput and iops numbers.
-func speedTest(ctx context.Context, throughputSize, concurrencyStart int, duration time.Duration, autotune bool) chan madmin.SpeedTestResult {
+func speedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedTestResult {
 	ch := make(chan madmin.SpeedTestResult, 1)
 	go func() {
 		defer close(ch)

-		objAPI := newObjectLayerFn()
-		if objAPI == nil {
-			return
-		}
-
-		concurrency := concurrencyStart
+		concurrency := opts.concurrencyStart

 		throughputHighestGet := uint64(0)
 		throughputHighestPut := uint64(0)
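As a usage sketch (assumed caller code inside package cmd, not part of the commit), the new options struct and its storage-class field make it straightforward to benchmark a specific class such as REDUCED_REDUNDANCY and stream the autotuned results from the returned channel:

// Sketch only: assumes package cmd context where speedTest, speedTestOpts and
// the madmin types are visible; ctx is a context.Context, and "fmt"/"time" are imported.
opts := speedTestOpts{
    throughputSize:   64 << 20, // 64 MiB objects
    concurrencyStart: 32,
    duration:         10 * time.Second,
    autotune:         true,
    storageClass:     "REDUCED_REDUNDANCY",
}
for result := range speedTest(ctx, opts) {
    // One result per concurrency step while autotune ramps up.
    fmt.Printf("concurrency=%d GET=%d B/s PUT=%d B/s\n",
        result.Concurrent, result.GETStats.ThroughputPerSec, result.PUTStats.ThroughputPerSec)
}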
@@ -992,12 +995,12 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 		sendResult := func() {
 			var result madmin.SpeedTestResult

-			durationSecs := duration.Seconds()
+			durationSecs := opts.duration.Seconds()

 			result.GETStats.ThroughputPerSec = throughputHighestGet / uint64(durationSecs)
-			result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(throughputSize) / uint64(durationSecs)
+			result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(opts.throughputSize) / uint64(durationSecs)
 			result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
-			result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(throughputSize) / uint64(durationSecs)
+			result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(opts.throughputSize) / uint64(durationSecs)
 			for i := 0; i < len(throughputHighestResults); i++ {
 				errStr := ""
 				if throughputHighestResults[i].Error != "" {
@@ -1006,27 +1009,21 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 				result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
 					Endpoint:         throughputHighestResults[i].Endpoint,
 					ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
-					ObjectsPerSec:    throughputHighestResults[i].Uploads / uint64(throughputSize) / uint64(durationSecs),
+					ObjectsPerSec:    throughputHighestResults[i].Uploads / uint64(opts.throughputSize) / uint64(durationSecs),
 					Err:              errStr,
 				})
 				result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
 					Endpoint:         throughputHighestResults[i].Endpoint,
 					ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
-					ObjectsPerSec:    throughputHighestResults[i].Downloads / uint64(throughputSize) / uint64(durationSecs),
+					ObjectsPerSec:    throughputHighestResults[i].Downloads / uint64(opts.throughputSize) / uint64(durationSecs),
 					Err:              errStr,
 				})
 			}

-			numDisks := 0
-			if pools, ok := objAPI.(*erasureServerPools); ok {
-				for _, set := range pools.serverPools {
-					numDisks = set.setCount * set.setDriveCount
-				}
-			}
-			result.Disks = numDisks
+			result.Size = opts.throughputSize
+			result.Disks = globalEndpoints.NEndpoints()
 			result.Servers = len(globalNotificationSys.peerClients) + 1
 			result.Version = Version
-			result.Size = throughputSize
 			result.Concurrent = concurrency

 			ch <- result
@@ -1040,10 +1037,13 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 			default:
 			}

-			results := globalNotificationSys.Speedtest(ctx, throughputSize, concurrency, duration)
+			results := globalNotificationSys.Speedtest(ctx,
+				opts.throughputSize, concurrency,
+				opts.duration, opts.storageClass)
 			sort.Slice(results, func(i, j int) bool {
 				return results[i].Endpoint < results[j].Endpoint
 			})

 			totalPut := uint64(0)
 			totalGet := uint64(0)
 			for _, result := range results {
@@ -1085,10 +1085,11 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 				break
 			}

-			if !autotune {
+			if !opts.autotune {
 				sendResult()
 				break
 			}

 			sendResult()
 			// Try with a higher concurrency to see if we get better throughput
 			concurrency += (concurrency + 1) / 2
go.mod (2 changed lines)
@@ -50,7 +50,7 @@ require (
 	github.com/minio/kes v0.14.0
 	github.com/minio/madmin-go v1.1.16
 	github.com/minio/mc v0.0.0-20211118223026-df75eed32e9e // indirect
-	github.com/minio/minio-go/v7 v7.0.16-0.20211117164632-e517704ccb36
+	github.com/minio/minio-go/v7 v7.0.16
 	github.com/minio/parquet-go v1.1.0
 	github.com/minio/pkg v1.1.9
 	github.com/minio/selfupdate v0.3.1
go.sum (4 changed lines)
@@ -1100,8 +1100,8 @@ github.com/minio/minio-go/v7 v7.0.11-0.20210302210017-6ae69c73ce78/go.mod h1:mTh
 github.com/minio/minio-go/v7 v7.0.15-0.20211004160302-3b57c1e369ca/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
 github.com/minio/minio-go/v7 v7.0.15/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
 github.com/minio/minio-go/v7 v7.0.16-0.20211108161804-a7a36ee131df/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
-github.com/minio/minio-go/v7 v7.0.16-0.20211117164632-e517704ccb36 h1:amnEPz1PuZxUUSKQvQn7E4Pd+B7tIqmqiFeuc9yy2r4=
-github.com/minio/minio-go/v7 v7.0.16-0.20211117164632-e517704ccb36/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
+github.com/minio/minio-go/v7 v7.0.16 h1:GspaSBS8lOuEUCAqMe0W3UxSoyOA4b4F8PTspRVI+k4=
+github.com/minio/minio-go/v7 v7.0.16/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g=
 github.com/minio/operator v0.0.0-20211011212245-31460bbbc4b7 h1:dkfuMNslMjGoJ4ArAMSoQhidYNdm3SgzLBP+f96O3/E=
 github.com/minio/operator v0.0.0-20211011212245-31460bbbc4b7/go.mod h1:lDpuz8nwsfhKlfiBaA3Z8AW019fWEAjO2gltfLbdorE=
 github.com/minio/operator/logsearchapi v0.0.0-20211011212245-31460bbbc4b7 h1:vFtQqCt67ETp0JAkOKRWTKkgwFv14Vc1jJSxmQ8wJE0=