add support for speedtest drive (#14182)

cmd/utils.go (131 lines removed)

@@ -37,7 +37,6 @@ import (
    "runtime"
    "runtime/pprof"
    "runtime/trace"
    "sort"
    "strings"
    "sync"
    "syscall"

@@ -997,136 +996,6 @@ func auditLogInternal(ctx context.Context, bucket, object string, opts AuditLogO
    logger.AuditLog(ctx, nil, nil, nil)
}

type speedTestOpts struct {
    throughputSize   int           // size in bytes of each test object
    concurrencyStart int           // initial number of concurrent requests
    duration         time.Duration // length of each test round
    autotune         bool          // keep raising concurrency while it helps
    storageClass     string        // storage class used for the test objects
}

// Get the max throughput and iops numbers.
func speedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedTestResult {
    ch := make(chan madmin.SpeedTestResult, 1)
    go func() {
        defer close(ch)

        concurrency := opts.concurrencyStart

        throughputHighestGet := uint64(0)
        throughputHighestPut := uint64(0)
        var throughputHighestResults []SpeedtestResult

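        // sendResult publishes the best round recorded so far,
        // converting raw byte totals into per-second rates.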
        sendResult := func() {
            var result madmin.SpeedTestResult

            durationSecs := opts.duration.Seconds()

            result.GETStats.ThroughputPerSec = throughputHighestGet / uint64(durationSecs)
            result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(opts.throughputSize) / uint64(durationSecs)
            result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
            result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(opts.throughputSize) / uint64(durationSecs)
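            // Hypothetical example: 6710886400 bytes moved (100 objects
            // of 64 MiB) in a 10s round gives 671088640 B/s throughput
            // and 100/10 = 10 objects/s.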
            for i := 0; i < len(throughputHighestResults); i++ {
                errStr := ""
                if throughputHighestResults[i].Error != "" {
                    errStr = throughputHighestResults[i].Error
                }
                result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
                    Endpoint:         throughputHighestResults[i].Endpoint,
                    ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
                    ObjectsPerSec:    throughputHighestResults[i].Uploads / uint64(opts.throughputSize) / uint64(durationSecs),
                    Err:              errStr,
                })
                result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
                    Endpoint:         throughputHighestResults[i].Endpoint,
                    ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
                    ObjectsPerSec:    throughputHighestResults[i].Downloads / uint64(opts.throughputSize) / uint64(durationSecs),
                    Err:              errStr,
                })
            }

            result.Size = opts.throughputSize
            result.Disks = globalEndpoints.NEndpoints()
            result.Servers = len(globalNotificationSys.peerClients) + 1
            result.Version = Version
            result.Concurrent = concurrency

            ch <- result
        }

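        // Each iteration runs one cluster-wide round at the current
        // concurrency, then either stops or ramps concurrency up.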
        for {
            select {
            case <-ctx.Done():
                // If the client got disconnected, stop the speedtest.
                return
            default:
            }

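            // Run one round across the cluster; each entry in results
            // carries one server's upload/download byte totals.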
            results := globalNotificationSys.Speedtest(ctx,
                opts.throughputSize, concurrency,
                opts.duration, opts.storageClass)
            sort.Slice(results, func(i, j int) bool {
                return results[i].Endpoint < results[j].Endpoint
            })

            totalPut := uint64(0)
            totalGet := uint64(0)
            for _, result := range results {
                totalPut += result.Uploads
                totalGet += result.Downloads
            }

            if totalGet < throughputHighestGet {
                // The following check is for situations
                // where Writes() scale higher than Reads():
                // practically speaking this never happens,
                // and should never happen, however it has
                // been seen recently due to hardware issues
                // causing Reads() to go slower than Writes().
                //
                // Send such results anyway, as this shall
                // expose a problem underneath.
                if totalPut > throughputHighestPut {
                    throughputHighestResults = results
                    throughputHighestPut = totalPut
                    // let the client see the lower value as well
                    throughputHighestGet = totalGet
                }
                sendResult()
                break
            }

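            // Stop ramping once the GET gain over the previous best
            // round falls below 2.5%; extra concurrency is no longer
            // buying meaningful throughput.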
            doBreak := float64(totalGet-throughputHighestGet)/float64(totalGet) < 0.025

            throughputHighestGet = totalGet
            throughputHighestResults = results
            throughputHighestPut = totalPut

            if doBreak {
                sendResult()
                break
            }

            for _, result := range results {
                if result.Error != "" {
                    // Break out on errors.
                    sendResult()
                    return
                }
            }

            sendResult()
            if !opts.autotune {
                break
            }

// Try with a higher concurrency to see if we get better throughput
|
||||
concurrency += (concurrency + 1) / 2
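            // (Each ramp multiplies concurrency by roughly 1.5x: a
            // hypothetical start of 16 yields 16, 24, 36, 54, 81, ...)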
        }
    }()
    return ch
}

func newTLSConfig(getCert certs.GetCertificateFunc) *tls.Config {
    if getCert == nil {
        return nil
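
A minimal sketch of how a caller might drive this API, assuming a ctx is in scope; the option values and the output format are hypothetical, while speedTestOpts, speedTest, and the madmin.SpeedTestResult fields are taken from the diff above:

    opts := speedTestOpts{
        throughputSize:   64 * 1024 * 1024, // hypothetical 64 MiB object size
        concurrencyStart: 32,               // hypothetical starting concurrency
        duration:         10 * time.Second,
        autotune:         true,
    }
    // speedTest streams one result per round; each reflects the best
    // figures so far, so the last value before the channel closes wins.
    for result := range speedTest(ctx, opts) {
        fmt.Printf("concurrency=%d GET=%d B/s PUT=%d B/s\n",
            result.Concurrent,
            result.GETStats.ThroughputPerSec,
            result.PUTStats.ThroughputPerSec)
    }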