Mirror of https://github.com/minio/minio.git (synced 2025-01-11 15:03:22 -05:00)
use single encoder for sending speedtest results (#13579)
Bonus: if a run measures PUT higher than GET, capture and report it anyway. Displaying the unexpected result gives a way to understand what might be slowing the system down. For example, on a Data24 WDC setup it makes it clearly visible that there is a bug in the hardware:

```
./mc admin speedtest wdc/
⠧ Running speedtest (With 64 MiB object size, 32 concurrency) PUT: 31 GiB/s GET: 24 GiB/s
⠹ Running speedtest (With 64 MiB object size, 48 concurrency) PUT: 38 GiB/s GET: 24 GiB/s

MinIO 2021-11-04T06:08:33Z, 6 servers, 48 drives
PUT: 38 GiB/s, 605 objs/s
GET: 24 GiB/s, 383 objs/s
```

Reads are almost 14 GiB/s slower than writes, which should not be possible in practice.
This commit is contained in:
parent 112f9ae087
commit c3d24fb26d
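For orientation, here is a minimal sketch of the streaming pattern the commit settles on: one `json.Encoder` created up front and reused for both the periodic keepalive blanks and the real results, with a flush after every write so the client sees progress as it happens. The names (`streamResults`, the `SpeedTestResult` fields, the `/speedtest` route) are illustrative stand-ins, not the actual MinIO handler or madmin types.

```go
package main

import (
	"encoding/json"
	"net/http"
	"time"
)

// SpeedTestResult is a stand-in for madmin.SpeedTestResult; the fields here
// are illustrative only.
type SpeedTestResult struct {
	PUTThroughputPerSec uint64 `json:"putThroughput,omitempty"`
	GETThroughputPerSec uint64 `json:"getThroughput,omitempty"`
}

// streamResults reuses one json.Encoder for both keepalive blanks and real
// results, flushing after every write so the client sees progress live.
func streamResults(w http.ResponseWriter, r *http.Request, results <-chan SpeedTestResult) {
	keepAlive := time.NewTicker(500 * time.Millisecond)
	defer keepAlive.Stop()

	enc := json.NewEncoder(w) // single encoder for the whole response
	for {
		select {
		case <-r.Context().Done():
			return
		case <-keepAlive.C:
			// A blank entry keeps the client from treating the connection as idle.
			if err := enc.Encode(SpeedTestResult{}); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case result, ok := <-results:
			if !ok {
				return
			}
			if err := enc.Encode(result); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		}
	}
}

func main() {
	http.HandleFunc("/speedtest", func(w http.ResponseWriter, r *http.Request) {
		ch := make(chan SpeedTestResult)
		go func() {
			defer close(ch)
			time.Sleep(2 * time.Second) // pretend the benchmark ran
			ch <- SpeedTestResult{PUTThroughputPerSec: 38 << 30, GETThroughputPerSec: 24 << 30}
		}()
		streamResults(w, r, ch)
	})
	http.ListenAndServe(":8080", nil)
}
```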
```diff
@@ -951,9 +951,18 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 			duration = time.Second * 10
 		}
 	}
 
+	deleteBucket := func() {
+		loc := pathJoin(minioMetaSpeedTestBucket, minioMetaSpeedTestBucketPrefix)
+		objectAPI.DeleteBucket(context.Background(), loc, DeleteBucketOptions{
+			Force:      true,
+			NoRecreate: true,
+		})
+	}
+
 	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
 	defer keepAliveTicker.Stop()
 
+	enc := json.NewEncoder(w)
 	ch := speedTest(ctx, size, concurrent, duration, autotune)
 	for {
 		select {
@@ -961,19 +970,19 @@ func (a adminAPIHandlers) SpeedtestHandler(w http.ResponseWriter, r *http.Reques
 			return
 		case <-keepAliveTicker.C:
 			// Write a blank entry to prevent client from disconnecting
-			if err := json.NewEncoder(w).Encode(madmin.SpeedTestResult{}); err != nil {
+			if err := enc.Encode(madmin.SpeedTestResult{}); err != nil {
 				return
 			}
 			w.(http.Flusher).Flush()
 		case result, ok := <-ch:
 			if !ok {
-				defer objectAPI.DeleteBucket(context.Background(), pathJoin(minioMetaSpeedTestBucket, minioMetaSpeedTestBucketPrefix), DeleteBucketOptions{Force: true, NoRecreate: true})
+				deleteBucket()
 				return
 			}
-			if err := json.NewEncoder(w).Encode(result); err != nil {
-				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			if err := enc.Encode(result); err != nil {
 				return
 			}
+			w.(http.Flusher).Flush()
 		}
 	}
 }
```
```diff
@@ -1920,13 +1929,15 @@ func (a adminAPIHandlers) BandwidthMonitorHandler(w http.ResponseWriter, r *http
 			}
 		}
 	}()
+
+	enc := json.NewEncoder(w)
 	for {
 		select {
 		case report, ok := <-reportCh:
 			if !ok {
 				return
 			}
-			if err := json.NewEncoder(w).Encode(report); err != nil {
+			if err := enc.Encode(report); err != nil {
 				writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
 				return
 			}
```
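Since both handlers now emit one JSON value per line through a single encoder, a client can decode the stream incrementally and simply skip the blank keepalive entries. The loop below is a hypothetical consumer, assuming the illustrative server sketch above; it is not the mc/madmin client code.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// SpeedTestResult mirrors the illustrative struct from the server sketch.
type SpeedTestResult struct {
	PUTThroughputPerSec uint64 `json:"putThroughput,omitempty"`
	GETThroughputPerSec uint64 `json:"getThroughput,omitempty"`
}

func main() {
	resp, err := http.Get("http://localhost:8080/speedtest") // hypothetical endpoint
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// One decoder over the body mirrors the single encoder on the server side:
	// json.Encoder writes one JSON value per line, json.Decoder reads them back
	// one at a time as they arrive.
	dec := json.NewDecoder(resp.Body)
	for {
		var r SpeedTestResult
		if err := dec.Decode(&r); err != nil {
			break // io.EOF once the server finishes the run
		}
		if r.PUTThroughputPerSec == 0 && r.GETThroughputPerSec == 0 {
			continue // keepalive placeholder, nothing to report
		}
		fmt.Printf("PUT: %d B/s  GET: %d B/s\n", r.PUTThroughputPerSec, r.GETThroughputPerSec)
	}
}
```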
37 cmd/utils.go
```diff
@@ -987,7 +987,7 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 
 	throughputHighestGet := uint64(0)
 	throughputHighestPut := uint64(0)
-	var throughputHighestGetResults []SpeedtestResult
+	var throughputHighestResults []SpeedtestResult
 
 	sendResult := func() {
 		var result madmin.SpeedTestResult
@@ -998,21 +998,21 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 		result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(throughputSize) / uint64(durationSecs)
 		result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
 		result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(throughputSize) / uint64(durationSecs)
-		for i := 0; i < len(throughputHighestGetResults); i++ {
+		for i := 0; i < len(throughputHighestResults); i++ {
 			errStr := ""
-			if throughputHighestGetResults[i].Error != "" {
-				errStr = throughputHighestGetResults[i].Error
+			if throughputHighestResults[i].Error != "" {
+				errStr = throughputHighestResults[i].Error
 			}
 			result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
-				Endpoint:         throughputHighestGetResults[i].Endpoint,
-				ThroughputPerSec: throughputHighestGetResults[i].Uploads / uint64(durationSecs),
-				ObjectsPerSec:    throughputHighestGetResults[i].Uploads / uint64(throughputSize) / uint64(durationSecs),
+				Endpoint:         throughputHighestResults[i].Endpoint,
+				ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
+				ObjectsPerSec:    throughputHighestResults[i].Uploads / uint64(throughputSize) / uint64(durationSecs),
 				Err:              errStr,
 			})
 			result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
-				Endpoint:         throughputHighestGetResults[i].Endpoint,
-				ThroughputPerSec: throughputHighestGetResults[i].Downloads / uint64(durationSecs),
-				ObjectsPerSec:    throughputHighestGetResults[i].Downloads / uint64(throughputSize) / uint64(durationSecs),
+				Endpoint:         throughputHighestResults[i].Endpoint,
+				ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
+				ObjectsPerSec:    throughputHighestResults[i].Downloads / uint64(throughputSize) / uint64(durationSecs),
 				Err:              errStr,
 			})
 		}
@@ -1052,6 +1052,21 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 		}
 
 		if totalGet < throughputHighestGet {
+			// Following check is for situations
+			// when Writes() scale higher than Reads()
+			// - practically speaking this never happens
+			// and should never happen - however it has
+			// been seen recently due to hardware issues
+			// causes Reads() to go slower than Writes().
+			//
+			// Send such results anyways as this shall
+			// expose a problem underneath.
+			if totalPut > throughputHighestPut {
+				throughputHighestResults = results
+				throughputHighestPut = totalPut
+				// let the client see lower value as well
+				throughputHighestGet = totalGet
+			}
 			sendResult()
 			break
 		}
@@ -1062,7 +1077,7 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
 		}
 
 		throughputHighestGet = totalGet
-		throughputHighestGetResults = results
+		throughputHighestResults = results
 		throughputHighestPut = totalPut
 
 		if doBreak {
```
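The behavioural change in `speedTest()` reduces to one rule: a drop in GET throughput normally ends the autotune loop, but if PUT kept climbing the run is recorded anyway so the anomalous PUT > GET result reaches the client instead of being discarded. Below is a distilled sketch of that rule, with an invented `best` type and `observe` method rather than the actual speedTest loop.

```go
package main

import "fmt"

// best tracks the highest throughput seen so far across autotune runs.
type best struct {
	get, put uint64
}

// observe returns true when the autotune loop should stop after this run.
func (b *best) observe(totalGet, totalPut uint64) (stop bool) {
	if totalGet < b.get {
		// GET regressed: normally the signal to stop. If PUT still improved,
		// capture it anyway so an unexpected PUT > GET result is reported
		// rather than silently discarded.
		if totalPut > b.put {
			b.put = totalPut
			b.get = totalGet // let the client see the lower GET value as well
		}
		return true
	}
	b.get = totalGet
	b.put = totalPut
	return false
}

func main() {
	var b best
	// Each pair is (GET, PUT) in bytes/s; the last run shows GET regressing
	// while PUT keeps climbing.
	runs := [][2]uint64{{20 << 30, 25 << 30}, {24 << 30, 31 << 30}, {23 << 30, 38 << 30}}
	for _, r := range runs {
		if b.observe(r[0], r[1]) {
			break
		}
	}
	fmt.Printf("reported GET: %d GiB/s, PUT: %d GiB/s\n", b.get>>30, b.put>>30)
}
```

With these sample numbers it reports GET 23 GiB/s against PUT 38 GiB/s, the same shape of result the commit message shows for the Data24 setup.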