use single encoder for sending speedtest results (#13579)

Bonus: if a run measures a higher PUT throughput even though GET has dropped,
capture and send it anyway to surface the unexpected result; this provides a
way to understand what might be slowing things down on the system.

For example, on a Data24 WDC setup it is clearly visible
that there is a bug in the hardware:

```
./mc admin speedtest wdc/
⠧ Running speedtest (With 64 MiB object size, 32 concurrency) PUT: 31 GiB/s GET: 24 GiB/s
⠹ Running speedtest (With 64 MiB object size, 48 concurrency) PUT: 38 GiB/s GET: 24 GiB/s

MinIO 2021-11-04T06:08:33Z, 6 servers, 48 drives
PUT: 38 GiB/s, 605 objs/s
GET: 24 GiB/s, 383 objs/s
```

Reads are almost 14 GiB/s slower than Writes, which is
practically not possible.
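
To make the bonus behavior concrete, here is a minimal, self-contained sketch of the new check. The `run` type and the sample numbers are invented for illustration; the real logic lives inside `speedTest` as shown in the diff further down.

```go
package main

import "fmt"

// run holds the total bytes read and written during one concurrency step;
// the values used below are invented to illustrate the new check.
type run struct {
	get, put uint64
}

func main() {
	runs := []run{
		{get: 24 << 30, put: 31 << 30},
		{get: 24 << 30, put: 38 << 30},
		{get: 23 << 30, put: 41 << 30}, // GET regressed while PUT kept climbing
	}

	var highestGet, highestPut uint64
	for _, r := range runs {
		if r.get < highestGet {
			// Writes() should not scale higher than Reads(); if they do
			// (for example due to a hardware issue), capture the run anyway
			// so the client sees the unexpected numbers before the test stops.
			if r.put > highestPut {
				highestPut = r.put
				highestGet = r.get // let the client see the lower value as well
			}
			break
		}
		highestGet, highestPut = r.get, r.put
	}
	fmt.Printf("reported PUT=%d GiB, GET=%d GiB\n", highestPut>>30, highestGet>>30)
}
```

Instead of silently discarding the anomalous step, the reported result now shows the higher PUT together with the lower GET.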
Harshavardhana authored on 2021-11-04 12:11:52 -07:00, committed by GitHub
commit c3d24fb26d (parent 112f9ae087)
2 changed files with 42 additions and 16 deletions


```diff
@@ -987,7 +987,7 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
     throughputHighestGet := uint64(0)
     throughputHighestPut := uint64(0)
-    var throughputHighestGetResults []SpeedtestResult
+    var throughputHighestResults []SpeedtestResult
     sendResult := func() {
         var result madmin.SpeedTestResult
```
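The "single encoder" in the commit title concerns how these interim results are written out to the client; that change is not visible in the hunks shown here. Purely as a hedged, generic illustration of the pattern (the handler, types, and endpoint below are invented and are not MinIO code), one `json.Encoder` can be created once and reused for every interim result:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// interimResult is an invented stand-in for the result type being streamed;
// this handler is a generic illustration of the single-encoder pattern, not
// the actual MinIO admin handler.
type interimResult struct {
	PUTThroughputPerSec uint64 `json:"putThroughputPerSec"`
	GETThroughputPerSec uint64 `json:"getThroughputPerSec"`
}

func speedtestHandler(w http.ResponseWriter, r *http.Request) {
	results := make(chan interimResult)
	go func() {
		defer close(results)
		// A real implementation would push one entry per concurrency step.
		results <- interimResult{PUTThroughputPerSec: 38 << 30, GETThroughputPerSec: 24 << 30}
	}()

	// One encoder for the whole response: every Encode call appends one
	// newline-delimited JSON document that the client can decode as it arrives.
	enc := json.NewEncoder(w)
	for res := range results {
		if err := enc.Encode(res); err != nil {
			return // client disconnected
		}
		if f, ok := w.(http.Flusher); ok {
			f.Flush()
		}
	}
}

func main() {
	http.HandleFunc("/speedtest", speedtestHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Reusing a single encoder keeps the response a plain stream of newline-delimited JSON documents and avoids constructing a new encoder for each interim result.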
```diff
@@ -998,21 +998,21 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
         result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(throughputSize) / uint64(durationSecs)
         result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
         result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(throughputSize) / uint64(durationSecs)
-        for i := 0; i < len(throughputHighestGetResults); i++ {
+        for i := 0; i < len(throughputHighestResults); i++ {
             errStr := ""
-            if throughputHighestGetResults[i].Error != "" {
-                errStr = throughputHighestGetResults[i].Error
+            if throughputHighestResults[i].Error != "" {
+                errStr = throughputHighestResults[i].Error
             }
             result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
-                Endpoint:         throughputHighestGetResults[i].Endpoint,
-                ThroughputPerSec: throughputHighestGetResults[i].Uploads / uint64(durationSecs),
-                ObjectsPerSec:    throughputHighestGetResults[i].Uploads / uint64(throughputSize) / uint64(durationSecs),
+                Endpoint:         throughputHighestResults[i].Endpoint,
+                ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
+                ObjectsPerSec:    throughputHighestResults[i].Uploads / uint64(throughputSize) / uint64(durationSecs),
                 Err:              errStr,
             })
             result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
-                Endpoint:         throughputHighestGetResults[i].Endpoint,
-                ThroughputPerSec: throughputHighestGetResults[i].Downloads / uint64(durationSecs),
-                ObjectsPerSec:    throughputHighestGetResults[i].Downloads / uint64(throughputSize) / uint64(durationSecs),
+                Endpoint:         throughputHighestResults[i].Endpoint,
+                ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
+                ObjectsPerSec:    throughputHighestResults[i].Downloads / uint64(throughputSize) / uint64(durationSecs),
                 Err:              errStr,
             })
         }
```
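As a quick sanity check, the per-server numbers in the sample output earlier follow the same arithmetic as this hunk: objects per second is throughput divided by object size. The snippet below is only an illustration; the small gap from the reported ~605 objs/s comes from the GiB/s rounding in the display.

```go
package main

import "fmt"

func main() {
	// objs/s = (bytes/s) / object size, mirroring the hunk above.
	const objectSize = 64 << 20              // 64 MiB objects, as in the sample run
	putBytesPerSec := uint64(38) << 30       // ~38 GiB/s PUT from the sample output
	fmt.Println(putBytesPerSec / objectSize) // prints 608, close to the ~605 objs/s reported
}
```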
```diff
@@ -1052,6 +1052,21 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
         }
         if totalGet < throughputHighestGet {
+            // Following check is for situations
+            // when Writes() scale higher than Reads()
+            // - practically speaking this never happens
+            // and should never happen - however it has
+            // been seen recently due to hardware issues
+            // which cause Reads() to go slower than Writes().
+            //
+            // Send such results anyways as this shall
+            // expose a problem underneath.
+            if totalPut > throughputHighestPut {
+                throughputHighestResults = results
+                throughputHighestPut = totalPut
+                // let the client see lower value as well
+                throughputHighestGet = totalGet
+            }
             sendResult()
             break
         }
```
```diff
@@ -1062,7 +1077,7 @@ func speedTest(ctx context.Context, throughputSize, concurrencyStart int, durati
         }
         throughputHighestGet = totalGet
-        throughputHighestGetResults = results
+        throughputHighestResults = results
         throughputHighestPut = totalPut
         if doBreak {
```