// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"fmt"
	"net/url"
	"runtime"
	"sort"
	"time"

	"github.com/minio/dperf/pkg/dperf"
	"github.com/minio/madmin-go/v3"
)

const speedTest = "speedtest"
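
// speedTestOpts contains the parameters for a single object speedtest run.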
type speedTestOpts struct {
	objectSize       int
	concurrencyStart int
	concurrency      int
	duration         time.Duration
	autotune         bool
	storageClass     string
	bucketName       string
	enableSha256     bool
}

// objectSpeedTest returns the max throughput and iops numbers.
func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedTestResult {
	ch := make(chan madmin.SpeedTestResult, 1)
	go func() {
		defer close(ch)

		concurrency := opts.concurrencyStart

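		// Autotune: clamp the starting concurrency to the cluster's
		// available parallelism before ramping up.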
		if opts.autotune {
			// If we have fewer drives than the requested concurrency,
			// start with concurrency equal to the number of drives -
			// the default of '32' may be too large and may not
			// complete within the total runtime of 10s.
			if globalEndpoints.NEndpoints() < concurrency {
				concurrency = globalEndpoints.NEndpoints()
			}

			// If any pool has fewer local disks than the concurrency,
			// lower the starting concurrency to the smallest number
			// of local disks per server.
			for _, localDiskCount := range globalEndpoints.NLocalDisksPathsPerPool() {
				if localDiskCount < concurrency {
					concurrency = localDiskCount
				}
			}

			// For any concurrency below '4', stick with '4'
			// concurrent operations to begin with.
			if concurrency < 4 {
				concurrency = 4
			}

			// If GOMAXPROCS is set to a lower value, use
			// concurrency == GOMAXPROCS instead.
			if runtime.GOMAXPROCS(0) < concurrency {
				concurrency = runtime.GOMAXPROCS(0)
			}
		}

		throughputHighestGet := uint64(0)
		throughputHighestPut := uint64(0)
		var throughputHighestResults []SpeedTestResult

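		// sendResult publishes the highest throughput numbers seen so
		// far on the result channel, normalized by the test duration.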
		sendResult := func() {
			var result madmin.SpeedTestResult

			durationSecs := opts.duration.Seconds()

			result.GETStats.ThroughputPerSec = throughputHighestGet / uint64(durationSecs)
			result.GETStats.ObjectsPerSec = throughputHighestGet / uint64(opts.objectSize) / uint64(durationSecs)
			result.PUTStats.ThroughputPerSec = throughputHighestPut / uint64(durationSecs)
			result.PUTStats.ObjectsPerSec = throughputHighestPut / uint64(opts.objectSize) / uint64(durationSecs)

			var totalUploadTimes madmin.TimeDurations
			var totalDownloadTimes madmin.TimeDurations
			var totalDownloadTTFB madmin.TimeDurations
			for i := 0; i < len(throughputHighestResults); i++ {
				errStr := ""
				if throughputHighestResults[i].Error != "" {
					errStr = throughputHighestResults[i].Error
				}

				// If the default concurrency yields zero download results, report an error.
				if throughputHighestResults[i].Downloads == 0 && opts.concurrencyStart == concurrency {
					errStr = fmt.Sprintf("no results for downloads upon first attempt, concurrency %d and duration %s", opts.concurrencyStart, opts.duration)
				}

				// If the default concurrency yields zero upload results, report an error.
				if throughputHighestResults[i].Uploads == 0 && opts.concurrencyStart == concurrency {
					errStr = fmt.Sprintf("no results for uploads upon first attempt, concurrency %d and duration %s", opts.concurrencyStart, opts.duration)
				}

				result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{
					Endpoint:         throughputHighestResults[i].Endpoint,
					ThroughputPerSec: throughputHighestResults[i].Uploads / uint64(durationSecs),
					ObjectsPerSec:    throughputHighestResults[i].Uploads / uint64(opts.objectSize) / uint64(durationSecs),
					Err:              errStr,
				})

				result.GETStats.Servers = append(result.GETStats.Servers, madmin.SpeedTestStatServer{
					Endpoint:         throughputHighestResults[i].Endpoint,
					ThroughputPerSec: throughputHighestResults[i].Downloads / uint64(durationSecs),
					ObjectsPerSec:    throughputHighestResults[i].Downloads / uint64(opts.objectSize) / uint64(durationSecs),
					Err:              errStr,
				})

				totalUploadTimes = append(totalUploadTimes, throughputHighestResults[i].UploadTimes...)
				totalDownloadTimes = append(totalDownloadTimes, throughputHighestResults[i].DownloadTimes...)
				totalDownloadTTFB = append(totalDownloadTTFB, throughputHighestResults[i].DownloadTTFB...)
			}

			result.PUTStats.Response = totalUploadTimes.Measure()
			result.GETStats.Response = totalDownloadTimes.Measure()
			result.GETStats.TTFB = totalDownloadTTFB.Measure()

			result.Size = opts.objectSize
			result.Disks = globalEndpoints.NEndpoints()
			result.Servers = len(globalNotificationSys.peerClients) + 1
			result.Version = Version
			result.Concurrent = concurrency

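			// Deliver the result unless the client has disconnected.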
			select {
			case ch <- result:
			case <-ctx.Done():
				return
			}
		}

		for {
			select {
			case <-ctx.Done():
				// If the client got disconnected, stop the speedtest.
				return
			default:
			}

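			// Fan the speedtest out to all servers at the current
			// concurrency level.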
			sopts := speedTestOpts{
				objectSize:   opts.objectSize,
				concurrency:  concurrency,
				duration:     opts.duration,
				storageClass: opts.storageClass,
				bucketName:   opts.bucketName,
				enableSha256: opts.enableSha256,
			}

			results := globalNotificationSys.SpeedTest(ctx, sopts)
			sort.Slice(results, func(i, j int) bool {
				return results[i].Endpoint < results[j].Endpoint
			})

			totalPut := uint64(0)
			totalGet := uint64(0)
			for _, result := range results {
				totalPut += result.Uploads
				totalGet += result.Downloads
			}

			if totalGet < throughputHighestGet {
				// The following check is for situations where Writes()
				// scale higher than Reads() - practically speaking this
				// never happens and should never happen - however it
				// has been seen recently due to hardware issues causing
				// Reads() to go slower than Writes().
				//
				// Send such results anyway, as this shall expose a
				// problem underneath.
				if totalPut > throughputHighestPut {
					throughputHighestResults = results
					throughputHighestPut = totalPut
					// let the client see the lower value as well
					throughputHighestGet = totalGet
				}
				sendResult()
				break
			}

			// Break if total GET throughput did not grow by at least
			// 2.5%; we have reached our peak at this point.
			doBreak := float64(totalGet-throughputHighestGet)/float64(totalGet) < 0.025

			throughputHighestGet = totalGet
			throughputHighestResults = results
			throughputHighestPut = totalPut

			if doBreak {
				sendResult()
				break
			}

			for _, result := range results {
				if result.Error != "" {
					// Break out on errors.
					sendResult()
					return
				}
			}

			sendResult()
			if !opts.autotune {
				break
			}

			// Try a roughly 50% higher concurrency to see if we get
			// better throughput.
			concurrency += (concurrency + 1) / 2
		}
	}()
	return ch
}
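
// driveSpeedTest runs dperf on this node's local drives and returns
// per-drive read and write throughput.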
func driveSpeedTest(ctx context.Context, opts madmin.DriveSpeedTestOpts) madmin.DriveSpeedTestResult {
	perf := &dperf.DrivePerf{
		Serial:    opts.Serial,
		BlockSize: opts.BlockSize,
		FileSize:  opts.FileSize,
	}

	localPaths := globalEndpoints.LocalDisksPaths()
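	// Drives without a format.json are skipped here and reported
	// below with errFaultyDisk.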
	var ignoredPaths []string
	paths := func() (tmpPaths []string) {
		for _, lp := range localPaths {
			if _, err := Lstat(pathJoin(lp, minioMetaBucket, formatConfigFile)); err == nil {
				tmpPaths = append(tmpPaths, pathJoin(lp, minioMetaTmpBucket))
			} else {
				// Run dperf only on formatted drives.
				ignoredPaths = append(ignoredPaths, lp)
			}
		}
		return tmpPaths
	}()

	scheme := "http"
	if globalIsTLS {
		scheme = "https"
	}

	u := &url.URL{
		Scheme: scheme,
		Host:   globalLocalNodeName,
	}

|
|
|
perfs, err := perf.Run(ctx, paths...)
|
|
|
|
return madmin.DriveSpeedTestResult{
|
2022-06-14 14:22:07 -04:00
|
|
|
Endpoint: u.String(),
|
2022-02-02 01:38:05 -05:00
|
|
|
Version: Version,
|
2022-02-25 21:06:38 -05:00
|
|
|
DrivePerf: func() (results []madmin.DrivePerf) {
|
|
|
|
for idx, r := range perfs {
|
2022-02-02 01:38:05 -05:00
|
|
|
result := madmin.DrivePerf{
|
2022-02-25 21:06:38 -05:00
|
|
|
Path: localPaths[idx],
|
2022-02-02 01:38:05 -05:00
|
|
|
ReadThroughput: r.ReadThroughput,
|
|
|
|
WriteThroughput: r.WriteThroughput,
|
|
|
|
Error: func() string {
|
|
|
|
if r.Error != nil {
|
|
|
|
return r.Error.Error()
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}(),
|
|
|
|
}
|
|
|
|
results = append(results, result)
|
|
|
|
}
|
2023-11-14 01:32:08 -05:00
|
|
|
for _, inp := range ignoredPaths {
|
|
|
|
results = append(results, madmin.DrivePerf{
|
|
|
|
Path: inp,
|
|
|
|
Error: errFaultyDisk.Error(),
|
|
|
|
})
|
|
|
|
}
|
2022-02-02 01:38:05 -05:00
|
|
|
return results
|
|
|
|
}(),
|
|
|
|
Error: func() string {
|
|
|
|
if err != nil {
|
|
|
|
return err.Error()
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}(),
|
|
|
|
}
|
|
|
|
}
|