Mirror of https://github.com/minio/minio.git
fix: speed up OBD tests, avoid unnecessary memory allocation (#10141)

Replace the dummy buffer with a nullReader{} to avoid large memory allocations in memory-constrained environments; this allows running OBD tests in such environments.
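The gist of the change as a self-contained sketch: nullReader and the io.LimitReader composition come from the commit, while the io.Copy harness and the 1 GiB size are illustrative. Instead of allocating buf := make([]byte, dataSize) up front, the test streams dataSize bytes with no payload allocation:

package main

import (
	"fmt"
	"io"
)

// nullReader mirrors the type added in this commit: Read claims the whole
// buffer was filled without ever writing to it, so producing test traffic
// costs no payload allocation.
type nullReader struct{}

func (r *nullReader) Read(b []byte) (int, error) {
	return len(b), nil
}

func main() {
	const dataSize = 1 << 30 // 1 GiB of logical payload, ~zero bytes allocated
	n, err := io.Copy(io.Discard, io.LimitReader(&nullReader{}, dataSize))
	fmt.Println(n, err) // 1073741824 <nil>
}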
commit f200a7fb6a
parent 98691f75bc
@@ -23,7 +23,6 @@ import (
 	"encoding/gob"
 	"errors"
 	"io"
-	"io/ioutil"
 	"math"
 	"net/url"
 	"strconv"
@@ -122,19 +121,22 @@ type progressReader struct {
 
 func (p *progressReader) Read(b []byte) (int, error) {
 	n, err := p.r.Read(b)
-	if err != nil && err != io.EOF {
-		return n, err
+	if n >= 0 {
+		p.progressChan <- int64(n)
 	}
-	p.progressChan <- int64(n)
 	return n, err
 }
 
+type nullReader struct{}
+
+func (r *nullReader) Read(b []byte) (int, error) {
+	return len(b), nil
+}
+
 func (client *peerRESTClient) doNetOBDTest(ctx context.Context, dataSize int64, threadCount uint) (info madmin.NetOBDInfo, err error) {
 	latencies := []float64{}
 	throughputs := []float64{}
 
-	buf := make([]byte, dataSize)
-
 	buflimiter := make(chan struct{}, threadCount)
 	errChan := make(chan error, threadCount)
 
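Note on the Read rewrite above: io.Reader allows a single call to return n > 0 together with a non-nil error (io.EOF included), so the byte count must be recorded before the error is acted on; the old early return could drop those final bytes from the transfer total. A standalone illustration of that contract (countingReader and the iotest harness are illustrative, not part of the commit):

package main

import (
	"fmt"
	"io"
	"strings"
	"testing/iotest"
)

// countingReader applies the same rule as the patched progressReader:
// account for n before reacting to err.
type countingReader struct {
	r     io.Reader
	total int64
}

func (c *countingReader) Read(b []byte) (int, error) {
	n, err := c.r.Read(b)
	c.total += int64(n) // count first, mirroring p.progressChan <- int64(n)
	return n, err
}

func main() {
	// DataErrReader delivers the final bytes and io.EOF in one call,
	// exactly the case where counting only after an error check loses bytes.
	cr := &countingReader{r: iotest.DataErrReader(strings.NewReader("hello"))}
	buf := make([]byte, 16)
	n, err := cr.Read(buf)
	fmt.Println(n, err, cr.total) // 5 EOF 5
}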
@@ -163,7 +165,7 @@ func (client *peerRESTClient) doNetOBDTest(ctx context.Context, dataSize int64,
 		}
 	}
 
-	wg := sync.WaitGroup{}
+	var wg sync.WaitGroup
 	finish := func() {
 		<-buflimiter
 		wg.Done()
@@ -183,27 +185,26 @@ func (client *peerRESTClient) doNetOBDTest(ctx context.Context, dataSize int64,
 		}
 
 		go func(i int) {
-			bufReader := bytes.NewReader(buf)
-			bufReadCloser := ioutil.NopCloser(&progressReader{
-				r:            bufReader,
+			progress := &progressReader{
+				r:            io.LimitReader(&nullReader{}, dataSize),
 				progressChan: transferChan,
-			})
+			}
 			start := time.Now()
 			before := atomic.LoadInt64(&totalTransferred)
 
 			ctx, cancel := context.WithTimeout(innerCtx, 10*time.Second)
 			defer cancel()
-			respBody, err := client.callWithContext(ctx, peerRESTMethodNetOBDInfo, nil, bufReadCloser, dataSize)
-			if err != nil {
 
-				if netErr, ok := err.(*rest.NetworkError); ok {
-					if urlErr, ok := netErr.Err.(*url.Error); ok {
-						if urlErr.Err.Error() == context.DeadlineExceeded.Error() {
-							slowSample()
-							finish()
-							return
-						}
-					}
+			// Turn off healthCheckFn for OBD tests to cater for higher load on the peers.
+			clnt := newPeerRESTClient(client.host)
+			clnt.restClient.HealthCheckFn = nil
+
+			respBody, err := clnt.callWithContext(ctx, peerRESTMethodNetOBDInfo, nil, progress, dataSize)
+			if err != nil {
+				if errors.Is(err, context.DeadlineExceeded) {
+					slowSample()
+					finish()
+					return
 				}
 
 				errChan <- err
@@ -329,15 +330,8 @@ func (client *peerRESTClient) NetOBDInfo(ctx context.Context) (info madmin.NetOB
 			continue
 		}
 
-		if netErr, ok := err.(*rest.NetworkError); ok {
-			if urlErr, ok := netErr.Err.(*url.Error); ok {
-				if urlErr.Err.Error() == context.Canceled.Error() {
-					continue
-				}
-				if urlErr.Err.Error() == context.DeadlineExceeded.Error() {
-					continue
-				}
-			}
+		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+			continue
 		}
 	}
 	return info, err
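Note on the error handling in the two hunks above: the old code type-asserted through rest.NetworkError and *url.Error and compared error strings, which breaks whenever the wrapping changes. errors.Is walks the Unwrap chain instead (e.g. *url.Error has implemented Unwrap since Go 1.13), so the match works at any wrapping depth, assuming each wrapper in the chain (including minio's internal rest.NetworkError) exposes Unwrap. A minimal standalone demonstration:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/url"
)

func main() {
	// Roughly what a timed-out peer call surfaces as: the context error
	// wrapped inside *url.Error (which has implemented Unwrap since Go 1.13).
	err := fmt.Errorf("peer call failed: %w", &url.Error{
		Op:  "POST",
		URL: "http://peer:9000",
		Err: context.DeadlineExceeded,
	})

	// errors.Is walks the whole Unwrap chain; string comparison against
	// urlErr.Err.Error() only worked at one fixed wrapping depth.
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
	fmt.Println(errors.Is(err, context.Canceled))         // false
}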
@@ -65,13 +65,6 @@ func GetOBDInfo(ctx context.Context, drive, fsPath string) (Latency, Throughput,
 		os.Remove(fsPath)
 	}()
 
-	// going to leave this here incase we decide to go back to caching again
-	// if gl, ok := globalLatency[drive]; ok {
-	// 	if gt, ok := globalThroughput[drive]; ok {
-	// 		return gl, gt, nil
-	// 	}
-	// }
-
 	blockSize := 4 * humanize.MiByte
 	fileSize := 256 * humanize.MiByte
 
@@ -94,6 +87,9 @@ func GetOBDInfo(ctx context.Context, drive, fsPath string) (Latency, Throughput,
 		latencies[i] = float64(latencyInSecs)
 	}
 
+	// Sync every full write with fdatasync.
+	Fdatasync(w)
+
 	for i := range latencies {
 		throughput := float64(blockSize) / latencies[i]
 		throughputs[i] = throughput
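Note on the Fdatasync(w) call added above: without a flush, the timed write loop can measure page-cache speed rather than the drive. Fdatasync is a repo-internal helper; what follows is a hedged sketch of what such a helper plausibly wraps on Linux (fdatasync(2) via golang.org/x/sys/unix), not the repo's actual implementation:

//go:build linux

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// fdatasync flushes file data (not all metadata) to the device so that
// timed writes reflect the drive, not the page cache. Sketch only; the
// repo's Fdatasync helper may differ.
func fdatasync(f *os.File) error {
	return unix.Fdatasync(int(f.Fd()))
}

func main() {
	f, err := os.CreateTemp("", "obd-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.Write(make([]byte, 4<<20)); err != nil { // one 4 MiB block
		panic(err)
	}
	if err := fdatasync(f); err != nil { // force it to stable storage
		panic(err)
	}
}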