fix: remove unused PerfInfoHandler code (#9328)

- Removes the PerfInfo admin API as it is superseded by OBDInfo
- Keep the drive path without the metaBucket in the OBD
  global latency map.
- Remove all the unused code related to the PerfInfo API
- Do not redefine the global mib, gib constants; always use
  humanize.MiByte and humanize.GiByte instead
Harshavardhana
2020-04-12 19:37:09 -07:00
committed by GitHub
parent 7d636a7c13
commit 4314ee1670
28 changed files with 53 additions and 2080 deletions
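For context on the last bullet of the commit message, a minimal sketch (not part of this commit) of the humanize constants that replace the locally defined kb/mb/gb values; the constant values are those defined by github.com/dustin/go-humanize:

package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// humanize.MiByte == 1 << 20 and humanize.GiByte == 1 << 30, matching
	// the locally defined mb/gb constants removed in the diff below.
	blockSize := 4 * humanize.MiByte  // 4 MiB write block, as in GetOBDInfo
	fileSize := 256 * humanize.MiByte // 256 MiB test file, as in GetOBDInfo
	fmt.Println(blockSize, fileSize, humanize.GiByte)
}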

View File

@@ -16,10 +16,6 @@
package disk
import (
	"os"
)
// Info stat fs struct is a container which holds the following values
// Total - total size of the volume / disk
// Free - free size of the volume / disk
@@ -36,60 +32,3 @@ type Info struct {
	// Usage is calculated per tenant.
	Usage uint64
}
// Performance holds information about read and write speed of a disk
type Performance struct {
	Path       string  `json:"path"`
	Error      string  `json:"error,omitempty"`
	WriteSpeed float64 `json:"writeSpeed"`
	ReadSpeed  float64 `json:"readSpeed"`
}
// GetPerformance returns given disk's read and write performance
func GetPerformance(path string, size int64) Performance {
	perf := Performance{}
	write, read, err := doPerfMeasure(path, size)
	if err != nil {
		perf.Error = err.Error()
		return perf
	}
	perf.WriteSpeed = write
	perf.ReadSpeed = read
	return perf
}
// Calculate the write and read performance - write `size` bytes to a
// temporary file, read them back and report both speeds in MiB/s
func doPerfMeasure(fsPath string, size int64) (writeSpeed, readSpeed float64, err error) {
	// Remove the file created for speed test purposes
	defer os.RemoveAll(fsPath)
	// Create a file with O_DIRECT flag
	w, err := OpenFileDirectIO(fsPath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
	if err != nil {
		return 0, 0, err
	}
	// Fetch aligned buf for direct-io
	buf := AlignedBlock(speedTestBlockSize)
	writeSpeed, err = speedTestWrite(w, buf, size)
	w.Close()
	if err != nil {
		return 0, 0, err
	}
	// Open file to compute read speed
	r, err := OpenFileDirectIO(fsPath, os.O_RDONLY, 0666)
	if err != nil {
		return 0, 0, err
	}
	defer r.Close()
	readSpeed, err = speedTestRead(r, buf, size)
	if err != nil {
		return 0, 0, err
	}
	return writeSpeed, readSpeed, nil
}

View File

@@ -21,19 +21,13 @@ import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"time"
"github.com/dustin/go-humanize"
"github.com/montanaflynn/stats"
)
const (
kb = uint64(1 << 10)
mb = uint64(kb << 10)
gb = uint64(mb << 10)
)
var globalLatency = map[string]Latency{}
var globalThroughput = map[string]Throughput{}
@@ -58,19 +52,20 @@ type Throughput struct {
}
// GetOBDInfo about the drive
func GetOBDInfo(ctx context.Context, endpoint string) (Latency, Throughput, error) {
func GetOBDInfo(ctx context.Context, drive string, fsPath string) (Latency, Throughput, error) {
	runtime.LockOSThread()
	f, err := OpenFileDirectIO(endpoint, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0755)
	// Create a file with O_DIRECT flag, choose default umask and also make sure
	// we are exclusively writing to a new file using O_EXCL.
	w, err := OpenFileDirectIO(fsPath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666)
	if err != nil {
		return Latency{}, Throughput{}, err
	}
	defer func() {
		f.Close()
		os.Remove(f.Name())
	}()
	drive := filepath.Dir(endpoint)
	defer func() {
		w.Close()
		os.Remove(fsPath)
	}()
	// going to leave this here in case we decide to go back to caching again
	// if gl, ok := globalLatency[drive]; ok {
@@ -79,27 +74,26 @@ func GetOBDInfo(ctx context.Context, endpoint string) (Latency, Throughput, erro
	// }
	// }
	blockSize := 4 * mb
	fileSize := 256 * mb
	blockSize := 4 * humanize.MiByte
	fileSize := 256 * humanize.MiByte
	latencies := make([]float64, fileSize/blockSize)
	throughputs := make([]float64, fileSize/blockSize)
	dioFile := os.NewFile(uintptr(f.Fd()), endpoint)
	data := make([]byte, blockSize)
	data := AlignedBlock(blockSize)
	for i := uint64(0); i < (fileSize / blockSize); i++ {
	for i := 0; i < (fileSize / blockSize); i++ {
		if ctx.Err() != nil {
			return Latency{}, Throughput{}, ctx.Err()
		}
		startTime := time.Now()
		if n, err := dioFile.Write(data); err != nil {
		if n, err := w.Write(data); err != nil {
			return Latency{}, Throughput{}, err
		} else if uint64(n) != blockSize {
		} else if n != blockSize {
			return Latency{}, Throughput{}, fmt.Errorf("Expected to write %d, but only wrote %d", blockSize, n)
		}
		latency := time.Since(startTime)
		latencies[i] = float64(latency.Seconds())
		latencyInSecs := time.Since(startTime).Seconds()
		latencies[i] = float64(latencyInSecs)
	}
	runtime.UnlockOSThread()
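For reference, a minimal usage sketch of the reworked GetOBDInfo signature; the import path and the scratch-file name below are assumptions for illustration, not taken from this commit:

package main

import (
	"context"
	"fmt"
	"log"
	"path/filepath"

	"github.com/minio/minio/pkg/disk" // assumed import path for the disk package
)

func main() {
	drive := "/mnt/drive1"
	// Hypothetical scratch file name; per the hunk above, GetOBDInfo creates
	// it with O_DIRECT|O_EXCL and removes it when the measurement finishes.
	fsPath := filepath.Join(drive, "obd-writable-check.tmp")

	latency, throughput, err := disk.GetOBDInfo(context.Background(), drive, fsPath)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("latency: %+v\nthroughput: %+v\n", latency, throughput)
}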

View File

@@ -1,97 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package disk
import (
	"io"
	"math"
	"time"

	humanize "github.com/dustin/go-humanize"
)
var speedTestBlockSize = 4 * humanize.MiByte
// speedTestWrite computes the write speed by writing `size` bytes of
// data to `w` in `speedTestBlockSize` (4 MiB) direct-aligned blocks
// present in `buf`
func speedTestWrite(w io.Writer, buf []byte, size int64) (float64, error) {
	// Write `size` bytes of data and record the write speed
	startTime := time.Now()
	remaining := size
	for remaining > 0 {
		var toWrite int
		// there's more remaining to write than the buffer can hold
		if int64(len(buf)) < remaining {
			toWrite = len(buf)
		} else { // buffer can hold all there is to write
			toWrite = int(remaining)
		}
		written, err := w.Write(buf[:toWrite])
		if err != nil {
			return 0, err
		}
		remaining = remaining - int64(written)
	}
	elapsedTime := time.Since(startTime).Seconds()
	totalWriteMBs := float64(size) / humanize.MiByte
	writeSpeed := totalWriteMBs / elapsedTime
	return roundToTwoDecimals(writeSpeed), nil
}
// speedTestRead computes the read speed by reading `size` bytes from
// the reader `r` using the 4 MiB buffer `buf`
func speedTestRead(r io.Reader, buf []byte, size int64) (float64, error) {
	// Read `size` bytes and record the read speed
	startTime := time.Now()
	remaining := size
	for remaining > 0 {
		// reads up to `speedTestBlockSize` on every read
		n, err := io.ReadFull(r, buf)
		if err == io.ErrUnexpectedEOF || err == nil {
			remaining = remaining - int64(n)
			continue
		}
		// Nothing more left to read from the Reader
		if err == io.EOF {
			break
		}
		// Error while reading from the underlying Reader
		if err != nil {
			return 0, err
		}
	}
	if remaining > 0 {
		return 0, io.ErrUnexpectedEOF
	}
	elapsedTime := time.Since(startTime).Seconds()
	totalReadMBs := float64(size) / humanize.MiByte
	readSpeed := totalReadMBs / elapsedTime
	return roundToTwoDecimals(readSpeed), nil
}
func roundToTwoDecimals(num float64) float64 {
	return math.Round(num*100) / 100
}
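The deleted helpers reported speeds in MiB/s rounded to two decimals. A stand-alone sketch of the same computation, for reference; throughputMiBps is a hypothetical name, not part of this code base:

package main

import (
	"fmt"
	"math"
	"time"

	"github.com/dustin/go-humanize"
)

// throughputMiBps mirrors the removed computation: total bytes converted
// to MiB, divided by the elapsed seconds, rounded to two decimal places.
func throughputMiBps(totalBytes int64, elapsed time.Duration) float64 {
	mibs := float64(totalBytes) / humanize.MiByte
	return math.Round(mibs/elapsed.Seconds()*100) / 100
}

func main() {
	// e.g. 256 MiB written in 2.5s -> 102.4 MiB/s
	fmt.Println(throughputMiBps(256*humanize.MiByte, 2500*time.Millisecond))
}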