use strconv variants to improve on performance per 'op' (#17626)
```
BenchmarkItoa
BenchmarkItoa-8          673628088      1.946 ns/op    0 B/op    0 allocs/op
BenchmarkFormatInt
BenchmarkFormatInt-8     592919769      2.012 ns/op    0 B/op    0 allocs/op
BenchmarkSprint
BenchmarkSprint-8         26149144      49.06 ns/op    2 B/op    1 allocs/op
BenchmarkSprintBool
BenchmarkSprintBool-8     26440180      45.92 ns/op    4 B/op    1 allocs/op
BenchmarkFormatBool
BenchmarkFormatBool-8   1000000000     0.2558 ns/op    0 B/op    0 allocs/op
```
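For reference, a minimal sketch of how numbers like these can be reproduced with `go test -bench=. -benchmem`. The harness below is an assumption (the PR does not include a benchmark file); only the function names mirror the output above. Save it in any `*_test.go` file:

```go
package strconvbench

import (
	"fmt"
	"strconv"
	"testing"
)

// sink keeps results alive so the compiler cannot optimize the calls away.
var sink string

// fmt.Sprint routes the value through interface{} and fmt's reflection-based
// formatting path, which is where the per-op allocation comes from.
func BenchmarkSprint(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = fmt.Sprint(42)
	}
}

func BenchmarkSprintBool(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = fmt.Sprint(true)
	}
}

// The strconv variants format directly from the concrete type.
func BenchmarkItoa(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = strconv.Itoa(42)
	}
}

func BenchmarkFormatInt(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = strconv.FormatInt(42, 10)
	}
}

// strconv.FormatBool returns the constant "true"/"false" string.
func BenchmarkFormatBool(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = strconv.FormatBool(true)
	}
}
```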
parent 5b7c83341b
commit 82075e8e3a
.github/workflows/helm-lint.yml (vendored), 6 lines changed:
```diff
@@ -14,12 +14,6 @@ concurrency:
 permissions:
   contents: read
 
-# This ensures that previous jobs for the PR are canceled when the PR is
-# updated.
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref }}
-  cancel-in-progress: true
-
 permissions:
   contents: read
 
```
```diff
@@ -206,7 +206,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
 			go storeDataUsageInBackend(ctx, objAPI, results)
 			err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode)
 			logger.LogIf(ctx, err)
-			res := map[string]string{"cycle": fmt.Sprint(cycleInfo.current)}
+			res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)}
 			if err != nil {
 				res["error"] = err.Error()
 			}
@@ -813,11 +813,11 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 			f.newCache.deleteRecursive(thisHash)
 			f.newCache.replaceHashed(thisHash, folder.parent, *flat)
 			total := map[string]string{
-				"objects": fmt.Sprint(flat.Objects),
-				"size":    fmt.Sprint(flat.Size),
+				"objects": strconv.FormatUint(flat.Objects, 10),
+				"size":    strconv.FormatInt(flat.Size, 10),
 			}
 			if flat.Versions > 0 {
-				total["versions"] = fmt.Sprint(flat.Versions)
+				total["versions"] = strconv.FormatUint(flat.Versions, 10)
 			}
 			stop(total)
 		}
```
```diff
@@ -23,7 +23,6 @@ import (
 	"encoding/gob"
 	"encoding/hex"
 	"errors"
-	"fmt"
 	"io"
 	"net/http"
 	"net/url"
@@ -733,7 +732,7 @@ func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path
 	values := make(url.Values)
 	values.Set(storageRESTVolume, volume)
 	values.Set(storageRESTFilePath, path)
-	values.Set(storageRESTGlob, fmt.Sprint(glob))
+	values.Set(storageRESTGlob, strconv.FormatBool(glob))
 	respBody, err := client.call(ctx, storageRESTMethodStatInfoFile, values, nil, -1)
 	if err != nil {
 		return stat, err
```
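The server-side decoding of that query parameter is not part of this diff. As a small, self-contained round-trip sketch using only the standard library (the `paramGlob` key below is a made-up stand-in for the real `storageRESTGlob` constant):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	// Hypothetical key name, for illustration only.
	const paramGlob = "glob"

	values := make(url.Values)
	values.Set(paramGlob, strconv.FormatBool(true)) // "true", without the fmt.Sprint allocation

	glob, err := strconv.ParseBool(values.Get(paramGlob))
	if err != nil {
		panic(err)
	}
	fmt.Println(glob) // true
}
```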
```diff
@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2023 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -29,6 +29,7 @@ import (
 	pathutil "path"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -504,17 +505,17 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
 		doneSz := globalScannerMetrics.timeSize(scannerMetricReadMetadata)
 		buf, err := s.readMetadata(ctx, item.Path)
 		doneSz(len(buf))
-		res["metasize"] = fmt.Sprint(len(buf))
+		res["metasize"] = strconv.Itoa(len(buf))
 		if err != nil {
 			res["err"] = err.Error()
 			return sizeSummary{}, errSkipFile
 		}
 		defer metaDataPoolPut(buf)
 
 		// Remove filename which is the meta file.
 		item.transformMetaDir()
 
 		fivs, err := getFileInfoVersions(buf, item.bucket, item.objectPath())
 		metaDataPoolPut(buf)
 		if err != nil {
 			res["err"] = err.Error()
 			return sizeSummary{}, errSkipFile
@@ -563,7 +564,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
 
 		// apply tier sweep action on free versions
 		if len(fivs.FreeVersions) > 0 {
-			res["free-versions"] = fmt.Sprint(len(fivs.FreeVersions))
+			res["free-versions"] = strconv.Itoa(len(fivs.FreeVersions))
 		}
 		for _, freeVersion := range fivs.FreeVersions {
 			oi := freeVersion.ToObjectInfo(item.bucket, item.objectPath(), versioned)
@@ -575,13 +576,13 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
 		// These are rather expensive. Skip if nobody listens.
 		if globalTrace.NumSubscribers(madmin.TraceScanner) > 0 {
 			if sizeS.versions > 0 {
-				res["versions"] = fmt.Sprint(sizeS.versions)
+				res["versions"] = strconv.FormatUint(sizeS.versions, 10)
 			}
-			res["size"] = fmt.Sprint(sizeS.totalSize)
+			res["size"] = strconv.FormatInt(sizeS.totalSize, 10)
 			if len(sizeS.tiers) > 0 {
 				for name, tier := range sizeS.tiers {
-					res["size-"+name] = fmt.Sprint(tier.TotalSize)
-					res["versions-"+name] = fmt.Sprint(tier.NumVersions)
+					res["size-"+name] = strconv.FormatUint(tier.TotalSize, 10)
+					res["versions-"+name] = strconv.Itoa(tier.NumVersions)
 				}
 			}
 			if sizeS.failedCount > 0 {
@@ -591,7 +592,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates
 			res["repl-pending"] = fmt.Sprintf("%d versions, %d bytes", sizeS.pendingCount, sizeS.pendingSize)
 		}
 		for tgt, st := range sizeS.replTargetStats {
-			res["repl-size-"+tgt] = fmt.Sprint(st.replicatedSize)
+			res["repl-size-"+tgt] = strconv.FormatInt(st.replicatedSize, 10)
 			if st.failedCount > 0 {
 				res["repl-failed-"+tgt] = fmt.Sprintf("%d versions, %d bytes", st.failedCount, st.failedSize)
 			}
```
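Taken together, the hunks follow a single mapping from `fmt.Sprint` to the type-specific `strconv` call. A hedged, self-contained sketch of that mapping (variable names and values here are illustrative only; `strconv.FormatInt` and `strconv.FormatUint` take `int64` and `uint64`, so narrower fields need an explicit conversion at the call site):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var (
		n  = 42           // int, e.g. len(buf): strconv.Itoa
		i6 = int64(-1024) // int64, e.g. flat.Size: strconv.FormatInt
		u6 = uint64(7)    // uint64, e.g. flat.Objects: strconv.FormatUint
		ok = true         // bool, e.g. glob: strconv.FormatBool
	)

	// The strconv form produces the same text as fmt.Sprint for these types,
	// just without boxing the value into an interface{} first.
	fmt.Println(strconv.Itoa(n) == fmt.Sprint(n))             // true
	fmt.Println(strconv.FormatInt(i6, 10) == fmt.Sprint(i6))  // true
	fmt.Println(strconv.FormatUint(u6, 10) == fmt.Sprint(u6)) // true
	fmt.Println(strconv.FormatBool(ok) == fmt.Sprint(ok))     // true
}
```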