From 82075e8e3afa835260dab82e0a20c69a0ca07fe8 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 11 Jul 2023 07:46:58 -0700 Subject: [PATCH] use strconv variants to improve on performance per 'op' (#17626) ``` BenchmarkItoa BenchmarkItoa-8 673628088 1.946 ns/op 0 B/op 0 allocs/op BenchmarkFormatInt BenchmarkFormatInt-8 592919769 2.012 ns/op 0 B/op 0 allocs/op BenchmarkSprint BenchmarkSprint-8 26149144 49.06 ns/op 2 B/op 1 allocs/op BenchmarkSprintBool BenchmarkSprintBool-8 26440180 45.92 ns/op 4 B/op 1 allocs/op BenchmarkFormatBool BenchmarkFormatBool-8 1000000000 0.2558 ns/op 0 B/op 0 allocs/op ``` --- .github/workflows/helm-lint.yml | 6 ------ cmd/data-scanner.go | 8 ++++---- cmd/storage-rest-client.go | 3 +-- cmd/xl-storage.go | 19 ++++++++++--------- 4 files changed, 15 insertions(+), 21 deletions(-) diff --git a/.github/workflows/helm-lint.yml b/.github/workflows/helm-lint.yml index 4d8693a81..17aaba380 100644 --- a/.github/workflows/helm-lint.yml +++ b/.github/workflows/helm-lint.yml @@ -14,12 +14,6 @@ concurrency: permissions: contents: read -# This ensures that previous jobs for the PR are canceled when the PR is -# updated. 
-concurrency: - group: ${{ github.workflow }}-${{ github.head_ref }} - cancel-in-progress: true - permissions: contents: read diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go index 9664b0635..e39152e63 100644 --- a/cmd/data-scanner.go +++ b/cmd/data-scanner.go @@ -206,7 +206,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) { go storeDataUsageInBackend(ctx, objAPI, results) err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode) logger.LogIf(ctx, err) - res := map[string]string{"cycle": fmt.Sprint(cycleInfo.current)} + res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)} if err != nil { res["error"] = err.Error() } @@ -813,11 +813,11 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int f.newCache.deleteRecursive(thisHash) f.newCache.replaceHashed(thisHash, folder.parent, *flat) total := map[string]string{ - "objects": fmt.Sprint(flat.Objects), - "size": fmt.Sprint(flat.Size), + "objects": strconv.FormatUint(flat.Objects, 10), + "size": strconv.FormatInt(flat.Size, 10), } if flat.Versions > 0 { - total["versions"] = fmt.Sprint(flat.Versions) + total["versions"] = strconv.FormatUint(flat.Versions, 10) } stop(total) } diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index 8ea7ae234..cd9daa2a1 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -23,7 +23,6 @@ import ( "encoding/gob" "encoding/hex" "errors" - "fmt" "io" "net/http" "net/url" @@ -733,7 +732,7 @@ func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) - values.Set(storageRESTGlob, fmt.Sprint(glob)) + values.Set(storageRESTGlob, strconv.FormatBool(glob)) respBody, err := client.call(ctx, storageRESTMethodStatInfoFile, values, nil, -1) if err != nil { return stat, err diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index a46d30ce9..fd4eb3577 
100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2023 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -29,6 +29,7 @@ import ( pathutil "path" "path/filepath" "runtime" + "strconv" "strings" "sync" "sync/atomic" @@ -504,17 +505,17 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates doneSz := globalScannerMetrics.timeSize(scannerMetricReadMetadata) buf, err := s.readMetadata(ctx, item.Path) doneSz(len(buf)) - res["metasize"] = fmt.Sprint(len(buf)) + res["metasize"] = strconv.Itoa(len(buf)) if err != nil { res["err"] = err.Error() return sizeSummary{}, errSkipFile } - defer metaDataPoolPut(buf) // Remove filename which is the meta file. item.transformMetaDir() fivs, err := getFileInfoVersions(buf, item.bucket, item.objectPath()) + metaDataPoolPut(buf) if err != nil { res["err"] = err.Error() return sizeSummary{}, errSkipFile @@ -563,7 +564,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates // apply tier sweep action on free versions if len(fivs.FreeVersions) > 0 { - res["free-versions"] = fmt.Sprint(len(fivs.FreeVersions)) + res["free-versions"] = strconv.Itoa(len(fivs.FreeVersions)) } for _, freeVersion := range fivs.FreeVersions { oi := freeVersion.ToObjectInfo(item.bucket, item.objectPath(), versioned) @@ -575,13 +576,13 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates // These are rather expensive. Skip if nobody listens. 
if globalTrace.NumSubscribers(madmin.TraceScanner) > 0 { if sizeS.versions > 0 { - res["versions"] = fmt.Sprint(sizeS.versions) + res["versions"] = strconv.FormatUint(sizeS.versions, 10) } - res["size"] = fmt.Sprint(sizeS.totalSize) + res["size"] = strconv.FormatInt(sizeS.totalSize, 10) if len(sizeS.tiers) > 0 { for name, tier := range sizeS.tiers { - res["size-"+name] = fmt.Sprint(tier.TotalSize) - res["versions-"+name] = fmt.Sprint(tier.NumVersions) + res["size-"+name] = strconv.FormatUint(tier.TotalSize, 10) + res["versions-"+name] = strconv.Itoa(tier.NumVersions) } } if sizeS.failedCount > 0 { @@ -591,7 +592,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates res["repl-pending"] = fmt.Sprintf("%d versions, %d bytes", sizeS.pendingCount, sizeS.pendingSize) } for tgt, st := range sizeS.replTargetStats { - res["repl-size-"+tgt] = strconv.FormatInt(st.replicatedSize, 10) if st.failedCount > 0 { res["repl-failed-"+tgt] = fmt.Sprintf("%d versions, %d bytes", st.failedCount, st.failedSize) }