From 90f5e1e5f62cbc47be6d0a3ca0634bfd84c2248c Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Tue, 18 Feb 2025 08:25:55 -0800 Subject: [PATCH] tests: Do not allow forced type asserts (#20905) --- .golangci.yml | 11 ++- cmd/admin-bucket-handlers.go | 10 +-- cmd/admin-handlers-idp-config.go | 2 - cmd/admin-handlers-users.go | 4 +- cmd/admin-handlers.go | 29 ++++---- cmd/api-response.go | 1 - cmd/auth-handler.go | 1 - cmd/batch-expire.go | 2 +- cmd/batch-handlers.go | 2 - cmd/bitrot.go | 2 +- cmd/bucket-handlers.go | 3 - cmd/bucket-handlers_test.go | 2 - cmd/bucket-replication-stats.go | 1 - cmd/bucket-replication.go | 2 +- cmd/common-main_test.go | 4 +- cmd/data-scanner.go | 3 +- cmd/dynamic-timeouts.go | 1 - cmd/dynamic-timeouts_test.go | 1 - cmd/encryption-v1.go | 2 +- cmd/encryption-v1_test.go | 2 - cmd/endpoint.go | 1 - cmd/erasure-coding.go | 1 - cmd/erasure-healing-common_test.go | 2 - cmd/erasure-healing.go | 3 - cmd/erasure-multipart.go | 1 - cmd/erasure-object_test.go | 1 - cmd/handler-api.go | 1 - cmd/iam-etcd-store.go | 1 - cmd/iam-object-store.go | 1 - cmd/iam-store.go | 13 ++-- cmd/iam.go | 1 - cmd/listen-notification-handlers.go | 7 +- cmd/local-locker_test.go | 1 - cmd/metacache-stream.go | 5 +- cmd/metrics-v2.go | 1 - cmd/metrics-v3-cluster-usage.go | 1 - cmd/notification.go | 1 - cmd/object-api-listobjects_test.go | 5 -- cmd/object-api-multipart_test.go | 1 - cmd/object-handlers.go | 3 - cmd/object-handlers_test.go | 5 +- cmd/object_api_suite_test.go | 2 - cmd/os_unix.go | 17 +++-- cmd/os_windows.go | 1 - cmd/postpolicyform.go | 15 ++-- cmd/sftp-server.go | 4 -- cmd/signature-v2_test.go | 1 - cmd/signature-v4-parser_test.go | 4 -- cmd/site-replication.go | 5 +- cmd/storage-rest-client.go | 6 +- cmd/storage-rest-server.go | 24 +++---- cmd/test-utils_test.go | 1 - cmd/untar.go | 14 ++-- cmd/update_test.go | 2 +- cmd/xl-storage-format-v2.go | 8 +-- cmd/xl-storage-format_test.go | 1 - cmd/xl-storage.go | 4 +- docs/debugging/xl-meta/main.go | 4 +- 
internal/bpool/pool.go | 45 ++++++++++++ internal/bucket/bandwidth/monitor.go | 1 - internal/bucket/bandwidth/reader.go | 1 - internal/bucket/lifecycle/lifecycle.go | 1 - .../bucket/replication/replication_test.go | 1 - internal/bucket/replication/rule_test.go | 1 - internal/config/certs_test.go | 23 +++---- internal/config/dns/etcd_dns.go | 1 - internal/config/identity/openid/openid.go | 1 - internal/config/notify/parse.go | 2 - internal/config/subnet/config.go | 9 +-- internal/disk/stat_test.go | 2 +- internal/event/name.go | 1 - internal/event/target/elasticsearch.go | 3 +- internal/grid/connection.go | 13 ++-- internal/grid/grid.go | 14 ++-- internal/grid/handlers.go | 20 +++--- internal/grid/muxserver.go | 1 - internal/grid/types.go | 35 +++++----- internal/handlers/forwarder.go | 14 ++-- internal/http/flush.go | 27 ++++++++ internal/http/listener.go | 14 ++-- internal/ioutil/ioutil.go | 69 ++++++++++--------- internal/ioutil/ioutil_test.go | 10 +-- internal/jwt/parser.go | 24 +++---- internal/kms/config_test.go | 2 +- internal/lock/lock_test.go | 6 +- internal/logger/config.go | 1 - internal/logger/target/http/http.go | 9 +-- internal/logger/target/kafka/kafka.go | 1 - internal/mountinfo/mountinfo_linux.go | 14 ++-- internal/mountinfo/mountinfo_windows.go | 4 +- internal/s3select/csv/reader.go | 39 ++++++----- internal/s3select/json/preader.go | 34 ++++----- internal/s3select/json/reader.go | 2 +- internal/s3select/message.go | 4 +- internal/s3select/parquet/reader.go | 1 - internal/s3select/select.go | 13 ++-- internal/s3select/sql/funceval.go | 1 - internal/s3select/sql/stringfuncs.go | 1 - internal/s3select/sql/timestampfuncs.go | 1 - internal/s3select/sql/value.go | 9 ++- 100 files changed, 371 insertions(+), 358 deletions(-) create mode 100644 internal/bpool/pool.go create mode 100644 internal/http/flush.go diff --git a/.golangci.yml b/.golangci.yml index 239de6e7c..6dfa38565 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -21,16 +21,25 @@ linters: - 
misspell - revive - staticcheck - - tenv - typecheck - unconvert - unused + - usetesting + - forcetypeassert + - whitespace issues: exclude-use-default: false + max-issues-per-linter: 100 + max-same-issues: 100 exclude: - "empty-block:" - "unused-parameter:" - "dot-imports:" - should have a package comment - error strings should not be capitalized or end with punctuation or a newline + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test\.go + linters: + - forcetypeassert diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go index 59d1fbb56..4ea93878f 100644 --- a/cmd/admin-bucket-handlers.go +++ b/cmd/admin-bucket-handlers.go @@ -38,6 +38,7 @@ import ( objectlock "github.com/minio/minio/internal/bucket/object/lock" "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/event" + xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/kms" "github.com/minio/mux" "github.com/minio/pkg/v3/policy" @@ -980,7 +981,6 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * rpt.SetStatus(bucket, "", err) continue } - } rptData, err := json.Marshal(rpt.BucketMetaImportErrs) @@ -1039,7 +1039,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http. } if len(diffCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(diffCh) > 0 { @@ -1048,7 +1048,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http. 
if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } @@ -1098,7 +1098,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R } if len(mrfCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(mrfCh) > 0 { @@ -1107,7 +1107,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } diff --git a/cmd/admin-handlers-idp-config.go b/cmd/admin-handlers-idp-config.go index 8ba9dc5f8..7b5792f03 100644 --- a/cmd/admin-handlers-idp-config.go +++ b/cmd/admin-handlers-idp-config.go @@ -125,7 +125,6 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R } if err = validateConfig(ctx, cfg, subSys); err != nil { - var validationErr ldap.Validation if errors.As(err, &validationErr) { // If we got an LDAP validation error, we need to send appropriate @@ -416,7 +415,6 @@ func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *ht return } if err = validateConfig(ctx, cfg, subSys); err != nil { - var validationErr ldap.Validation if errors.As(err, &validationErr) { // If we got an LDAP validation error, we need to send appropriate diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go index 320b186df..1091f46d2 100644 --- a/cmd/admin-handlers-users.go +++ b/cmd/admin-handlers-users.go @@ -1487,8 +1487,8 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ return } effectivePolicy = globalIAMSys.GetCombinedPolicy(policies...) 
- } + buf, err = json.MarshalIndent(effectivePolicy, "", " ") if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) @@ -2279,7 +2279,6 @@ func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiV // import policies first { - f, err := zr.Open(pathJoin(iamAssetsDir, allPoliciesFile)) switch { case errors.Is(err, os.ErrNotExist): @@ -2362,7 +2361,6 @@ func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiV } else { added.Users = append(added.Users, accessKey) } - } } } diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 57a4c7aa7..b28c08c9b 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -829,7 +829,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) } // Flush before waiting for next... - w.(http.Flusher).Flush() + xhttp.Flush(w) select { case <-ticker.C: @@ -1359,7 +1359,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case hr := <-respCh: switch hr.apiErr { case noError: @@ -1367,7 +1367,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write(hr.respBytes); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) } else { writeSuccessResponseJSON(w, hr.respBytes) } @@ -1394,7 +1394,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write(errorRespJSON); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) } break forLoop } @@ -1611,7 +1611,6 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request) if err != nil || ctx.Err() != nil || totalRx > 100*humanize.GiByte { break } - } w.WriteHeader(http.StatusOK) } @@ -1840,7 +1839,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. 
return } } - w.(http.Flusher).Flush() + xhttp.Flush(w) case result, ok := <-ch: if !ok { return @@ -1849,7 +1848,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. return } prevResult = result - w.(http.Flusher).Flush() + xhttp.Flush(w) } } } @@ -1958,7 +1957,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case result, ok := <-ch: if !ok { return @@ -1966,7 +1965,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R if err := enc.Encode(result); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) } } } @@ -2083,7 +2082,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) { grid.PutByteBuffer(entry) if len(traceCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(traceCh) > 0 { @@ -2092,7 +2091,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } @@ -2184,7 +2183,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque grid.PutByteBuffer(log) if len(logCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(logCh) > 0 { @@ -2193,7 +2192,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } @@ -2963,13 +2962,13 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque } if len(healthInfoCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-ticker.C: if _, err := 
w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-healthCtx.Done(): return } diff --git a/cmd/api-response.go b/cmd/api-response.go index cb279cb09..28501b5b3 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -520,7 +520,6 @@ func cleanReservedKeys(metadata map[string]string) map[string]string { } case crypto.SSEC: m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES - } var toRemove []string diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index 3ba46d35e..dd4895910 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -162,7 +162,6 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string) s3Err := ErrAccessDenied if _, ok := r.Header[xhttp.AmzContentSha256]; ok && getRequestAuthType(r) == authTypeSigned { - // Get credential information from the request. cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3) if s3Err != ErrNone { diff --git a/cmd/batch-expire.go b/cmd/batch-expire.go index 300d4a8e1..ced17bcfd 100644 --- a/cmd/batch-expire.go +++ b/cmd/batch-expire.go @@ -195,8 +195,8 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool { return false } } - } + if len(ef.Metadata) > 0 && !obj.DeleteMarker { for _, kv := range ef.Metadata { // Object (version) must match all x-amz-meta and diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go index 5083e99f3..d1f20bb97 100644 --- a/cmd/batch-handlers.go +++ b/cmd/batch-handlers.go @@ -1450,7 +1450,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, cred = r.Source.Creds remoteBkt = r.Source.Bucket pathStyle = r.Source.Path - } u, err := url.Parse(remoteEp) @@ -2310,7 +2309,6 @@ func lookupStyle(s string) miniogo.BucketLookupType { lookup = miniogo.BucketLookupDNS default: lookup = miniogo.BucketLookupAuto - } return lookup } diff --git a/cmd/bitrot.go b/cmd/bitrot.go index f858f1b48..b4dbab01e 100644 --- a/cmd/bitrot.go +++ 
b/cmd/bitrot.go @@ -178,7 +178,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w return errFileCorrupt } - bufp := xioutil.ODirectPoolSmall.Get().(*[]byte) + bufp := xioutil.ODirectPoolSmall.Get() defer xioutil.ODirectPoolSmall.Put(bufp) for left > 0 { diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index de442e3a8..6463c4903 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -344,11 +344,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R Created: dnsRecords[0].CreationDate, }) } - sort.Slice(bucketsInfo, func(i, j int) bool { return bucketsInfo[i].Name < bucketsInfo[j].Name }) - } else { // Invoke the list buckets. var err error @@ -841,7 +839,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req } writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return - } apiErr := ErrBucketAlreadyExists if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() { diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go index 48718dfb9..49e390011 100644 --- a/cmd/bucket-handlers_test.go +++ b/cmd/bucket-handlers_test.go @@ -188,7 +188,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri if errorResponse.Code != testCase.errorResponse.Code { t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code) } - } // Test for Anonymous/unsigned http request. @@ -290,7 +289,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api if recV2.Code != testCase.expectedRespStatus { t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code) } - } // Test for Anonymous/unsigned http request. 
diff --git a/cmd/bucket-replication-stats.go b/cmd/bucket-replication-stats.go index d20a0ff47..297179284 100644 --- a/cmd/bucket-replication-stats.go +++ b/cmd/bucket-replication-stats.go @@ -112,7 +112,6 @@ func (r *ReplicationStats) collectWorkerMetrics(ctx context.Context) { r.wlock.Lock() r.workers.update() r.wlock.Unlock() - } } } diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index d2d91f4b8..fdbc1be1d 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -146,7 +146,7 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re if errInt == nil { err = nil } else { - err = errInt.(error) + err, _ = errInt.(error) } } switch err.(type) { diff --git a/cmd/common-main_test.go b/cmd/common-main_test.go index f7516eac0..bcd804299 100644 --- a/cmd/common-main_test.go +++ b/cmd/common-main_test.go @@ -45,7 +45,7 @@ func Test_readFromSecret(t *testing.T) { for _, testCase := range testCases { testCase := testCase t.Run("", func(t *testing.T) { - tmpfile, err := os.CreateTemp("", "testfile") + tmpfile, err := os.CreateTemp(t.TempDir(), "testfile") if err != nil { t.Error(err) } @@ -157,7 +157,7 @@ MINIO_ROOT_PASSWORD=minio123`, for _, testCase := range testCases { testCase := testCase t.Run("", func(t *testing.T) { - tmpfile, err := os.CreateTemp("", "testfile") + tmpfile, err := os.CreateTemp(t.TempDir(), "testfile") if err != nil { t.Error(err) } diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go index 1b9acee09..e4ae60f68 100644 --- a/cmd/data-scanner.go +++ b/cmd/data-scanner.go @@ -858,8 +858,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int } } } - } + if compact { stop := globalScannerMetrics.log(scannerMetricCompactFolder, folder.name) f.newCache.deleteRecursive(thisHash) @@ -873,7 +873,6 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int } stop(total) } - } // Compact if too many children... 
if !into.Compacted { diff --git a/cmd/dynamic-timeouts.go b/cmd/dynamic-timeouts.go index 04c78c63a..bc9b1c42f 100644 --- a/cmd/dynamic-timeouts.go +++ b/cmd/dynamic-timeouts.go @@ -98,7 +98,6 @@ func (dt *dynamicTimeout) logEntry(duration time.Duration) { // We leak entries while we copy if entries == dynamicTimeoutLogSize { - // Make copy on stack in order to call adjust() logCopy := [dynamicTimeoutLogSize]time.Duration{} copy(logCopy[:], dt.log[:]) diff --git a/cmd/dynamic-timeouts_test.go b/cmd/dynamic-timeouts_test.go index 42f66c998..c8f4c42f8 100644 --- a/cmd/dynamic-timeouts_test.go +++ b/cmd/dynamic-timeouts_test.go @@ -167,7 +167,6 @@ func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() fl const successTimeout = 20 * time.Second for i := 0; i < dynamicTimeoutLogSize; i++ { - rnd := f() duration := time.Duration(float64(successTimeout) * rnd) diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index 5013c221f..179fe894c 100644 --- a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -347,8 +347,8 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt return errInvalidSSEParameters // AWS returns special error for equal but invalid keys. } return crypto.ErrInvalidCustomerKey // To provide strict AWS S3 compatibility we return: access denied. 
- } + if subtle.ConstantTimeCompare(oldKey, newKey) == 1 && sealedKey.Algorithm == crypto.SealAlgorithm { return nil // don't rotate on equal keys if seal algorithm is latest } diff --git a/cmd/encryption-v1_test.go b/cmd/encryption-v1_test.go index 7001f4c0e..ec441b4f0 100644 --- a/cmd/encryption-v1_test.go +++ b/cmd/encryption-v1_test.go @@ -362,7 +362,6 @@ func TestGetDecryptedRange(t *testing.T) { t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps) } } - } // Multipart object tests @@ -538,7 +537,6 @@ func TestGetDecryptedRange(t *testing.T) { i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef) } } - } } diff --git a/cmd/endpoint.go b/cmd/endpoint.go index 979702480..0dfe62e20 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -213,7 +213,6 @@ func NewEndpoint(arg string) (ep Endpoint, e error) { u.Path = u.Path[1:] } } - } else { // Only check if the arg is an ip address and ask for scheme since its absent. // localhost, example.com, any FQDN cannot be disambiguated from a regular file path such as diff --git a/cmd/erasure-coding.go b/cmd/erasure-coding.go index fb9b326af..c825f3181 100644 --- a/cmd/erasure-coding.go +++ b/cmd/erasure-coding.go @@ -201,7 +201,6 @@ func erasureSelfTest() { ok = false continue } - } } if !ok { diff --git a/cmd/erasure-healing-common_test.go b/cmd/erasure-healing-common_test.go index b33371357..d249e04b0 100644 --- a/cmd/erasure-healing-common_test.go +++ b/cmd/erasure-healing-common_test.go @@ -304,7 +304,6 @@ func TestListOnlineDisks(t *testing.T) { f.Close() break } - } rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount @@ -485,7 +484,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { f.Close() break } - } rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go index fa4ddbd0f..d3d5b8de5 100644 --- a/cmd/erasure-healing.go +++ b/cmd/erasure-healing.go @@ -584,7 +584,6 @@ func (er *erasureObjects) 
healObject(ctx context.Context, bucket string, object readers[i] = newBitrotReader(disk, copyPartsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize()) prefer[i] = disk.Hostname() == "" - } writers := make([]io.Writer, len(outDatedDisks)) for i, disk := range outDatedDisks { @@ -643,9 +642,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object if disksToHealCount == 0 { return result, fmt.Errorf("all drives had write errors, unable to heal %s/%s", bucket, object) } - } - } defer er.deleteAll(context.Background(), minioMetaTmpBucket, tmpID) diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go index c440928c1..c69400587 100644 --- a/cmd/erasure-multipart.go +++ b/cmd/erasure-multipart.go @@ -1049,7 +1049,6 @@ func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaP PartNumber: partNumbers[pidx], }.Error(), } - } return partInfosInQuorum, nil } diff --git a/cmd/erasure-object_test.go b/cmd/erasure-object_test.go index e3492f075..71b9d3b8d 100644 --- a/cmd/erasure-object_test.go +++ b/cmd/erasure-object_test.go @@ -246,7 +246,6 @@ func TestDeleteObjectsVersioned(t *testing.T) { VersionID: objInfo.VersionID, }, } - } names = append(names, ObjectToDelete{ ObjectV: ObjectV{ diff --git a/cmd/handler-api.go b/cmd/handler-api.go index 609ff03f1..7ccc67262 100644 --- a/cmd/handler-api.go +++ b/cmd/handler-api.go @@ -366,7 +366,6 @@ func maxClients(f http.HandlerFunc) http.HandlerFunc { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrTooManyRequests), r.URL) - } } } diff --git a/cmd/iam-etcd-store.go b/cmd/iam-etcd-store.go index 925399ea2..16a3df5b3 100644 --- a/cmd/iam-etcd-store.go +++ b/cmd/iam-etcd-store.go @@ -493,7 +493,6 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa keyPath: string(event.Kv.Key), } } - } } } diff --git a/cmd/iam-object-store.go b/cmd/iam-object-store.go index 0d9672f4a..f519e9059 100644 --- 
a/cmd/iam-object-store.go +++ b/cmd/iam-object-store.go @@ -278,7 +278,6 @@ func (iamOS *IAMObjectStore) loadUserIdentity(ctx context.Context, user string, iamOS.deleteIAMConfig(ctx, getMappedPolicyPath(user, userType, false)) } return u, errNoSuchUser - } u.Credentials.Claims = jwtClaims.Map() } diff --git a/cmd/iam-store.go b/cmd/iam-store.go index 6d17a7fa7..c1620cd45 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -845,7 +845,11 @@ func (store *IAMStoreSys) PolicyDBGet(name string, groups ...string) ([]string, if err != nil { return nil, err } - return val.([]string), nil + res, ok := val.([]string) + if !ok { + return nil, errors.New("unexpected policy type") + } + return res, nil } return getPolicies() } @@ -1218,7 +1222,6 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro cache.iamGroupPolicyMap.Delete(name) } } else { - if err = store.saveMappedPolicy(ctx, name, userType, isGroup, newPolicyMapping); err != nil { return } @@ -1620,7 +1623,6 @@ func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Polic policies = append(policies, policy) toMerge = append(toMerge, p.Policy) } - } return strings.Join(policies, ","), policy.MergePolicies(toMerge...) 
@@ -2917,7 +2919,10 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error return err } - newCache := val.(*iamCache) + newCache, ok := val.(*iamCache) + if !ok { + return nil + } cache := store.lock() defer store.unlock() diff --git a/cmd/iam.go b/cmd/iam.go index 896460599..aaeea6ac7 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -2286,7 +2286,6 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { } policies = policySet.ToSlice() } - } // Defensive code: Do not allow any operation if no policy is found in the session token diff --git a/cmd/listen-notification-handlers.go b/cmd/listen-notification-handlers.go index 50743f7d6..9f3210daf 100644 --- a/cmd/listen-notification-handlers.go +++ b/cmd/listen-notification-handlers.go @@ -26,6 +26,7 @@ import ( "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/grid" + xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/pubsub" "github.com/minio/mux" @@ -200,19 +201,19 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r } if len(mergeCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } grid.PutByteBuffer(ev) case <-emptyEventTicker: if err := enc.Encode(struct{ Records []event.Event }{}); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-keepAliveTicker: if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } diff --git a/cmd/local-locker_test.go b/cmd/local-locker_test.go index d37e55027..80a9a509e 100644 --- a/cmd/local-locker_test.go +++ b/cmd/local-locker_test.go @@ -136,7 +136,6 @@ func TestLocalLockerUnlock(t *testing.T) { } wResources[i] = names wUIDs[i] = uid - } for i := range rResources { name := mustGetUUID() diff --git a/cmd/metacache-stream.go b/cmd/metacache-stream.go index c0023d801..0925683d5 100644 --- 
a/cmd/metacache-stream.go +++ b/cmd/metacache-stream.go @@ -27,6 +27,7 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/s2" + "github.com/minio/minio/internal/bpool" xioutil "github.com/minio/minio/internal/ioutil" "github.com/tinylib/msgp/msgp" "github.com/valyala/bytebufferpool" @@ -236,7 +237,7 @@ func (w *metacacheWriter) Reset(out io.Writer) { } } -var s2DecPool = sync.Pool{New: func() interface{} { +var s2DecPool = bpool.Pool[*s2.Reader]{New: func() *s2.Reader { // Default alloc block for network transfer. return s2.NewReader(nil, s2.ReaderAllocBlock(16<<10)) }} @@ -253,7 +254,7 @@ type metacacheReader struct { // newMetacacheReader creates a new cache reader. // Nothing will be read from the stream yet. func newMetacacheReader(r io.Reader) *metacacheReader { - dec := s2DecPool.Get().(*s2.Reader) + dec := s2DecPool.Get() dec.Reset(r) mr := msgpNewReader(dec) return &metacacheReader{ diff --git a/cmd/metrics-v2.go b/cmd/metrics-v2.go index c4684e30f..7ddd518f3 100644 --- a/cmd/metrics-v2.go +++ b/cmd/metrics-v2.go @@ -2474,7 +2474,6 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { } downtimeDuration.Value = float64(dwntime / time.Second) ml = append(ml, downtimeDuration) - } return ml }) diff --git a/cmd/metrics-v3-cluster-usage.go b/cmd/metrics-v3-cluster-usage.go index 614d30d13..38dc0aef3 100644 --- a/cmd/metrics-v3-cluster-usage.go +++ b/cmd/metrics-v3-cluster-usage.go @@ -177,7 +177,6 @@ func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metri for k, v := range usage.ObjectVersionsHistogram { m.Set(usageBucketObjectVersionCountDistribution, float64(v), "range", k, "bucket", bucket) } - } return nil } diff --git a/cmd/notification.go b/cmd/notification.go index fe9e6b6dd..3ae544a1b 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -1184,7 +1184,6 @@ func (sys *NotificationSys) GetPeerOnlineCount() (nodesOnline, nodesOffline int) defer wg.Done() 
nodesOnlineIndex[idx] = client.restClient.HealthCheckFn() }(idx, client) - } wg.Wait() diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index cc6d422e4..786f8cf09 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -389,7 +389,6 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } // Formulating the result data set to be expected from ListObjects call inside the tests, @@ -1014,7 +1013,6 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v t.Errorf("Test %d: %s: Expected NextMarker to be empty since listing is not truncated, but instead found `%v`", i+1, instanceType, result.NextMarker) } } - } }) } @@ -1166,7 +1164,6 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } // Formualting the result data set to be expected from ListObjects call inside the tests, @@ -1785,12 +1782,10 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } // Formulating the result data set to be expected from ListObjects call inside the tests, // This will be used in testCases and used for asserting the correctness of ListObjects output in the tests. - resultCases := []ListObjectsInfo{ { Objects: []ObjectInfo{ diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index 92eb10bfc..5949eabce 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -443,7 +443,6 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } // Expected Results set for asserting ListObjectMultipart test. 
diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 9bd929cfc..cff95980a 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -1538,7 +1538,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re srcInfo.UserDefined[xhttp.AmzObjectTagging] = objTags srcInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp] = UTCNow().Format(time.RFC3339Nano) } - } srcInfo.UserDefined = filterReplicationStatusMetadata(srcInfo.UserDefined) @@ -1631,7 +1630,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re srcInfo.metadataOnly && srcOpts.VersionID == "" && !crypto.Requested(r.Header) && !crypto.IsSourceEncrypted(srcInfo.UserDefined) { - // If x-amz-metadata-directive is not set to REPLACE then we need // to error out if source and destination are same. writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyDest), r.URL) @@ -2410,7 +2408,6 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(metadata, "", "", replication.ObjectReplicationType, opts)); dsc.ReplicateAny() { metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano) metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus() - } var objectEncryptionKey crypto.ObjectKey diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 074683583..a0577e0f7 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -814,7 +814,6 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str caseNumber++ } } - } // HTTP request for testing when `objectLayer` is set to `nil`. 
@@ -1567,8 +1566,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a } } } - } + if testCase.expectedRespStatus == http.StatusOK { buffer := new(bytes.Buffer) // Fetch the object to check whether the content is same as the one uploaded via PutObject. @@ -3520,7 +3519,6 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string t.Errorf("Case %d: MinIO %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code) } - } // Test for Anonymous/unsigned http request. @@ -4198,7 +4196,6 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str // validate the error response. if test.expectedErr != noAPIErr { - var errBytes []byte // read the response body. errBytes, err = io.ReadAll(rec.Result().Body) diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index b01cbab19..a6e1093ee 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -228,7 +228,6 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH if objInfo.Size != int64(len(value)) { t.Errorf("%s: Size mismatch of the GetObject data.", instanceType) } - } } @@ -384,7 +383,6 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { // check results with Marker. 
{ - result, err = obj.ListObjects(context.Background(), "bucket", "", "newPrefix", "", 3) if err != nil { t.Fatalf("%s: %s", instanceType, err) diff --git a/cmd/os_unix.go b/cmd/os_unix.go index 55d363197..4ad248646 100644 --- a/cmd/os_unix.go +++ b/cmd/os_unix.go @@ -25,10 +25,10 @@ import ( "fmt" "os" "strings" - "sync" "syscall" "unsafe" + "github.com/minio/minio/internal/bpool" "golang.org/x/sys/unix" ) @@ -106,15 +106,15 @@ const blockSize = 8 << 10 // 8192 // By default at least 128 entries in single getdents call (1MiB buffer) var ( - direntPool = sync.Pool{ - New: func() interface{} { + direntPool = bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, blockSize*128) return &buf }, } - direntNamePool = sync.Pool{ - New: func() interface{} { + direntNamePool = bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, blockSize) return &buf }, @@ -183,11 +183,10 @@ func readDirFn(dirPath string, fn func(name string, typ os.FileMode) error) erro } return osErrToFileErr(err) } - } defer syscall.Close(fd) - bufp := direntPool.Get().(*[]byte) + bufp := direntPool.Get() defer direntPool.Put(bufp) buf := *bufp @@ -273,11 +272,11 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er } defer syscall.Close(fd) - bufp := direntPool.Get().(*[]byte) + bufp := direntPool.Get() defer direntPool.Put(bufp) buf := *bufp - nameTmp := direntNamePool.Get().(*[]byte) + nameTmp := direntNamePool.Get() defer direntNamePool.Put(nameTmp) tmp := *nameTmp diff --git a/cmd/os_windows.go b/cmd/os_windows.go index 97230e221..bf03f2b90 100644 --- a/cmd/os_windows.go +++ b/cmd/os_windows.go @@ -173,7 +173,6 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er count-- entries = append(entries, name) - } return entries, nil diff --git a/cmd/postpolicyform.go b/cmd/postpolicyform.go index 74b5ef870..9119c1e7a 100644 --- a/cmd/postpolicyform.go +++ b/cmd/postpolicyform.go @@ -131,16 +131,17 @@ func sanitizePolicy(r 
io.Reader) (io.Reader, error) { d := jstream.NewDecoder(r, 0).ObjectAsKVS().MaxDepth(10) sset := set.NewStringSet() for mv := range d.Stream() { - var kvs jstream.KVS if mv.ValueType == jstream.Object { // This is a JSON object type (that preserves key order) - kvs = mv.Value.(jstream.KVS) - for _, kv := range kvs { - if sset.Contains(kv.Key) { - // Reject duplicate conditions or expiration. - return nil, fmt.Errorf("input policy has multiple %s, please fix your client code", kv.Key) + kvs, ok := mv.Value.(jstream.KVS) + if ok { + for _, kv := range kvs { + if sset.Contains(kv.Key) { + // Reject duplicate conditions or expiration. + return nil, fmt.Errorf("input policy has multiple %s, please fix your client code", kv.Key) + } + sset.Add(kv.Key) } - sset.Add(kv.Key) } e.Encode(kvs) } diff --git a/cmd/sftp-server.go b/cmd/sftp-server.go index 640caf605..2255a142b 100644 --- a/cmd/sftp-server.go +++ b/cmd/sftp-server.go @@ -174,7 +174,6 @@ internalAuth: if subtle.ConstantTimeCompare([]byte(ui.Credentials.SecretKey), pass) != 1 { return nil, errAuthentication } - } copts := map[string]string{ @@ -223,14 +222,11 @@ func processLDAPAuthentication(key ssh.PublicKey, pass []byte, user string) (per if err != nil { return nil, err } - } else if key != nil { - lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(user) if err != nil { return nil, err } - } if lookupResult == nil { diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 5125a952c..5f39bcf54 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -159,7 +159,6 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { t.Errorf("(%d) expected to get success, instead got %s", i, niceError(errCode)) } } - } } diff --git a/cmd/signature-v4-parser_test.go b/cmd/signature-v4-parser_test.go index b3fb93394..6f6eb949a 100644 --- a/cmd/signature-v4-parser_test.go +++ b/cmd/signature-v4-parser_test.go @@ -298,7 +298,6 @@ func TestParseSignature(t *testing.T) { 
t.Errorf("Test %d: Expected the result to be \"%s\", but got \"%s\". ", i+1, testCase.expectedSignStr, actualSignStr) } } - } } @@ -343,7 +342,6 @@ func TestParseSignedHeaders(t *testing.T) { t.Errorf("Test %d: Expected the result to be \"%v\", but got \"%v\". ", i+1, testCase.expectedSignedHeaders, actualSignedHeaders) } } - } } @@ -514,7 +512,6 @@ func TestParseSignV4(t *testing.T) { t.Errorf("Test %d: Expected the result to be \"%v\", but got \"%v\". ", i+1, testCase.expectedAuthField, parsedAuthField.SignedHeaders) } } - } } @@ -880,6 +877,5 @@ func TestParsePreSignV4(t *testing.T) { t.Errorf("Test %d: Expected date to be %v, but got %v", i+1, testCase.expectedPreSignValues.Date.UTC().Format(iso8601Format), parsedPreSign.Date.UTC().Format(iso8601Format)) } } - } } diff --git a/cmd/site-replication.go b/cmd/site-replication.go index 642d4eb37..38ef4ad6c 100644 --- a/cmd/site-replication.go +++ b/cmd/site-replication.go @@ -1037,7 +1037,6 @@ func (c *SiteReplicationSys) PeerBucketConfigureReplHandler(ctx context.Context, if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil { return wrapSRErr(err) } - } // no replication rule for this peer or target ARN missing in bucket targets if targetARN == "" { @@ -1406,7 +1405,6 @@ func (c *SiteReplicationSys) PeerSvcAccChangeHandler(ctx context.Context, change if err := globalIAMSys.DeleteServiceAccount(ctx, change.Delete.AccessKey, true); err != nil { return wrapSRErr(err) } - } return nil @@ -1430,8 +1428,8 @@ func (c *SiteReplicationSys) PeerPolicyMappingHandler(ctx context.Context, mappi userType := IAMUserType(mapping.UserType) isGroup := mapping.IsGroup entityName := mapping.UserOrGroup - if globalIAMSys.GetUsersSysType() == LDAPUsersSysType && userType == stsUser { + if globalIAMSys.GetUsersSysType() == LDAPUsersSysType && userType == stsUser { // Validate that the user or group exists in LDAP and use the normalized // form of the entityName (which will be an LDAP DN). 
var err error @@ -3062,7 +3060,6 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O sum.ReplicatedGroupPolicyMappings++ info.StatsSummary[ps.DeploymentID] = sum } - } } diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index 84f81c8e7..cad47b696 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -29,11 +29,11 @@ import ( "path" "strconv" "strings" - "sync" "sync/atomic" "time" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/cachevalue" "github.com/minio/minio/internal/grid" xhttp "github.com/minio/minio/internal/http" @@ -508,13 +508,13 @@ func (client *storageRESTClient) RenameData(ctx context.Context, srcVolume, srcP } // where we keep old *Readers -var readMsgpReaderPool = sync.Pool{New: func() interface{} { return &msgp.Reader{} }} +var readMsgpReaderPool = bpool.Pool[*msgp.Reader]{New: func() *msgp.Reader { return &msgp.Reader{} }} // mspNewReader returns a *Reader that reads from the provided reader. // The reader will be buffered. // Return with readMsgpReaderPoolPut when done. func msgpNewReader(r io.Reader) *msgp.Reader { - p := readMsgpReaderPool.Get().(*msgp.Reader) + p := readMsgpReaderPool.Get() if p.R == nil { p.R = xbufio.NewReaderSize(r, 32<<10) } else { diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go index 13c6752e0..7af81ec30 100644 --- a/cmd/storage-rest-server.go +++ b/cmd/storage-rest-server.go @@ -34,6 +34,7 @@ import ( "sync" "time" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/grid" "github.com/tinylib/msgp/msgp" @@ -831,7 +832,7 @@ func keepHTTPReqResponseAlive(w http.ResponseWriter, r *http.Request) (resp func // Response not ready, write a filler byte. 
write([]byte{32}) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } case err := <-doneCh: if err != nil { @@ -905,7 +906,7 @@ func keepHTTPResponseAlive(w http.ResponseWriter) func(error) { // Response not ready, write a filler byte. write([]byte{32}) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } case err := <-doneCh: if err != nil { @@ -1025,7 +1026,7 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse { // Response not ready, write a filler byte. write([]byte{32}) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } case err := <-doneCh: if err != nil { @@ -1043,7 +1044,7 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse { write(tmp[:]) write(block) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } } } @@ -1051,29 +1052,23 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse { return &h } -var poolBuf8k = sync.Pool{ - New: func() interface{} { +var poolBuf8k = bpool.Pool[*[]byte]{ + New: func() *[]byte { b := make([]byte, 8192) return &b }, } -var poolBuf128k = sync.Pool{ - New: func() interface{} { - b := make([]byte, 128<<10) - return b - }, -} - // waitForHTTPStream will wait for responses where // streamHTTPResponse has been used. // The returned reader contains the payload and must be closed if no error is returned. func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error { var tmp [1]byte // 8K copy buffer, reused for less allocs... 
- bufp := poolBuf8k.Get().(*[]byte) + bufp := poolBuf8k.Get() buf := *bufp defer poolBuf8k.Put(bufp) + for { _, err := io.ReadFull(respBody, tmp[:]) if err != nil { @@ -1438,7 +1433,6 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin } } }(endpoint) - } } } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 89ebda28c..3ae977341 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -794,7 +794,6 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in if n <= 0 { break } - } req.Body = io.NopCloser(bytes.NewReader(stream)) return req, nil diff --git a/cmd/untar.go b/cmd/untar.go index 006a3d48a..0f8c428a6 100644 --- a/cmd/untar.go +++ b/cmd/untar.go @@ -36,6 +36,7 @@ import ( "github.com/klauspost/compress/s2" "github.com/klauspost/compress/zstd" gzip "github.com/klauspost/pgzip" + xioutil "github.com/minio/minio/internal/ioutil" "github.com/pierrec/lz4/v4" ) @@ -182,7 +183,6 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in header, err := tarReader.Next() switch { - // if no more files are found return case err == io.EOF: wg.Wait() @@ -226,13 +226,10 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in // Do small files async n++ - if header.Size <= smallFileThreshold { + if header.Size <= xioutil.MediumBlock { asyncWriters <- struct{}{} - b := poolBuf128k.Get().([]byte) - if cap(b) < int(header.Size) { - b = make([]byte, smallFileThreshold) - } - b = b[:header.Size] + bufp := xioutil.ODirectPoolMedium.Get() + b := (*bufp)[:header.Size] if _, err := io.ReadFull(tarReader, b); err != nil { return err } @@ -243,8 +240,7 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in rc.Close() <-asyncWriters wg.Done() - //nolint:staticcheck // SA6002 we are fine with the tiny alloc - poolBuf128k.Put(b) + xioutil.ODirectPoolMedium.Put(bufp) }() if err := putObject(&rc, fi, name); err != nil { if 
o.ignoreErrs { diff --git a/cmd/update_test.go b/cmd/update_test.go index b5896e0c3..e1af9e38c 100644 --- a/cmd/update_test.go +++ b/cmd/update_test.go @@ -210,7 +210,7 @@ func TestIsKubernetes(t *testing.T) { // Tests if the environment we are running is Helm chart. func TestGetHelmVersion(t *testing.T) { createTempFile := func(content string) string { - tmpfile, err := os.CreateTemp("", "helm-testfile-") + tmpfile, err := os.CreateTemp(t.TempDir(), "helm-testfile-") if err != nil { t.Fatalf("Unable to create temporary file. %s", err) } diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go index f5e8de0d0..37f32b0fb 100644 --- a/cmd/xl-storage-format-v2.go +++ b/cmd/xl-storage-format-v2.go @@ -26,12 +26,12 @@ import ( "io" "sort" "strings" - "sync" "time" "github.com/cespare/xxhash/v2" "github.com/google/uuid" jsoniter "github.com/json-iterator/go" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/bucket/lifecycle" "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/config/storageclass" @@ -703,18 +703,17 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string, allParts bool) (FileInfo const metaDataReadDefault = 4 << 10 // Return used metadata byte slices here. -var metaDataPool = sync.Pool{New: func() interface{} { return make([]byte, 0, metaDataReadDefault) }} +var metaDataPool = bpool.Pool[[]byte]{New: func() []byte { return make([]byte, 0, metaDataReadDefault) }} // metaDataPoolGet will return a byte slice with capacity at least metaDataReadDefault. // It will be length 0. func metaDataPoolGet() []byte { - return metaDataPool.Get().([]byte)[:0] + return metaDataPool.Get()[:0] } // metaDataPoolPut will put an unused small buffer back into the pool. 
func metaDataPoolPut(buf []byte) { if cap(buf) >= metaDataReadDefault && cap(buf) < metaDataReadDefault*4 { - //nolint:staticcheck // SA6002 we are fine with the tiny alloc metaDataPool.Put(buf) } } @@ -1982,7 +1981,6 @@ func mergeXLV2Versions(quorum int, strict bool, requestedVersions int, versions if !latest.header.FreeVersion() { nVersions++ } - } else { // Find latest. var latestCount int diff --git a/cmd/xl-storage-format_test.go b/cmd/xl-storage-format_test.go index 19bad9d13..9ba58d769 100644 --- a/cmd/xl-storage-format_test.go +++ b/cmd/xl-storage-format_test.go @@ -225,7 +225,6 @@ func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1Objec if val != jsoniterVal { t.Errorf("Expected the value for Meta data key \"%s\" to be \"%s\", but got \"%s\".", key, val, jsoniterVal) } - } } diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index b32a0b444..d83324a08 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -2166,10 +2166,10 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz var bufp *[]byte switch { case fileSize <= xioutil.SmallBlock: - bufp = xioutil.ODirectPoolSmall.Get().(*[]byte) + bufp = xioutil.ODirectPoolSmall.Get() defer xioutil.ODirectPoolSmall.Put(bufp) default: - bufp = xioutil.ODirectPoolLarge.Get().(*[]byte) + bufp = xioutil.ODirectPoolLarge.Get() defer xioutil.ODirectPoolLarge.Put(bufp) } diff --git a/docs/debugging/xl-meta/main.go b/docs/debugging/xl-meta/main.go index e95329959..e062bf7ae 100644 --- a/docs/debugging/xl-meta/main.go +++ b/docs/debugging/xl-meta/main.go @@ -369,8 +369,8 @@ FLAGS: defer f.Close() r = f } - if strings.HasSuffix(file, ".zip") { - zr, err := zip.NewReader(r.(io.ReaderAt), sz) + if ra, ok := r.(io.ReaderAt); ok && strings.HasSuffix(file, ".zip") { + zr, err := zip.NewReader(ra, sz) if err != nil { return err } diff --git a/internal/bpool/pool.go b/internal/bpool/pool.go new file mode 100644 index 000000000..63b56eff6 --- /dev/null +++ 
b/internal/bpool/pool.go @@ -0,0 +1,45 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +package bpool + +import "sync" + +// Pool is a single type sync.Pool with a few extra properties: +// If New is not set Get may return the zero value of T. +type Pool[T any] struct { + New func() T + p sync.Pool +} + +// Get will return a new T +func (p *Pool[T]) Get() T { + v, ok := p.p.Get().(T) + if ok { + return v + } + if p.New == nil { + var t T + return t + } + return p.New() +} + +// Put a used T.
+func (p *Pool[T]) Put(t T) { + p.p.Put(t) +} diff --git a/internal/bucket/bandwidth/monitor.go b/internal/bucket/bandwidth/monitor.go index 74814e9ad..48a989a2d 100644 --- a/internal/bucket/bandwidth/monitor.go +++ b/internal/bucket/bandwidth/monitor.go @@ -127,7 +127,6 @@ func (m *Monitor) getReport(selectBucket SelectionFunction) *BucketBandwidthRepo } } m.tlock.RUnlock() - } return report } diff --git a/internal/bucket/bandwidth/reader.go b/internal/bucket/bandwidth/reader.go index e82199bde..da0957677 100644 --- a/internal/bucket/bandwidth/reader.go +++ b/internal/bucket/bandwidth/reader.go @@ -64,7 +64,6 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) { r.opts.HeaderSize = 0 need = int(math.Min(float64(b-hdr), float64(need))) // use remaining tokens towards payload tokens = need + hdr - } else { // part of header can be accommodated r.opts.HeaderSize -= b - 1 need = 1 // to ensure we read at least one byte for every Read diff --git a/internal/bucket/lifecycle/lifecycle.go b/internal/bucket/lifecycle/lifecycle.go index 68da9251c..e0dddd843 100644 --- a/internal/bucket/lifecycle/lifecycle.go +++ b/internal/bucket/lifecycle/lifecycle.go @@ -207,7 +207,6 @@ func (lc Lifecycle) HasActiveRules(prefix string) bool { if !rule.Transition.IsNull() { // this allows for Transition.Days to be zero. 
return true } - } return false } diff --git a/internal/bucket/replication/replication_test.go b/internal/bucket/replication/replication_test.go index 8fae740e1..26c72b28d 100644 --- a/internal/bucket/replication/replication_test.go +++ b/internal/bucket/replication/replication_test.go @@ -365,7 +365,6 @@ func TestHasActiveRules(t *testing.T) { t.Fatalf("Expected result with recursive set to true: `%v`, got: `%v`", tc.expectedRec, got) } }) - } } diff --git a/internal/bucket/replication/rule_test.go b/internal/bucket/replication/rule_test.go index df7192553..0e883e4e9 100644 --- a/internal/bucket/replication/rule_test.go +++ b/internal/bucket/replication/rule_test.go @@ -67,6 +67,5 @@ func TestMetadataReplicate(t *testing.T) { t.Fatalf("Expected result with recursive set to false: `%v`, got: `%v`", tc.expectedResult, got) } }) - } } diff --git a/internal/config/certs_test.go b/internal/config/certs_test.go index 4c989d37b..d102a2492 100644 --- a/internal/config/certs_test.go +++ b/internal/config/certs_test.go @@ -22,10 +22,11 @@ import ( "testing" ) -func createTempFile(prefix, content string) (tempFile string, err error) { +func createTempFile(t testing.TB, prefix, content string) (tempFile string, err error) { + t.Helper() var tmpfile *os.File - if tmpfile, err = os.CreateTemp("", prefix); err != nil { + if tmpfile, err = os.CreateTemp(t.TempDir(), prefix); err != nil { return tempFile, err } @@ -42,14 +43,13 @@ func createTempFile(prefix, content string) (tempFile string, err error) { } func TestParsePublicCertFile(t *testing.T) { - tempFile1, err := createTempFile("public-cert-file", "") + tempFile1, err := createTempFile(t, "public-cert-file", "") if err != nil { t.Fatalf("Unable to create temporary file. 
%v", err) } defer os.Remove(tempFile1) - tempFile2, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile2, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -70,8 +70,7 @@ M9ofSEt/bdRD } defer os.Remove(tempFile2) - tempFile3, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile3, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -92,8 +91,7 @@ M9ofSEt/bdRD } defer os.Remove(tempFile3) - tempFile4, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile4, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -114,8 +112,7 @@ M9ofSEt/bdRD } defer os.Remove(tempFile4) - tempFile5, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile5, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -184,11 +181,11 @@ func TestLoadX509KeyPair(t *testing.T) { os.Unsetenv(EnvCertPassword) }) for i, testCase := range loadX509KeyPairTests { - privateKey, err := createTempFile("private.key", testCase.privateKey) + privateKey, err := createTempFile(t, "private.key", testCase.privateKey) if err != nil { 
t.Fatalf("Test %d: failed to create tmp private key file: %v", i, err) } - certificate, err := createTempFile("public.crt", testCase.certificate) + certificate, err := createTempFile(t, "public.crt", testCase.certificate) if err != nil { os.Remove(privateKey) t.Fatalf("Test %d: failed to create tmp certificate file: %v", i, err) diff --git a/internal/config/dns/etcd_dns.go b/internal/config/dns/etcd_dns.go index ddf81461c..120eab5ef 100644 --- a/internal/config/dns/etcd_dns.go +++ b/internal/config/dns/etcd_dns.go @@ -143,7 +143,6 @@ func (c *CoreDNS) list(key string, domain bool) ([]SrvRecord, error) { srvRecord.Key = msgUnPath(srvRecord.Key) srvRecords = append(srvRecords, srvRecord) - } sort.Slice(srvRecords, func(i int, j int) bool { return srvRecords[i].Key < srvRecords[j].Key diff --git a/internal/config/identity/openid/openid.go b/internal/config/identity/openid/openid.go index f9076fdf6..c2f502a68 100644 --- a/internal/config/identity/openid/openid.go +++ b/internal/config/identity/openid/openid.go @@ -534,7 +534,6 @@ func (r *Config) GetSettings() madmin.OpenIDSettings { HashedClientSecret: hashedSecret, } } - } return res diff --git a/internal/config/notify/parse.go b/internal/config/notify/parse.go index 0f7dc4404..46d478934 100644 --- a/internal/config/notify/parse.go +++ b/internal/config/notify/parse.go @@ -125,7 +125,6 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t return nil, err } targets = append(targets, t) - } case config.NotifyKafkaSubSys: kafkaTargets, err := GetNotifyKafka(cfg[config.NotifyKafkaSubSys]) @@ -142,7 +141,6 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t return nil, err } targets = append(targets, t) - } case config.NotifyMQTTSubSys: diff --git a/internal/config/subnet/config.go b/internal/config/subnet/config.go index 7dd5fc38c..9e2420a64 100644 --- a/internal/config/subnet/config.go +++ b/internal/config/subnet/config.go @@ -127,7 +127,6 @@ func 
LookupConfig(kvs config.KVS, transport http.RoundTripper) (cfg Config, err if err != nil { return cfg, err } - } cfg.License = strings.TrimSpace(env.Get(config.EnvMinIOSubnetLicense, kvs.Get(config.License))) @@ -142,9 +141,11 @@ func LookupConfig(kvs config.KVS, transport http.RoundTripper) (cfg Config, err // Make sure to clone the transport before editing the ProxyURL if proxyURL != nil { - ctransport := transport.(*http.Transport).Clone() - ctransport.Proxy = http.ProxyURL((*url.URL)(proxyURL)) - cfg.transport = ctransport + if tr, ok := transport.(*http.Transport); ok { + ctransport := tr.Clone() + ctransport.Proxy = http.ProxyURL((*url.URL)(proxyURL)) + cfg.transport = ctransport + } } else { cfg.transport = transport } diff --git a/internal/disk/stat_test.go b/internal/disk/stat_test.go index 83f426057..73ca017f5 100644 --- a/internal/disk/stat_test.go +++ b/internal/disk/stat_test.go @@ -103,7 +103,7 @@ func TestReadDriveStats(t *testing.T) { for _, testCase := range testCases { testCase := testCase t.Run("", func(t *testing.T) { - tmpfile, err := os.CreateTemp("", "testfile") + tmpfile, err := os.CreateTemp(t.TempDir(), "testfile") if err != nil { t.Error(err) } diff --git a/internal/event/name.go b/internal/event/name.go index 184c10383..0dee9cc3b 100644 --- a/internal/event/name.go +++ b/internal/event/name.go @@ -85,7 +85,6 @@ var _ = uint64(1 << objectSingleTypesEnd) // Expand - returns expanded values of abbreviated event type. 
func (name Name) Expand() []Name { switch name { - case ObjectAccessedAll: return []Name{ ObjectAccessedGet, ObjectAccessedHead, diff --git a/internal/event/target/elasticsearch.go b/internal/event/target/elasticsearch.go index 35532334e..5a4d61e41 100644 --- a/internal/event/target/elasticsearch.go +++ b/internal/event/target/elasticsearch.go @@ -463,8 +463,7 @@ func (c *esClientV7) createIndex(args ElasticsearchArgs) error { indices, ok := v["indices"].([]interface{}) if ok { for _, index := range indices { - name := index.(map[string]interface{})["name"] - if name == args.Index { + if name, ok := index.(map[string]interface{}); ok && name["name"] == args.Index { found = true break } diff --git a/internal/grid/connection.go b/internal/grid/connection.go index 0cf2e02cc..25f2baef6 100644 --- a/internal/grid/connection.go +++ b/internal/grid/connection.go @@ -1748,20 +1748,20 @@ func (c *Connection) debugMsg(d debugMsg, args ...any) { case debugSetConnPingDuration: c.connMu.Lock() defer c.connMu.Unlock() - c.connPingInterval = args[0].(time.Duration) + c.connPingInterval, _ = args[0].(time.Duration) if c.connPingInterval < time.Second { panic("CONN ping interval too low") } case debugSetClientPingDuration: c.connMu.Lock() defer c.connMu.Unlock() - c.clientPingInterval = args[0].(time.Duration) + c.clientPingInterval, _ = args[0].(time.Duration) case debugAddToDeadline: - c.addDeadline = args[0].(time.Duration) + c.addDeadline, _ = args[0].(time.Duration) case debugIsOutgoingClosed: // params: muxID uint64, isClosed func(bool) - muxID := args[0].(uint64) - resp := args[1].(func(b bool)) + muxID, _ := args[0].(uint64) + resp, _ := args[1].(func(b bool)) mid, ok := c.outgoing.Load(muxID) if !ok || mid == nil { resp(true) @@ -1772,7 +1772,8 @@ func (c *Connection) debugMsg(d debugMsg, args ...any) { mid.respMu.Unlock() case debugBlockInboundMessages: c.connMu.Lock() - block := (<-chan struct{})(args[0].(chan struct{})) + a, _ := args[0].(chan struct{}) + block := 
(<-chan struct{})(a) c.blockMessages.Store(&block) c.connMu.Unlock() } diff --git a/internal/grid/grid.go b/internal/grid/grid.go index 0b192318e..b458729a1 100644 --- a/internal/grid/grid.go +++ b/internal/grid/grid.go @@ -28,11 +28,11 @@ import ( "net/http" "strconv" "strings" - "sync" "time" "github.com/gobwas/ws" "github.com/gobwas/ws/wsutil" + "github.com/minio/minio/internal/bpool" ) // ErrDisconnected is returned when the connection to the remote has been lost during the call. @@ -68,15 +68,15 @@ const ( defaultSingleRequestTimeout = time.Minute ) -var internalByteBuffer = sync.Pool{ - New: func() any { +var internalByteBuffer = bpool.Pool[*[]byte]{ + New: func() *[]byte { m := make([]byte, 0, defaultBufferSize) return &m }, } -var internal32KByteBuffer = sync.Pool{ - New: func() any { +var internal32KByteBuffer = bpool.Pool[*[]byte]{ + New: func() *[]byte { m := make([]byte, 0, biggerBufMin) return &m }, @@ -87,7 +87,7 @@ var internal32KByteBuffer = sync.Pool{ // When replacing PutByteBuffer should also be replaced // There is no minimum size. 
var GetByteBuffer = func() []byte { - b := *internalByteBuffer.Get().(*[]byte) + b := *internalByteBuffer.Get() return b[:0] } @@ -101,7 +101,7 @@ func GetByteBufferCap(wantSz int) []byte { PutByteBuffer(b) } if wantSz <= maxBufferSize { - b := *internal32KByteBuffer.Get().(*[]byte) + b := *internal32KByteBuffer.Get() if cap(b) >= wantSz { return b[:0] } diff --git a/internal/grid/handlers.go b/internal/grid/handlers.go index 802906469..1d20fa9b9 100644 --- a/internal/grid/handlers.go +++ b/internal/grid/handlers.go @@ -23,8 +23,8 @@ import ( "errors" "fmt" "strings" - "sync" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/hash/sha256" xioutil "github.com/minio/minio/internal/ioutil" "github.com/tinylib/msgp/msgp" @@ -431,12 +431,12 @@ func recycleFunc[RT RoundTripper](newRT func() RT) (newFn func() RT, recycle fun } } } - pool := sync.Pool{ - New: func() interface{} { + pool := bpool.Pool[RT]{ + New: func() RT { return newRT() }, } - return func() RT { return pool.Get().(RT) }, + return pool.Get, func(r RT) { if r != rZero { //nolint:staticcheck // SA6002 IT IS A GENERIC VALUE! @@ -632,8 +632,8 @@ type StreamTypeHandler[Payload, Req, Resp RoundTripper] struct { // Will be 0 if newReq is nil. InCapacity int - reqPool sync.Pool - respPool sync.Pool + reqPool bpool.Pool[Req] + respPool bpool.Pool[Resp] id HandlerID newPayload func() Payload nilReq Req @@ -653,13 +653,13 @@ func NewStream[Payload, Req, Resp RoundTripper](h HandlerID, newPayload func() P s := newStreamHandler[Payload, Req, Resp](h) if newReq != nil { - s.reqPool.New = func() interface{} { + s.reqPool.New = func() Req { return newReq() } } else { s.InCapacity = 0 } - s.respPool.New = func() interface{} { + s.respPool.New = func() Resp { return newResp() } s.newPayload = newPayload @@ -682,7 +682,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) NewPayload() Payload { // NewRequest creates a new request. // The struct may be reused, so caller should clear any fields. 
func (h *StreamTypeHandler[Payload, Req, Resp]) NewRequest() Req { - return h.reqPool.Get().(Req) + return h.reqPool.Get() } // PutRequest will accept a request for reuse. @@ -706,7 +706,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) PutResponse(r Resp) { // NewResponse creates a new response. // Handlers can use this to create a reusable response. func (h *StreamTypeHandler[Payload, Req, Resp]) NewResponse() Resp { - return h.respPool.Get().(Resp) + return h.respPool.Get() } func newStreamHandler[Payload, Req, Resp RoundTripper](h HandlerID) *StreamTypeHandler[Payload, Req, Resp] { diff --git a/internal/grid/muxserver.go b/internal/grid/muxserver.go index 87e81acda..06a3f1f74 100644 --- a/internal/grid/muxserver.go +++ b/internal/grid/muxserver.go @@ -388,6 +388,5 @@ func (m *muxServer) close() { if m.outBlock != nil { xioutil.SafeClose(m.outBlock) m.outBlock = nil - } } diff --git a/internal/grid/types.go b/internal/grid/types.go index 723728bc5..c7b0c2826 100644 --- a/internal/grid/types.go +++ b/internal/grid/types.go @@ -27,6 +27,7 @@ import ( "strings" "sync" + "github.com/minio/minio/internal/bpool" "github.com/tinylib/msgp/msgp" ) @@ -53,7 +54,7 @@ func (m *MSS) Get(key string) string { // Set a key, value pair. func (m *MSS) Set(key, value string) { if m == nil { - *m = mssPool.Get().(map[string]string) + *m = mssPool.Get() } (*m)[key] = value } @@ -130,7 +131,7 @@ func (m *MSS) Msgsize() int { // NewMSS returns a new MSS. func NewMSS() *MSS { - m := MSS(mssPool.Get().(map[string]string)) + m := MSS(mssPool.Get()) for k := range m { delete(m, k) } @@ -143,8 +144,8 @@ func NewMSSWith(m map[string]string) *MSS { return &m2 } -var mssPool = sync.Pool{ - New: func() interface{} { +var mssPool = bpool.Pool[map[string]string]{ + New: func() map[string]string { return make(map[string]string, 5) }, } @@ -152,7 +153,7 @@ var mssPool = sync.Pool{ // Recycle the underlying map. 
func (m *MSS) Recycle() { if m != nil && *m != nil { - mssPool.Put(map[string]string(*m)) + mssPool.Put(*m) *m = nil } } @@ -279,15 +280,15 @@ func (b *Bytes) Recycle() { // URLValues can be used for url.Values. type URLValues map[string][]string -var urlValuesPool = sync.Pool{ - New: func() interface{} { +var urlValuesPool = bpool.Pool[map[string][]string]{ + New: func() map[string][]string { return make(map[string][]string, 10) }, } // NewURLValues returns a new URLValues. func NewURLValues() *URLValues { - u := URLValues(urlValuesPool.Get().(map[string][]string)) + u := URLValues(urlValuesPool.Get()) return &u } @@ -342,7 +343,7 @@ func (u *URLValues) UnmarshalMsg(bts []byte) (o []byte, err error) { return } if *u == nil { - *u = urlValuesPool.Get().(map[string][]string) + *u = urlValuesPool.Get() } if len(*u) > 0 { for key := range *u { @@ -424,9 +425,11 @@ func NewJSONPool[T any]() *JSONPool[T] { func (p *JSONPool[T]) new() *T { var zero T - t := p.pool.Get().(*T) - *t = zero - return t + if t, ok := p.pool.Get().(*T); ok { + *t = zero + return t + } + return &zero } // JSON is a wrapper around a T object that can be serialized. @@ -557,15 +560,15 @@ func (NoPayload) Recycle() {} // ArrayOf wraps an array of Messagepack compatible objects. type ArrayOf[T RoundTripper] struct { - aPool sync.Pool // Arrays - ePool sync.Pool // Elements + aPool sync.Pool // Arrays + ePool bpool.Pool[T] // Elements } // NewArrayOf returns a new ArrayOf. // You must provide a function that returns a new instance of T. func NewArrayOf[T RoundTripper](newFn func() T) *ArrayOf[T] { return &ArrayOf[T]{ - ePool: sync.Pool{New: func() any { + ePool: bpool.Pool[T]{New: func() T { return newFn() }}, } @@ -609,7 +612,7 @@ func (p *ArrayOf[T]) putA(v []T) { } func (p *ArrayOf[T]) newE() T { - return p.ePool.Get().(T) + return p.ePool.Get() } // Array provides a wrapper for an underlying array of serializable objects. 
diff --git a/internal/handlers/forwarder.go b/internal/handlers/forwarder.go index 38bc58b22..fd36a6bea 100644 --- a/internal/handlers/forwarder.go +++ b/internal/handlers/forwarder.go @@ -24,8 +24,9 @@ import ( "net/http/httputil" "net/url" "strings" - "sync" "time" + + "github.com/minio/minio/internal/bpool" ) const defaultFlushInterval = time.Duration(100) * time.Millisecond @@ -53,7 +54,7 @@ func NewForwarder(f *Forwarder) *Forwarder { type bufPool struct { sz int - pool sync.Pool + pool bpool.Pool[*[]byte] } func (b *bufPool) Put(buf []byte) { @@ -66,13 +67,16 @@ func (b *bufPool) Put(buf []byte) { } func (b *bufPool) Get() []byte { - bufp := b.pool.Get().(*[]byte) + bufp := b.pool.Get() + if bufp == nil || cap(*bufp) < b.sz { + return make([]byte, 0, b.sz) + } return (*bufp)[:b.sz] } func newBufPool(sz int) httputil.BufferPool { - return &bufPool{sz: sz, pool: sync.Pool{ - New: func() interface{} { + return &bufPool{sz: sz, pool: bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, sz) return &buf }, diff --git a/internal/http/flush.go b/internal/http/flush.go new file mode 100644 index 000000000..e17cf7d56 --- /dev/null +++ b/internal/http/flush.go @@ -0,0 +1,27 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package http + +import "net/http" + +// Flush the ResponseWriter. +func Flush(w http.ResponseWriter) { + if f, ok := w.(http.Flusher); ok { + f.Flush() + } +} diff --git a/internal/http/listener.go b/internal/http/listener.go index 2f15dd58b..be3c1a1a8 100644 --- a/internal/http/listener.go +++ b/internal/http/listener.go @@ -100,13 +100,15 @@ func (listener *httpListener) Addr() (addr net.Addr) { return addr } - tcpAddr := addr.(*net.TCPAddr) - if ip := net.ParseIP("0.0.0.0"); ip != nil { - tcpAddr.IP = ip - } + if tcpAddr, ok := addr.(*net.TCPAddr); ok { + if ip := net.ParseIP("0.0.0.0"); ip != nil { + tcpAddr.IP = ip + } - addr = tcpAddr - return addr + addr = tcpAddr + return addr + } + panic("unknown address type on listener") } // Addrs - returns all address information of TCP listeners. diff --git a/internal/ioutil/ioutil.go b/internal/ioutil/ioutil.go index 49a4300c5..2f214b778 100644 --- a/internal/ioutil/ioutil.go +++ b/internal/ioutil/ioutil.go @@ -25,10 +25,10 @@ import ( "io" "os" "runtime/debug" - "sync" "time" "github.com/dustin/go-humanize" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/disk" ) @@ -39,28 +39,39 @@ const ( LargeBlock = 1 * humanize.MiByte // Default r/w block size for normal objects. ) +// AlignedBytePool is a pool of fixed size aligned blocks +type AlignedBytePool struct { + size int + p bpool.Pool[*[]byte] +} + +// NewAlignedBytePool creates a new pool with the specified size. 
+func NewAlignedBytePool(sz int) *AlignedBytePool { + return &AlignedBytePool{size: sz, p: bpool.Pool[*[]byte]{New: func() *[]byte { + b := disk.AlignedBlock(sz) + return &b + }}} +} + // aligned sync.Pool's var ( - ODirectPoolLarge = sync.Pool{ - New: func() interface{} { - b := disk.AlignedBlock(LargeBlock) - return &b - }, - } - ODirectPoolMedium = sync.Pool{ - New: func() interface{} { - b := disk.AlignedBlock(MediumBlock) - return &b - }, - } - ODirectPoolSmall = sync.Pool{ - New: func() interface{} { - b := disk.AlignedBlock(SmallBlock) - return &b - }, - } + ODirectPoolLarge = NewAlignedBytePool(LargeBlock) + ODirectPoolMedium = NewAlignedBytePool(MediumBlock) + ODirectPoolSmall = NewAlignedBytePool(SmallBlock) ) +// Get a block. +func (p *AlignedBytePool) Get() *[]byte { + return p.p.Get() +} + +// Put a block. +func (p *AlignedBytePool) Put(pb *[]byte) { + if pb != nil && len(*pb) == p.size { + p.p.Put(pb) + } +} + // WriteOnCloser implements io.WriteCloser and always // executes at least one write operation if it is closed. // @@ -250,15 +261,6 @@ func NopCloser(w io.Writer) io.WriteCloser { return nopCloser{w} } -const copyBufferSize = 32 * 1024 - -var copyBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, copyBufferSize) - return &b - }, -} - // SkipReader skips a given number of bytes and then returns all // remaining data. type SkipReader struct { @@ -274,12 +276,11 @@ func (s *SkipReader) Read(p []byte) (int, error) { } if s.skipCount > 0 { tmp := p - if s.skipCount > l && l < copyBufferSize { + if s.skipCount > l && l < SmallBlock { // We may get a very small buffer, so we grab a temporary buffer. 
- bufp := copyBufPool.Get().(*[]byte) - buf := *bufp - tmp = buf[:copyBufferSize] - defer copyBufPool.Put(bufp) + bufp := ODirectPoolSmall.Get() + tmp = *bufp + defer ODirectPoolSmall.Put(bufp) l = int64(len(tmp)) } for s.skipCount > 0 { @@ -309,7 +310,7 @@ type writerOnly struct { // Copy is exactly like io.Copy but with reusable buffers. func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - bufp := ODirectPoolMedium.Get().(*[]byte) + bufp := ODirectPoolMedium.Get() defer ODirectPoolMedium.Put(bufp) buf := *bufp diff --git a/internal/ioutil/ioutil_test.go b/internal/ioutil/ioutil_test.go index 587c72d62..b71c2f2e0 100644 --- a/internal/ioutil/ioutil_test.go +++ b/internal/ioutil/ioutil_test.go @@ -102,7 +102,7 @@ func TestCloseOnWriter(t *testing.T) { // Test for AppendFile. func TestAppendFile(t *testing.T) { - f, err := os.CreateTemp("", "") + f, err := os.CreateTemp(t.TempDir(), "") if err != nil { t.Fatal(err) } @@ -111,7 +111,7 @@ func TestAppendFile(t *testing.T) { f.WriteString("aaaaaaaaaa") f.Close() - f, err = os.CreateTemp("", "") + f, err = os.CreateTemp(t.TempDir(), "") if err != nil { t.Fatal(err) } @@ -162,7 +162,7 @@ func TestSkipReader(t *testing.T) { } func TestSameFile(t *testing.T) { - f, err := os.CreateTemp("", "") + f, err := os.CreateTemp(t.TempDir(), "") if err != nil { t.Errorf("Error creating tmp file: %v", err) } @@ -193,7 +193,7 @@ func TestSameFile(t *testing.T) { } func TestCopyAligned(t *testing.T) { - f, err := os.CreateTemp("", "") + f, err := os.CreateTemp(t.TempDir(), "") if err != nil { t.Errorf("Error creating tmp file: %v", err) } @@ -202,7 +202,7 @@ func TestCopyAligned(t *testing.T) { r := strings.NewReader("hello world") - bufp := ODirectPoolSmall.Get().(*[]byte) + bufp := ODirectPoolSmall.Get() defer ODirectPoolSmall.Put(bufp) written, err := CopyAligned(f, io.LimitReader(r, 5), *bufp, r.Size(), f) diff --git a/internal/jwt/parser.go b/internal/jwt/parser.go index 7c5811250..831d19cf0 100644 --- 
a/internal/jwt/parser.go +++ b/internal/jwt/parser.go @@ -30,13 +30,13 @@ import ( "errors" "fmt" "hash" - "sync" "time" "github.com/buger/jsonparser" "github.com/dustin/go-humanize" jwtgo "github.com/golang-jwt/jwt/v4" jsoniter "github.com/json-iterator/go" + "github.com/minio/minio/internal/bpool" ) // SigningMethodHMAC - Implements the HMAC-SHA family of signing methods signing methods @@ -44,7 +44,7 @@ import ( type SigningMethodHMAC struct { Name string Hash crypto.Hash - HasherPool sync.Pool + HasherPool bpool.Pool[hash.Hash] } // Specific instances for HS256, HS384, HS512 @@ -57,13 +57,13 @@ var ( const base64BufferSize = 64 * humanize.KiByte var ( - base64BufPool sync.Pool + base64BufPool bpool.Pool[*[]byte] hmacSigners []*SigningMethodHMAC ) func init() { - base64BufPool = sync.Pool{ - New: func() interface{} { + base64BufPool = bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, base64BufferSize) return &buf }, @@ -76,7 +76,7 @@ func init() { } for i := range hmacSigners { h := hmacSigners[i].Hash - hmacSigners[i].HasherPool.New = func() interface{} { + hmacSigners[i].HasherPool.New = func() hash.Hash { return h.New() } } @@ -89,13 +89,13 @@ func (s *SigningMethodHMAC) HashBorrower() HashBorrower { // HashBorrower keeps track of borrowed hashers and allows to return them all. type HashBorrower struct { - pool *sync.Pool + pool *bpool.Pool[hash.Hash] borrowed []hash.Hash } // Borrow a single hasher. 
func (h *HashBorrower) Borrow() hash.Hash { - hasher := h.pool.Get().(hash.Hash) + hasher := h.pool.Get() h.borrowed = append(h.borrowed, hasher) hasher.Reset() return hasher @@ -323,10 +323,10 @@ func ParseWithStandardClaims(tokenStr string, claims *StandardClaims, key []byte return jwtgo.NewValidationError("no key was provided.", jwtgo.ValidationErrorUnverifiable) } - bufp := base64BufPool.Get().(*[]byte) + bufp := base64BufPool.Get() defer base64BufPool.Put(bufp) - tokenBuf := base64BufPool.Get().(*[]byte) + tokenBuf := base64BufPool.Get() defer base64BufPool.Put(tokenBuf) token := *tokenBuf @@ -419,10 +419,10 @@ func ParseWithClaims(tokenStr string, claims *MapClaims, fn func(*MapClaims) ([] return jwtgo.NewValidationError("no Keyfunc was provided.", jwtgo.ValidationErrorUnverifiable) } - bufp := base64BufPool.Get().(*[]byte) + bufp := base64BufPool.Get() defer base64BufPool.Put(bufp) - tokenBuf := base64BufPool.Get().(*[]byte) + tokenBuf := base64BufPool.Get() defer base64BufPool.Put(tokenBuf) token := *tokenBuf diff --git a/internal/kms/config_test.go b/internal/kms/config_test.go index 65e720273..d63d06525 100644 --- a/internal/kms/config_test.go +++ b/internal/kms/config_test.go @@ -26,7 +26,7 @@ func TestIsPresent(t *testing.T) { for i, test := range isPresentTests { os.Clearenv() for k, v := range test.Env { - os.Setenv(k, v) + t.Setenv(k, v) } ok, err := IsPresent() diff --git a/internal/lock/lock_test.go b/internal/lock/lock_test.go index 7f01b86cc..11dad9037 100644 --- a/internal/lock/lock_test.go +++ b/internal/lock/lock_test.go @@ -25,7 +25,7 @@ import ( // Test lock fails. func TestLockFail(t *testing.T) { - f, err := os.CreateTemp("", "lock") + f, err := os.CreateTemp(t.TempDir(), "lock") if err != nil { t.Fatal(err) } @@ -55,7 +55,7 @@ func TestLockDirFail(t *testing.T) { // Tests rwlock methods. 
func TestRWLockedFile(t *testing.T) { - f, err := os.CreateTemp("", "lock") + f, err := os.CreateTemp(t.TempDir(), "lock") if err != nil { t.Fatal(err) } @@ -118,7 +118,7 @@ func TestRWLockedFile(t *testing.T) { // Tests lock and unlock semantics. func TestLockAndUnlock(t *testing.T) { - f, err := os.CreateTemp("", "lock") + f, err := os.CreateTemp(t.TempDir(), "lock") if err != nil { t.Fatal(err) } diff --git a/internal/logger/config.go b/internal/logger/config.go index b5eda7648..592bd4f7c 100644 --- a/internal/logger/config.go +++ b/internal/logger/config.go @@ -370,7 +370,6 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config { Endpoint: url, } } - } return cfg } diff --git a/internal/logger/target/http/http.go b/internal/logger/target/http/http.go index 2fe9284ce..b3fd2def4 100644 --- a/internal/logger/target/http/http.go +++ b/internal/logger/target/http/http.go @@ -466,7 +466,6 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) { return } } - } } @@ -538,9 +537,11 @@ func New(config Config) (*Target, error) { if h.config.Proxy != "" { proxyURL, _ := url.Parse(h.config.Proxy) transport := h.config.Transport - ctransport := transport.(*http.Transport).Clone() - ctransport.Proxy = http.ProxyURL(proxyURL) - h.config.Transport = ctransport + if tr, ok := transport.(*http.Transport); ok { + ctransport := tr.Clone() + ctransport.Proxy = http.ProxyURL(proxyURL) + h.config.Transport = ctransport + } } h.client = &http.Client{Transport: h.config.Transport} diff --git a/internal/logger/target/kafka/kafka.go b/internal/logger/target/kafka/kafka.go index c66920419..a6034adf4 100644 --- a/internal/logger/target/kafka/kafka.go +++ b/internal/logger/target/kafka/kafka.go @@ -188,7 +188,6 @@ func (h *Target) startKafkaLogger() { // We are not allowed to add when logCh is nil h.wg.Add(1) defer h.wg.Done() - } h.logChMu.RUnlock() diff --git a/internal/mountinfo/mountinfo_linux.go b/internal/mountinfo/mountinfo_linux.go index 
4217a81a3..f489dca9e 100644 --- a/internal/mountinfo/mountinfo_linux.go +++ b/internal/mountinfo/mountinfo_linux.go @@ -56,13 +56,13 @@ func IsLikelyMountPoint(path string) bool { } // If the directory has a different device as parent, then it is a mountpoint. - if s1.Sys().(*syscall.Stat_t).Dev != s2.Sys().(*syscall.Stat_t).Dev { - // path/.. on a different device as path - return true - } - - // path/.. is the same i-node as path - this check is for bind mounts. - return s1.Sys().(*syscall.Stat_t).Ino == s2.Sys().(*syscall.Stat_t).Ino + ss1, ok1 := s1.Sys().(*syscall.Stat_t) + ss2, ok2 := s2.Sys().(*syscall.Stat_t) + return ok1 && ok2 && + // path/.. on a different device as path + (ss1.Dev != ss2.Dev || + // path/.. is the same i-node as path - this check is for bind mounts. + ss1.Ino == ss2.Ino) } // CheckCrossDevice - check if any list of paths has any sub-mounts at /proc/mounts. diff --git a/internal/mountinfo/mountinfo_windows.go b/internal/mountinfo/mountinfo_windows.go index 244da9aa9..a40a70d20 100644 --- a/internal/mountinfo/mountinfo_windows.go +++ b/internal/mountinfo/mountinfo_windows.go @@ -40,7 +40,9 @@ var mountPointCache sync.Map func IsLikelyMountPoint(path string) bool { path = filepath.Dir(path) if v, ok := mountPointCache.Load(path); ok { - return v.(bool) + if b, ok := v.(bool); ok { + return b + } } wpath, _ := windows.UTF16PtrFromString(path) wvolume := make([]uint16, len(path)+1) diff --git a/internal/s3select/csv/reader.go b/internal/s3select/csv/reader.go index 032a9b0c1..9d5f11c1e 100644 --- a/internal/s3select/csv/reader.go +++ b/internal/s3select/csv/reader.go @@ -27,25 +27,26 @@ import ( "unicode/utf8" csv "github.com/minio/csvparser" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/s3select/sql" ) // Reader - CSV record reader for S3Select. 
type Reader struct { args *ReaderArgs - readCloser io.ReadCloser // raw input - buf *bufio.Reader // input to the splitter - columnNames []string // names of columns - nameIndexMap map[string]int64 // name to column index - current [][]string // current block of results to be returned - recordsRead int // number of records read in current slice - input chan *queueItem // input for workers - queue chan *queueItem // output from workers in order - err error // global error state, only touched by Reader.Read - bufferPool sync.Pool // pool of []byte objects for input - csvDstPool sync.Pool // pool of [][]string used for output - close chan struct{} // used for shutting down the splitter before end of stream - readerWg sync.WaitGroup // used to keep track of async reader. + readCloser io.ReadCloser // raw input + buf *bufio.Reader // input to the splitter + columnNames []string // names of columns + nameIndexMap map[string]int64 // name to column index + current [][]string // current block of results to be returned + recordsRead int // number of records read in current slice + input chan *queueItem // input for workers + queue chan *queueItem // output from workers in order + err error // global error state, only touched by Reader.Read + bufferPool bpool.Pool[[]byte] // pool of []byte objects for input + csvDstPool bpool.Pool[[][]string] // pool of [][]string used for output + close chan struct{} // used for shutting down the splitter before end of stream + readerWg sync.WaitGroup // used to keep track of async reader. } // queueItem is an item in the queue. @@ -69,7 +70,7 @@ func (r *Reader) Read(dst sql.Record) (sql.Record, error) { r.err = io.EOF return nil, r.err } - //nolint:staticcheck // SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. 
+ r.csvDstPool.Put(r.current) r.current = <-item.dst r.err = item.err @@ -182,12 +183,12 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { } } - r.bufferPool.New = func() interface{} { + r.bufferPool.New = func() []byte { return make([]byte, csvSplitSize+1024) } // Return first block - next, nextErr := r.nextSplit(csvSplitSize, r.bufferPool.Get().([]byte)) + next, nextErr := r.nextSplit(csvSplitSize, r.bufferPool.Get()) // Check if first block is valid. if !utf8.Valid(next) { return errInvalidTextEncodingError() @@ -224,7 +225,7 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { // Exit on any error. return } - next, nextErr = r.nextSplit(csvSplitSize, r.bufferPool.Get().([]byte)) + next, nextErr = r.nextSplit(csvSplitSize, r.bufferPool.Get()) } }() @@ -236,8 +237,8 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { in.dst <- nil continue } - dst, ok := r.csvDstPool.Get().([][]string) - if !ok { + dst := r.csvDstPool.Get() + if len(dst) < 1000 { dst = make([][]string, 0, 1000) } diff --git a/internal/s3select/json/preader.go b/internal/s3select/json/preader.go index d8d016f78..9ceef5d6b 100644 --- a/internal/s3select/json/preader.go +++ b/internal/s3select/json/preader.go @@ -24,6 +24,7 @@ import ( "runtime" "sync" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/minio/internal/s3select/sql" ) @@ -32,17 +33,17 @@ import ( // Operates concurrently on line-delimited JSON. 
type PReader struct { args *ReaderArgs - readCloser io.ReadCloser // raw input - buf *bufio.Reader // input to the splitter - current []jstream.KVS // current block of results to be returned - recordsRead int // number of records read in current slice - input chan *queueItem // input for workers - queue chan *queueItem // output from workers in order - err error // global error state, only touched by Reader.Read - bufferPool sync.Pool // pool of []byte objects for input - kvDstPool sync.Pool // pool of []jstream.KV used for output - close chan struct{} // used for shutting down the splitter before end of stream - readerWg sync.WaitGroup // used to keep track of async reader. + readCloser io.ReadCloser // raw input + buf *bufio.Reader // input to the splitter + current []jstream.KVS // current block of results to be returned + recordsRead int // number of records read in current slice + input chan *queueItem // input for workers + queue chan *queueItem // output from workers in order + err error // global error state, only touched by Reader.Read + bufferPool bpool.Pool[[]byte] // pool of []byte objects for input + kvDstPool bpool.Pool[[]jstream.KVS] // pool of []jstream.KVS used for output + close chan struct{} // used for shutting down the splitter before end of stream + readerWg sync.WaitGroup // used to keep track of async reader. } // queueItem is an item in the queue. @@ -66,7 +67,6 @@ func (r *PReader) Read(dst sql.Record) (sql.Record, error) { r.err = io.EOF return nil, r.err } - //nolint:staticcheck // SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. r.kvDstPool.Put(r.current) r.current = <-item.dst r.err = item.err @@ -133,7 +133,7 @@ const jsonSplitSize = 128 << 10 // and a number of workers based on GOMAXPROCS. // If an error is returned no goroutines have been started and r.err will have been set. 
func (r *PReader) startReaders() { - r.bufferPool.New = func() interface{} { + r.bufferPool.New = func() []byte { return make([]byte, jsonSplitSize+1024) } @@ -148,7 +148,7 @@ func (r *PReader) startReaders() { defer close(r.queue) defer r.readerWg.Done() for { - next, err := r.nextSplit(jsonSplitSize, r.bufferPool.Get().([]byte)) + next, err := r.nextSplit(jsonSplitSize, r.bufferPool.Get()) q := queueItem{ input: next, dst: make(chan []jstream.KVS, 1), @@ -180,8 +180,8 @@ func (r *PReader) startReaders() { in.dst <- nil continue } - dst, ok := r.kvDstPool.Get().([]jstream.KVS) - if !ok { + dst := r.kvDstPool.Get() + if len(dst) < 1000 { dst = make([]jstream.KVS, 0, 1000) } @@ -193,7 +193,7 @@ func (r *PReader) startReaders() { if mv.ValueType == jstream.Object { // This is a JSON object type (that preserves key // order) - kvs = mv.Value.(jstream.KVS) + kvs, _ = mv.Value.(jstream.KVS) } else { // To be AWS S3 compatible Select for JSON needs to // output non-object JSON as single column value diff --git a/internal/s3select/json/reader.go b/internal/s3select/json/reader.go index 70a758d92..780a1a972 100644 --- a/internal/s3select/json/reader.go +++ b/internal/s3select/json/reader.go @@ -51,7 +51,7 @@ func (r *Reader) Read(dst sql.Record) (sql.Record, error) { if v.ValueType == jstream.Object { // This is a JSON object type (that preserves key // order) - kvs = v.Value.(jstream.KVS) + kvs, _ = v.Value.(jstream.KVS) } else { // To be AWS S3 compatible Select for JSON needs to // output non-object JSON as single column value diff --git a/internal/s3select/message.go b/internal/s3select/message.go index 0f931dc92..e2ed15945 100644 --- a/internal/s3select/message.go +++ b/internal/s3select/message.go @@ -26,6 +26,8 @@ import ( "strconv" "sync/atomic" "time" + + xhttp "github.com/minio/minio/internal/http" ) // A message is in the format specified in @@ -262,7 +264,7 @@ func (writer *messageWriter) write(data []byte) bool { return false } - 
writer.writer.(http.Flusher).Flush() + xhttp.Flush(writer.writer) return true } diff --git a/internal/s3select/parquet/reader.go b/internal/s3select/parquet/reader.go index f8dd311ee..ae050e5ee 100644 --- a/internal/s3select/parquet/reader.go +++ b/internal/s3select/parquet/reader.go @@ -56,7 +56,6 @@ func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) { kvs := jstream.KVS{} for _, col := range pr.r.Columns() { - var value interface{} if v, ok := nextRow[col.FlatName()]; ok { value, err = convertFromAnnotation(col.Element(), v) diff --git a/internal/s3select/select.go b/internal/s3select/select.go index 48a1391ae..a5dfc7b17 100644 --- a/internal/s3select/select.go +++ b/internal/s3select/select.go @@ -32,6 +32,7 @@ import ( "github.com/klauspost/compress/s2" "github.com/klauspost/compress/zstd" gzip "github.com/klauspost/pgzip" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/config" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/s3select/csv" @@ -81,15 +82,15 @@ func init() { parquetSupport = env.Get("MINIO_API_SELECT_PARQUET", config.EnableOff) == config.EnableOn } -var bufPool = sync.Pool{ - New: func() interface{} { +var bufPool = bpool.Pool[*bytes.Buffer]{ + New: func() *bytes.Buffer { // make a buffer with a reasonable capacity. return bytes.NewBuffer(make([]byte, 0, maxRecordSize)) }, } -var bufioWriterPool = sync.Pool{ - New: func() interface{} { +var bufioWriterPool = bpool.Pool[*bufio.Writer]{ + New: func() *bufio.Writer { // io.Discard is just used to create the writer. Actual destination // writer is set later by Reset() before using it. return bufio.NewWriter(xioutil.Discard) @@ -468,7 +469,7 @@ func (s3Select *S3Select) marshal(buf *bytes.Buffer, record sql.Record) error { switch s3Select.Output.format { case csvFormat: // Use bufio Writer to prevent csv.Writer from allocating a new buffer. 
- bufioWriter := bufioWriterPool.Get().(*bufio.Writer) + bufioWriter := bufioWriterPool.Get() defer func() { bufioWriter.Reset(xioutil.Discard) bufioWriterPool.Put(bufioWriter) @@ -530,7 +531,7 @@ func (s3Select *S3Select) Evaluate(w http.ResponseWriter) { } var err error sendRecord := func() bool { - buf := bufPool.Get().(*bytes.Buffer) + buf := bufPool.Get() buf.Reset() for _, outputRecord := range outputQueue { diff --git a/internal/s3select/sql/funceval.go b/internal/s3select/sql/funceval.go index 26294abf0..44ffd2069 100644 --- a/internal/s3select/sql/funceval.go +++ b/internal/s3select/sql/funceval.go @@ -107,7 +107,6 @@ func (e *FuncExpr) evalSQLFnNode(r Record, tableAlias string) (res *Value, err e case sqlFnDateDiff: return handleDateDiff(r, e.DateDiff, tableAlias) - } // For all simple argument functions, we evaluate the arguments here diff --git a/internal/s3select/sql/stringfuncs.go b/internal/s3select/sql/stringfuncs.go index b6d24f5e8..28abdf6af 100644 --- a/internal/s3select/sql/stringfuncs.go +++ b/internal/s3select/sql/stringfuncs.go @@ -107,7 +107,6 @@ func evalSQLLike(text, pattern string, escape rune) (match bool, err error) { default: s = append(s, r) } - } if hasLeadingPercent { return strings.HasSuffix(text, string(s)), nil diff --git a/internal/s3select/sql/timestampfuncs.go b/internal/s3select/sql/timestampfuncs.go index 4622f992a..d652275f5 100644 --- a/internal/s3select/sql/timestampfuncs.go +++ b/internal/s3select/sql/timestampfuncs.go @@ -175,7 +175,6 @@ func dateDiff(timePart string, ts1, ts2 time.Time) (*Value, error) { seconds := duration / time.Second return FromInt(int64(seconds)), nil default: - } return nil, errNotImplemented } diff --git a/internal/s3select/sql/value.go b/internal/s3select/sql/value.go index 8a778bcf1..7bd6f780d 100644 --- a/internal/s3select/sql/value.go +++ b/internal/s3select/sql/value.go @@ -663,8 +663,13 @@ func inferTypeForArithOp(a *Value) error { a.setFloat(f) return nil } - - err := fmt.Errorf("Could 
not convert %q to a number", string(a.value.([]byte))) + var s string + if v, ok := a.value.([]byte); ok { + s = string(v) + } else { + s = fmt.Sprint(a.value) + } + err := fmt.Errorf("Could not convert %q to a number", s) return errInvalidDataType(err) }