From e88d494775d8548816395cf2b0c014a3626cda74 Mon Sep 17 00:00:00 2001
From: Taran Pelkey
Date: Sat, 29 Mar 2025 20:56:02 -0400
Subject: [PATCH] Migrate golangci-lint config to V2 (#21081)

---
 .golangci.yml                              | 75 ++++++++++++-------
 cmd/admin-handlers-idp-ldap.go             |  5 +-
 cmd/admin-handlers-pools.go                |  6 +-
 cmd/admin-handlers-users.go                | 21 +----
 cmd/admin-handlers-users_test.go           |  2 +-
 cmd/admin-handlers.go                      |  6 +-
 cmd/api-datatypes.go                       |  4 +-
 cmd/api-headers_test.go                    |  3 +-
 cmd/api-router.go                          |  4 +-
 cmd/batch-handlers.go                      | 69 +++++++++--------
 cmd/bootstrap-messages.go                  |  4 +-
 cmd/bucket-lifecycle-audit.go              |  2 +-
 cmd/bucket-lifecycle.go                    |  2 +-
 cmd/bucket-object-lock.go                  |  2 +-
 cmd/bucket-replication.go                  |  2 +-
 cmd/common-main.go                         | 12 +--
 cmd/data-scanner.go                        |  3 +-
 cmd/data-scanner_test.go                   |  2 +-
 cmd/data-usage-cache.go                    |  1 -
 cmd/dummy-data-generator_test.go           |  2 +-
 cmd/encryption-v1.go                       |  3 +-
 cmd/endpoint.go                            | 24 +++---
 cmd/erasure-healing-common.go              |  7 +-
 cmd/erasure-healing.go                     |  8 +-
 cmd/erasure-object.go                      |  4 +-
 cmd/erasure-object_test.go                 |  4 +-
 cmd/erasure-server-pool-decom.go           |  6 +-
 cmd/iam-store.go                           |  2 +-
 cmd/iam.go                                 |  3 +-
 cmd/metacache-set.go                       |  2 +-
 cmd/metacache-walk.go                      |  2 +-
 cmd/metrics-resource.go                    |  2 +-
 cmd/metrics-v2_test.go                     | 12 +--
 cmd/metrics-v3-cache.go                    |  4 +-
 cmd/metrics-v3-system-drive.go             |  2 +-
 cmd/mrf.go                                 |  2 -
 cmd/object-api-interface.go                |  4 +-
 cmd/object-api-utils.go                    |  7 +-
 cmd/object-handlers.go                     |  7 +-
 cmd/object-multipart-handlers.go           |  4 +-
 cmd/peer-rest-server.go                    |  3 +-
 cmd/perf-tests.go                          | 10 +--
 cmd/site-replication.go                    | 27 +++----
 cmd/storage-rest-client.go                 |  3 +-
 cmd/streaming-signature-v4.go              |  5 +-
 cmd/sts-handlers_test.go                   |  6 +-
 cmd/test-utils_test.go                     | 19 ++---
 cmd/warm-backend-gcs.go                    |  4 +-
 cmd/xl-storage-disk-id-check.go            |  4 +-
 cmd/xl-storage.go                          |  4 +-
 cmd/xl-storage_test.go                     |  2 +-
 docs/debugging/xl-meta/main.go             |  4 +-
 internal/bucket/lifecycle/expiration.go    |  6 +-
 internal/bucket/lifecycle/transition.go    |  6 +-
 internal/bucket/replication/replication.go |  2 +-
 internal/config/lambda/event/arn.go        |  2 +-
 internal/etag/reader.go                    |  4 +-
 internal/event/arn.go                      |  2 +-
 internal/event/target/amqp.go              |  6 +-
 .../target/kafka_scram_client_contrib.go   |  4 +-
 internal/event/target/mysql.go             |  2 +-
 internal/event/target/nsq.go               |  2 +-
 internal/event/target/postgresql.go        |  2 +-
 internal/event/target/redis.go             |  2 +-
 internal/event/targetlist.go               |  4 -
 internal/grid/benchmark_test.go            | 19 ++---
 internal/grid/connection.go                |  1 -
 internal/grid/grid.go                      |  2 +-
 internal/grid/types.go                     | 13 ++--
 internal/http/response-recorder.go         |  2 +-
 internal/http/server.go                    |  2 +-
 internal/logger/logrotate.go               |  3 +-
 internal/logger/reqinfo.go                 |  1 -
 internal/logger/target/http/http.go        |  4 +-
 .../kafka/kafka_scram_client_contrib.go    |  4 +-
 internal/rest/client.go                    |  1 -
 internal/ringbuffer/ring_buffer.go         |  6 +-
 77 files changed, 239 insertions(+), 290 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 6dfa38565..0533d7cd9 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,45 +1,64 @@
-linters-settings:
-  gofumpt:
-    simplify: true
-
-  misspell:
-    locale: US
-
-  staticcheck:
-    checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
-
+version: "2"
 linters:
-  disable-all: true
+  default: none
   enable:
     - durationcheck
+    - forcetypeassert
     - gocritic
-    - gofumpt
-    - goimports
     - gomodguard
     - govet
     - ineffassign
     - misspell
     - revive
     - staticcheck
-    - typecheck
     - unconvert
     - unused
     - usetesting
-    - forcetypeassert
     - whitespace
-
+  settings:
+    misspell:
+      locale: US
+    staticcheck:
+      checks:
+        - all
+        - -SA1008
+        - -SA1019
+        - -SA4000
+        - -SA9004
+        - -ST1000
+        - -ST1005
+        - -ST1016
+        - -U1000
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - forcetypeassert
+        path: _test\.go
+      - path: (.+)\.go$
+        text: 'empty-block:'
+      - path: (.+)\.go$
+        text: 'unused-parameter:'
+      - path: (.+)\.go$
+        text: 'dot-imports:'
+      - path: (.+)\.go$
+        text: should have a package comment
+      - path: (.+)\.go$
+        text: error strings should not be capitalized or end with punctuation or a newline
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
 issues:
-  exclude-use-default: false
   max-issues-per-linter: 100
   max-same-issues: 100
-  exclude:
-    - "empty-block:"
-    - "unused-parameter:"
-    - "dot-imports:"
-    - should have a package comment
-    - error strings should not be capitalized or end with punctuation or a newline
-  exclude-rules:
-    # Exclude some linters from running on tests files.
-    - path: _test\.go
-      linters:
-        - forcetypeassert
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/cmd/admin-handlers-idp-ldap.go b/cmd/admin-handlers-idp-ldap.go
index b818237f2..af97796ec 100644
--- a/cmd/admin-handlers-idp-ldap.go
+++ b/cmd/admin-handlers-idp-ldap.go
@@ -214,10 +214,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
 	}
 
 	// Check if we are creating svc account for request sender.
-	isSvcAccForRequestor := false
-	if targetUser == requestorUser || targetUser == requestorParentUser {
-		isSvcAccForRequestor = true
-	}
+	isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser
 
 	var (
 		targetGroups []string
diff --git a/cmd/admin-handlers-pools.go b/cmd/admin-handlers-pools.go
index d3227b6ab..40d655934 100644
--- a/cmd/admin-handlers-pools.go
+++ b/cmd/admin-handlers-pools.go
@@ -258,7 +258,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
 	// concurrent rebalance-start commands.
 	if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
 		for nodeIdx, proxyEp := range globalProxyEndpoints {
-			if proxyEp.Endpoint.Host == ep.Host {
+			if proxyEp.Host == ep.Host {
 				if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
 					return
 				}
@@ -329,7 +329,7 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
 	// pools may temporarily have out of date info on the others.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal { for nodeIdx, proxyEp := range globalProxyEndpoints { - if proxyEp.Endpoint.Host == ep.Host { + if proxyEp.Host == ep.Host { if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success { return } @@ -383,7 +383,7 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h return } for nodeIdx, proxyEp := range globalProxyEndpoints { - if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal { + if proxyEp.Host == host && !proxyEp.IsLocal { if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success { return true } diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go index 4706ef59f..dca603469 100644 --- a/cmd/admin-handlers-users.go +++ b/cmd/admin-handlers-users.go @@ -197,12 +197,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) { return } - checkDenyOnly := false - if name == cred.AccessKey { - // Check that there is no explicit deny - otherwise it's allowed - // to view one's own info. - checkDenyOnly = true - } + checkDenyOnly := name == cred.AccessKey if !globalIAMSys.IsAllowed(policy.Args{ AccountName: cred.AccessKey, @@ -493,12 +488,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) { return } - checkDenyOnly := false - if accessKey == cred.AccessKey { - // Check that there is no explicit deny - otherwise it's allowed - // to change one's own password. - checkDenyOnly = true - } + checkDenyOnly := accessKey == cred.AccessKey if !globalIAMSys.IsAllowed(policy.Args{ AccountName: cred.AccessKey, @@ -689,10 +679,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque } // Check if we are creating svc account for request sender. - isSvcAccForRequestor := false - if targetUser == requestorUser || targetUser == requestorParentUser { - isSvcAccForRequestor = true - } + isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser // If we are creating svc account for request sender, ensure // that targetUser is a real user (i.e. 
not derived @@ -2673,7 +2660,7 @@ func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) e if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) { return nil } - dur := exp.Sub(time.Now()) + dur := time.Until(*exp) if dur <= 0 { return errors.New("unsupported expiration time") } diff --git a/cmd/admin-handlers-users_test.go b/cmd/admin-handlers-users_test.go index b3f31a386..dcedb014a 100644 --- a/cmd/admin-handlers-users_test.go +++ b/cmd/admin-handlers-users_test.go @@ -160,7 +160,7 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) { } func (s *TestSuiteIAM) RestartIAMSuite(c *check) { - s.TestSuiteCommon.RestartTestServer(c) + s.RestartTestServer(c) s.iamSetup(c) } diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 31c43ac76..a09ac2d9b 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -2676,7 +2676,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur // disk metrics are already included under drive info of each server getRealtimeMetrics := func() *madmin.RealtimeMetrics { var m madmin.RealtimeMetrics - var types madmin.MetricType = madmin.MetricsAll &^ madmin.MetricsDisk + types := madmin.MetricsAll &^ madmin.MetricsDisk mLocal := collectLocalMetrics(types, collectMetricsOpts{}) m.Merge(&mLocal) cctx, cancel := context.WithTimeout(healthCtx, time.Second/2) @@ -2720,7 +2720,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur poolsArgs := re.ReplaceAllString(cmdLine, `$3`) var anonPools []string - if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) { + if !strings.Contains(poolsArgs, "{") || !strings.Contains(poolsArgs, "}") { // No ellipses pattern. Anonymize host name from every pool arg pools := strings.Fields(poolsArgs) anonPools = make([]string, len(pools)) @@ -3420,7 +3420,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ } // save the format.json as part of inspect by default - if !(volume == minioMetaBucket && file == formatConfigFile) { + if volume != minioMetaBucket || file != formatConfigFile { err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn) } if !errors.Is(err, errFileNotFound) { diff --git a/cmd/api-datatypes.go b/cmd/api-datatypes.go index 84a18986f..cc3bcb1c0 100644 --- a/cmd/api-datatypes.go +++ b/cmd/api-datatypes.go @@ -44,10 +44,10 @@ type DeleteMarkerMTime struct { // MarshalXML encodes expiration date if it is non-zero and encodes // empty string otherwise func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if t.Time.IsZero() { + if t.IsZero() { return nil } - return e.EncodeElement(t.Time.Format(time.RFC3339), startElement) + return e.EncodeElement(t.Format(time.RFC3339), startElement) } // ObjectV object version key/versionId diff --git a/cmd/api-headers_test.go b/cmd/api-headers_test.go index 3c3030f09..9db65460e 100644 --- a/cmd/api-headers_test.go +++ b/cmd/api-headers_test.go @@ -34,7 +34,8 @@ func TestNewRequestID(t *testing.T) { e = char // Ensure that it is alphanumeric, in this case, between 0-9 and A-Z. 
- if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) { + isAlnum := ('0' <= e && e <= '9') || ('A' <= e && e <= 'Z') + if !isAlnum { t.Fail() } } diff --git a/cmd/api-router.go b/cmd/api-router.go index 4d755522f..da85fa155 100644 --- a/cmd/api-router.go +++ b/cmd/api-router.go @@ -227,13 +227,13 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc { } // Skip wrapping with the gzip middleware if specified. - var gzippedHandler http.HandlerFunc = tracedHandler + gzippedHandler := tracedHandler if !handlerFlags.has(noGZS3HFlag) { gzippedHandler = gzipHandler(gzippedHandler) } // Skip wrapping with throttling middleware if specified. - var throttledHandler http.HandlerFunc = gzippedHandler + throttledHandler := gzippedHandler if !handlerFlags.has(noThrottleS3HFlag) { throttledHandler = maxClients(throttledHandler) } diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go index d1f20bb97..59c82e50f 100644 --- a/cmd/batch-handlers.go +++ b/cmd/batch-handlers.go @@ -39,7 +39,6 @@ import ( "github.com/lithammer/shortuuid/v4" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" - miniogo "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/tags" @@ -47,7 +46,6 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/pkg/v3/console" "github.com/minio/pkg/v3/env" @@ -142,7 +140,7 @@ func (r BatchJobReplicateV1) Notify(ctx context.Context, ri *batchJobInfo) error } // ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local. 
-func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error { +func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *minio.Core, srcObjInfo ObjectInfo, retry bool) error { srcBucket := r.Source.Bucket tgtBucket := r.Target.Bucket srcObject := srcObjInfo.Name @@ -189,7 +187,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec } return r.copyWithMultipartfromSource(ctx, api, core, srcObjInfo, opts, partsCount) } - gopts := miniogo.GetObjectOptions{ + gopts := minio.GetObjectOptions{ VersionID: srcObjInfo.VersionID, } if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil { @@ -210,7 +208,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec return err } -func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) { +func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) { srcBucket := r.Source.Bucket tgtBucket := r.Target.Bucket srcObject := srcObjInfo.Name @@ -251,7 +249,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a ) for i := 0; i < partsCount; i++ { - gopts := miniogo.GetObjectOptions{ + gopts := minio.GetObjectOptions{ VersionID: srcObjInfo.VersionID, PartNumber: i + 1, } @@ -382,7 +380,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay cred := r.Source.Creds - c, err := miniogo.New(u.Host, &miniogo.Options{ + c, err := minio.New(u.Host, &minio.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: u.Scheme == "https", Transport: getRemoteInstanceTransport(), @@ -393,7 +391,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay } c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID) - core := &miniogo.Core{Client: c} + core := &minio.Core{Client: c} workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2))) if err != nil { @@ -414,14 +412,14 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO ctx, cancel := context.WithCancel(ctx) - objInfoCh := make(chan miniogo.ObjectInfo, 1) + objInfoCh := make(chan minio.ObjectInfo, 1) go func() { prefixes := r.Source.Prefix.F() if len(prefixes) == 0 { prefixes = []string{""} } for _, prefix := range prefixes { - prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{ + prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, minio.ListObjectsOptions{ Prefix: prefix, WithVersions: minioSrc, Recursive: true, @@ -444,7 +442,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay // all user metadata or just storageClass. If its only storageClass // List() already returns relevant information for filter to be applied. 
if isMetadata && !isStorageClassOnly { - oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{}) + oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, minio.StatObjectOptions{}) if err == nil { oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2) } else { @@ -540,7 +538,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay } // toObjectInfo converts minio.ObjectInfo to ObjectInfo -func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo { +func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo { tags, _ := tags.MapToObjectTags(objInfo.UserTags) oi := ObjectInfo{ Bucket: bucket, @@ -643,7 +641,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa } // ReplicateToTarget read from source and replicate to configured target -func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error { +func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, retry bool) error { srcBucket := r.Source.Bucket tgtBucket := r.Target.Bucket tgtPrefix := r.Target.Prefix @@ -652,9 +650,9 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL if srcObjInfo.DeleteMarker || !srcObjInfo.VersionPurgeStatus.Empty() { if retry && !s3Type { - if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.StatObjectOptions{ + if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.StatObjectOptions{ VersionID: srcObjInfo.VersionID, - Internal: miniogo.AdvancedGetOptions{ + Internal: minio.AdvancedGetOptions{ ReplicationProxyRequest: "false", }, }); isErrMethodNotAllowed(ErrorRespToObjectError(err, tgtBucket, pathJoin(tgtPrefix, srcObject))) { @@ -671,19 +669,19 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL dmVersionID = "" versionID = "" } - return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.RemoveObjectOptions{ + return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.RemoveObjectOptions{ VersionID: versionID, - Internal: miniogo.AdvancedRemoveOptions{ + Internal: minio.AdvancedRemoveOptions{ ReplicationDeleteMarker: dmVersionID != "", ReplicationMTime: srcObjInfo.ModTime, - ReplicationStatus: miniogo.ReplicationStatusReplica, + ReplicationStatus: minio.ReplicationStatusReplica, ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside }, }) } if retry && !s3Type { // when we are retrying avoid copying if necessary. 
- gopts := miniogo.GetObjectOptions{} + gopts := minio.GetObjectOptions{} if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil { return err } @@ -717,7 +715,7 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL return err } if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 { - putOpts.Internal = miniogo.AdvancedPutOptions{} + putOpts.Internal = minio.AdvancedPutOptions{} } if isMP { if err := replicateObjectWithMultipart(ctx, c, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, objInfo, putOpts); err != nil { @@ -1124,7 +1122,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba } // if one of source or target is non MinIO, just replicate the top most version like `mc mirror` - return !((r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest) + isSourceOrTargetS3 := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 + return !isSourceOrTargetS3 || info.IsLatest } u, err := url.Parse(r.Target.Endpoint) @@ -1134,7 +1133,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba cred := r.Target.Creds - c, err := miniogo.NewCore(u.Host, &miniogo.Options{ + c, err := minio.NewCore(u.Host, &minio.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: u.Scheme == "https", Transport: getRemoteInstanceTransport(), @@ -1157,14 +1156,14 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba if r.Source.Snowball.Disable != nil && !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() { go func() { // Snowball currently needs the high level minio-go Client, not the Core one - cl, err := miniogo.New(u.Host, &miniogo.Options{ + cl, err := minio.New(u.Host, &minio.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: u.Scheme == "https", Transport: getRemoteInstanceTransport(), BucketLookup: lookupStyle(r.Target.Path), }) if err != nil { - batchLogOnceIf(ctx, err, job.ID+"miniogo.New") + batchLogOnceIf(ctx, err, job.ID+"minio.New") return } @@ -1274,7 +1273,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba stopFn := globalBatchJobsMetrics.trace(batchJobMetricReplication, job.ID, attempts) success := true if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil { - if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" { + if minio.ToErrorResponse(err).Code == "PreconditionFailed" { // pre-condition failed means we already have the object copied over. 
return } @@ -1457,7 +1456,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, return err } - c, err := miniogo.NewCore(u.Host, &miniogo.Options{ + c, err := minio.NewCore(u.Host, &minio.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: u.Scheme == "https", Transport: getRemoteInstanceTransport(), @@ -1470,7 +1469,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, vcfg, err := c.GetBucketVersioning(ctx, remoteBkt) if err != nil { - if miniogo.ToErrorResponse(err).Code == "NoSuchBucket" { + if minio.ToErrorResponse(err).Code == "NoSuchBucket" { return batchReplicationJobError{ Code: "NoSuchTargetBucket", Description: "The specified target bucket does not exist", @@ -1575,13 +1574,13 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string return err } -func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, isMP bool, err error) { +func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) { // TODO: support custom storage class for remote replication putOpts, isMP, err = putReplicationOpts(ctx, "", objInfo) if err != nil { return putOpts, isMP, err } - putOpts.Internal = miniogo.AdvancedPutOptions{ + putOpts.Internal = minio.AdvancedPutOptions{ SourceVersionID: objInfo.VersionID, SourceMTime: objInfo.ModTime, SourceETag: objInfo.ETag, @@ -1740,7 +1739,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) return } - buf, err := io.ReadAll(ioutil.HardLimitReader(r.Body, humanize.MiByte*4)) + buf, err := io.ReadAll(xioutil.HardLimitReader(r.Body, humanize.MiByte*4)) if err != nil { writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) return @@ -2300,15 +2299,15 @@ func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int) func } } -func lookupStyle(s string) miniogo.BucketLookupType { - var lookup miniogo.BucketLookupType +func lookupStyle(s string) minio.BucketLookupType { + var lookup minio.BucketLookupType switch s { case "on": - lookup = miniogo.BucketLookupPath + lookup = minio.BucketLookupPath case "off": - lookup = miniogo.BucketLookupDNS + lookup = minio.BucketLookupDNS default: - lookup = miniogo.BucketLookupAuto + lookup = minio.BucketLookupAuto } return lookup } diff --git a/cmd/bootstrap-messages.go b/cmd/bootstrap-messages.go index 39d82e5f0..c01b4529f 100644 --- a/cmd/bootstrap-messages.go +++ b/cmd/bootstrap-messages.go @@ -48,9 +48,7 @@ func (bs *bootstrapTracer) Events() []madmin.TraceInfo { traceInfo := make([]madmin.TraceInfo, 0, bootstrapTraceLimit) bs.mu.RLock() - for _, i := range bs.info { - traceInfo = append(traceInfo, i) - } + traceInfo = append(traceInfo, bs.info...) 
bs.mu.RUnlock() return traceInfo diff --git a/cmd/bucket-lifecycle-audit.go b/cmd/bucket-lifecycle-audit.go index ccbaaaf0f..1fa76e090 100644 --- a/cmd/bucket-lifecycle-audit.go +++ b/cmd/bucket-lifecycle-audit.go @@ -26,7 +26,7 @@ import ( //go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE type lcEventSrc uint8 -//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins +//nolint:staticcheck,revive // Underscores are used here to indicate where common prefix ends and the enumeration name begins const ( lcEventSrc_None lcEventSrc = iota lcEventSrc_Heal diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go index 0f1ae3b3c..5a787f409 100644 --- a/cmd/bucket-lifecycle.go +++ b/cmd/bucket-lifecycle.go @@ -152,7 +152,7 @@ func (f freeVersionTask) OpHash() uint64 { } func (n newerNoncurrentTask) OpHash() uint64 { - return xxh3.HashString(n.bucket + n.versions[0].ObjectV.ObjectName) + return xxh3.HashString(n.bucket + n.versions[0].ObjectName) } func (j jentry) OpHash() uint64 { diff --git a/cmd/bucket-object-lock.go b/cmd/bucket-object-lock.go index e08e13839..686ae240d 100644 --- a/cmd/bucket-object-lock.go +++ b/cmd/bucket-object-lock.go @@ -305,7 +305,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header) - if err != nil && !(replica && rMode == "" && rDate.IsZero()) { + if err != nil && (!replica || rMode != "" || !rDate.IsZero()) { return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } if retentionPermErr != ErrNone { diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 8c33ec305..7189309aa 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -3750,7 +3750,7 @@ func (p *ReplicationPool) queueMRFHeal() error { } func (p *ReplicationPool) initialized() bool { - return !(p == nil || p.objLayer == nil) + return p != nil && p.objLayer != nil } // getMRF returns MRF entries for this node. diff --git a/cmd/common-main.go b/cmd/common-main.go index 3c45b7aa7..81caa3c9f 100644 --- a/cmd/common-main.go +++ b/cmd/common-main.go @@ -382,7 +382,7 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) { } // Check "no-compat" flag from command line argument. - ctxt.StrictS3Compat = !(ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat")) + ctxt.StrictS3Compat = !ctx.IsSet("no-compat") && !ctx.GlobalIsSet("no-compat") switch { case ctx.IsSet("config-dir"): @@ -717,9 +717,7 @@ func serverHandleEnvVars() { logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value in environment variable") } // Look for if URL has invalid values and return error. - if !((u.Scheme == "http" || u.Scheme == "https") && - u.Opaque == "" && - !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") { + if !isValidURLEndpoint((*url.URL)(u)) { err := fmt.Errorf("URL contains unexpected resources, expected URL to be one of http(s)://console.example.com or as a subpath via API endpoint http(s)://minio.example.com/minio format: %v", u) logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value is environment variable") } @@ -734,9 +732,7 @@ func serverHandleEnvVars() { logger.Fatal(err, "Invalid MINIO_SERVER_URL value in environment variable") } // Look for if URL has invalid values and return error. 
- if !((u.Scheme == "http" || u.Scheme == "https") && - (u.Path == "/" || u.Path == "") && u.Opaque == "" && - !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") { + if !isValidURLEndpoint((*url.URL)(u)) { err := fmt.Errorf("URL contains unexpected resources, expected URL to be of http(s)://minio.example.com format: %v", u) logger.Fatal(err, "Invalid MINIO_SERVER_URL value is environment variable") } @@ -915,7 +911,7 @@ func handleKMSConfig() { } func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) { - if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) { + if !isFile(getPublicCertFile()) || !isFile(getPrivateKeyFile()) { return nil, nil, false, nil } diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go index 8c5917077..b3b38009a 100644 --- a/cmd/data-scanner.go +++ b/cmd/data-scanner.go @@ -37,7 +37,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/bucket/lifecycle" "github.com/minio/minio/internal/bucket/object/lock" - objectlock "github.com/minio/minio/internal/bucket/object/lock" "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/color" @@ -991,7 +990,7 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje versionID := oi.VersionID var vc *versioning.Versioning - var lr objectlock.Retention + var lr lock.Retention var rcfg *replication.Config if !isMinioMetaBucketName(i.bucket) { vc, err = globalBucketVersioningSys.Get(i.bucket) diff --git a/cmd/data-scanner_test.go b/cmd/data-scanner_test.go index 325131e49..0f2344d85 100644 --- a/cmd/data-scanner_test.go +++ b/cmd/data-scanner_test.go @@ -137,7 +137,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) { close(workers[0]) wg.Wait() for _, obj := range expired { - switch obj.ObjectV.VersionID { + switch obj.VersionID { case uuids[2].String(), uuids[3].String(), uuids[4].String(): default: t.Errorf("Unexpected versionID being expired: %#v\n", obj) diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go index 745d3c6a8..9e030a115 100644 --- a/cmd/data-usage-cache.go +++ b/cmd/data-usage-cache.go @@ -118,7 +118,6 @@ func (ats *allTierStats) populateStats(stats map[string]madmin.TierStats) { NumObjects: st.NumObjects, } } - return } // tierStats holds per-tier stats of a remote tier. 
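A note on the pattern that recurs through these hunks: a negated compound condition such as !(a && b) is rewritten to its De Morgan equivalent, which the newly enabled staticcheck/gocritic rules prefer. A minimal runnable sketch of the equivalence, illustrative only and not part of the patch (the names mirror the cmpReaders hunk in dummy-data-generator_test.go just below):

    package main

    import "fmt"

    func main() {
    	ok, msg := false, "readers differ"
    	// Old form, as removed in the hunk below: negated conjunction.
    	if !(ok && msg == "") {
    		fmt.Println("unexpected:", msg)
    	}
    	// New form after De Morgan's law: negate each operand, flip && to ||.
    	if !ok || msg != "" {
    		fmt.Println("unexpected:", msg)
    	}
    }
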
diff --git a/cmd/dummy-data-generator_test.go b/cmd/dummy-data-generator_test.go index 7281e8b8b..23c5ee2da 100644 --- a/cmd/dummy-data-generator_test.go +++ b/cmd/dummy-data-generator_test.go @@ -165,7 +165,7 @@ func TestCmpReaders(t *testing.T) { r1 := bytes.NewReader([]byte("abc")) r2 := bytes.NewReader([]byte("abc")) ok, msg := cmpReaders(r1, r2) - if !(ok && msg == "") { + if !ok || msg != "" { t.Fatalf("unexpected") } } diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index 179fe894c..9935ff9bd 100644 --- a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -1015,7 +1015,7 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e if encrypted { if crypto.SSEC.IsEncrypted(info.UserDefined) { - if !(crypto.SSEC.IsRequested(headers) || crypto.SSECopy.IsRequested(headers)) { + if !crypto.SSEC.IsRequested(headers) && !crypto.SSECopy.IsRequested(headers) { if r.Header.Get(xhttp.MinIOSourceReplicationRequest) != "true" { return encrypted, errEncryptedObject } @@ -1112,7 +1112,6 @@ func (o *ObjectInfo) decryptPartsChecksums(h http.Header) { o.Parts[i].Checksums = cs[i] } } - return } // metadataEncryptFn provides an encryption function for metadata. diff --git a/cmd/endpoint.go b/cmd/endpoint.go index 0dfe62e20..c0429945a 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -138,6 +138,17 @@ func (endpoint *Endpoint) SetDiskIndex(i int) { endpoint.DiskIdx = i } +func isValidURLEndpoint(u *url.URL) bool { + // URL style of endpoint. + // Valid URL style endpoint is + // - Scheme field must contain "http" or "https" + // - All field should be empty except Host and Path. + isURLOk := (u.Scheme == "http" || u.Scheme == "https") && + u.User == nil && u.Opaque == "" && !u.ForceQuery && + u.RawQuery == "" && u.Fragment == "" + return isURLOk +} + // NewEndpoint - returns new endpoint based on given arguments. func NewEndpoint(arg string) (ep Endpoint, e error) { // isEmptyPath - check whether given path is not empty. @@ -157,8 +168,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) { // Valid URL style endpoint is // - Scheme field must contain "http" or "https" // - All field should be empty except Host and Path. - if !((u.Scheme == "http" || u.Scheme == "https") && - u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") { + if !isValidURLEndpoint(u) { return ep, fmt.Errorf("invalid URL endpoint format") } @@ -604,11 +614,8 @@ func (endpoints Endpoints) UpdateIsLocal() error { startTime := time.Now() keepAliveTicker := time.NewTicker(500 * time.Millisecond) defer keepAliveTicker.Stop() - for { + for !foundLocal && (epsResolved != len(endpoints)) { // Break if the local endpoint is found already Or all the endpoints are resolved. - if foundLocal || (epsResolved == len(endpoints)) { - break - } // Retry infinitely on Kubernetes and Docker swarm. // This is needed as the remote hosts are sometime @@ -794,11 +801,8 @@ func (p PoolEndpointList) UpdateIsLocal() error { startTime := time.Now() keepAliveTicker := time.NewTicker(1 * time.Second) defer keepAliveTicker.Stop() - for { + for !foundLocal && (epsResolved != epCount) { // Break if the local endpoint is found already Or all the endpoints are resolved. - if foundLocal || (epsResolved == epCount) { - break - } // Retry infinitely on Kubernetes and Docker swarm. 
// This is needed as the remote hosts are sometime diff --git a/cmd/erasure-healing-common.go b/cmd/erasure-healing-common.go index 24c6a3b9a..fa3ee2b97 100644 --- a/cmd/erasure-healing-common.go +++ b/cmd/erasure-healing-common.go @@ -323,12 +323,7 @@ func checkObjectWithAllParts(ctx context.Context, onlineDisks []StorageAPI, part } } - erasureDistributionReliable := true - if inconsistent > len(partsMetadata)/2 { - // If there are too many inconsistent files, then we can't trust erasure.Distribution (most likely - // because of bugs found in CopyObject/PutObjectTags) https://github.com/minio/minio/pull/10772 - erasureDistributionReliable = false - } + erasureDistributionReliable := inconsistent <= len(partsMetadata)/2 metaErrs := make([]error, len(errs)) diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go index efe8b78e9..73c3fbb5e 100644 --- a/cmd/erasure-healing.go +++ b/cmd/erasure-healing.go @@ -943,12 +943,12 @@ func isObjectDirDangling(errs []error) (ok bool) { var foundNotEmpty int var otherFound int for _, readErr := range errs { - switch { - case readErr == nil: + switch readErr { + case nil: found++ - case readErr == errFileNotFound || readErr == errVolumeNotFound: + case errFileNotFound, errVolumeNotFound: notFound++ - case readErr == errVolumeNotEmpty: + case errVolumeNotEmpty: foundNotEmpty++ default: otherFound++ diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index 37fcb9d4e..8a458c3ac 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -813,8 +813,6 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s PoolIndex: er.poolIndex, }) } - - return }() validResp := 0 @@ -1634,7 +1632,7 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { if !opts.NoAuditLog { for _, obj := range objects { - auditObjectErasureSet(ctx, "DeleteObjects", obj.ObjectV.ObjectName, &er) + auditObjectErasureSet(ctx, "DeleteObjects", obj.ObjectName, &er) } } diff --git a/cmd/erasure-object_test.go b/cmd/erasure-object_test.go index 71b9d3b8d..4307af3a1 100644 --- a/cmd/erasure-object_test.go +++ b/cmd/erasure-object_test.go @@ -189,7 +189,7 @@ func TestDeleteObjectsVersionedTwoPools(t *testing.T) { t.Errorf("Test %d: Failed to remove object `%v` with the error: `%v`", testIdx, names[i], delErrs[i]) } _, statErr := obj.GetObjectInfo(ctx, bucketName, objectName, ObjectOptions{ - VersionID: names[i].ObjectV.VersionID, + VersionID: names[i].VersionID, }) switch statErr.(type) { case VersionNotFound: @@ -265,7 +265,7 @@ func TestDeleteObjectsVersioned(t *testing.T) { for i, test := range testCases { _, statErr := obj.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{ - VersionID: names[i].ObjectV.VersionID, + VersionID: names[i].VersionID, }) switch statErr.(type) { case VersionNotFound: diff --git a/cmd/erasure-server-pool-decom.go b/cmd/erasure-server-pool-decom.go index 2e2615882..078b41a07 100644 --- a/cmd/erasure-server-pool-decom.go +++ b/cmd/erasure-server-pool-decom.go @@ -1014,11 +1014,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool defer wk.Give() // We will perpetually retry listing if it fails, since we cannot // possibly give up in this matter - for { - if contextCanceled(ctx) { - break - } - + for !contextCanceled(ctx) { err := set.listObjectsToDecommission(ctx, bi, func(entry metaCacheEntry) { wk.Take() diff 
--git a/cmd/iam-store.go b/cmd/iam-store.go index c1620cd45..3f8c86bfa 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -2103,7 +2103,7 @@ func (store *IAMStoreSys) getParentUsers(cache *iamCache) map[string]ParentUserI cred := ui.Credentials // Only consider service account or STS credentials with // non-empty session tokens. - if !(cred.IsServiceAccount() || cred.IsTemp()) || + if (!cred.IsServiceAccount() && !cred.IsTemp()) || cred.SessionToken == "" { continue } diff --git a/cmd/iam.go b/cmd/iam.go index aaeea6ac7..7ee09c895 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -32,7 +32,6 @@ import ( "sync/atomic" "time" - libldap "github.com/go-ldap/ldap/v3" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/arn" @@ -1691,7 +1690,7 @@ func (sys *IAMSys) NormalizeLDAPMappingImport(ctx context.Context, isGroup bool, // We map keys that correspond to LDAP DNs and validate that they exist in // the LDAP server. - var dnValidator func(*libldap.Conn, string) (*ldap.DNSearchResult, bool, error) = sys.LDAPConfig.GetValidatedUserDN + dnValidator := sys.LDAPConfig.GetValidatedUserDN if isGroup { dnValidator = sys.LDAPConfig.GetValidatedGroupDN } diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go index da57ea9d5..9307fa3d6 100644 --- a/cmd/metacache-set.go +++ b/cmd/metacache-set.go @@ -215,7 +215,7 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache continue } if o.Lifecycle != nil || o.Replication.Config != nil { - if skipped := triggerExpiryAndRepl(ctx, *o, entry); skipped == true { + if skipped := triggerExpiryAndRepl(ctx, *o, entry); skipped { results.lastSkippedEntry = entry.name continue } diff --git a/cmd/metacache-walk.go b/cmd/metacache-walk.go index 9c9419fcc..9c6d48ff5 100644 --- a/cmd/metacache-walk.go +++ b/cmd/metacache-walk.go @@ -69,7 +69,7 @@ const ( // On success a sorted meta cache stream will be returned. // Metadata has data stripped, if any. func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) (err error) { - legacyFS := !(s.fsType == xfs || s.fsType == ext4) + legacyFS := s.fsType != xfs && s.fsType != ext4 s.RLock() legacy := s.formatLegacy diff --git a/cmd/metrics-resource.go b/cmd/metrics-resource.go index a34d8091b..2c6c44d59 100644 --- a/cmd/metrics-resource.go +++ b/cmd/metrics-resource.go @@ -278,7 +278,7 @@ func collectDriveMetrics(m madmin.RealtimeMetrics) { } func collectLocalResourceMetrics() { - var types madmin.MetricType = madmin.MetricsDisk | madmin.MetricNet | madmin.MetricsMem | madmin.MetricsCPU + types := madmin.MetricsDisk | madmin.MetricNet | madmin.MetricsMem | madmin.MetricsCPU m := collectLocalMetrics(types, collectMetricsOpts{}) for _, hm := range m.ByHost { diff --git a/cmd/metrics-v2_test.go b/cmd/metrics-v2_test.go index 1d1f4a6fa..a3994fd08 100644 --- a/cmd/metrics-v2_test.go +++ b/cmd/metrics-v2_test.go @@ -76,10 +76,8 @@ func TestGetHistogramMetrics_BucketCount(t *testing.T) { // Send observations once every 1ms, to simulate delay between // observations. This is to test the channel based // synchronization used internally. 
- select { - case <-ticker.C: - ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val) - } + <-ticker.C + ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val) } metrics := getHistogramMetrics(ttfbHist, getBucketTTFBDistributionMD(), false, false) @@ -137,10 +135,8 @@ func TestGetHistogramMetrics_Values(t *testing.T) { // Send observations once every 1ms, to simulate delay between // observations. This is to test the channel based // synchronization used internally. - select { - case <-ticker.C: - ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val) - } + <-ticker.C + ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val) } // Accumulate regular-cased API label metrics for 'PutObject' for deeper verification diff --git a/cmd/metrics-v3-cache.go b/cmd/metrics-v3-cache.go index 1e8856a78..0a6c10377 100644 --- a/cmd/metrics-v3-cache.go +++ b/cmd/metrics-v3-cache.go @@ -205,7 +205,7 @@ func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] { func newCPUMetricsCache() *cachevalue.Cache[madmin.CPUMetrics] { loadCPUMetrics := func(ctx context.Context) (v madmin.CPUMetrics, err error) { - var types madmin.MetricType = madmin.MetricsCPU + types := madmin.MetricsCPU m := collectLocalMetrics(types, collectMetricsOpts{ hosts: map[string]struct{}{ @@ -230,7 +230,7 @@ func newCPUMetricsCache() *cachevalue.Cache[madmin.CPUMetrics] { func newMemoryMetricsCache() *cachevalue.Cache[madmin.MemInfo] { loadMemoryMetrics := func(ctx context.Context) (v madmin.MemInfo, err error) { - var types madmin.MetricType = madmin.MetricsMem + types := madmin.MetricsMem m := collectLocalMetrics(types, collectMetricsOpts{ hosts: map[string]struct{}{ diff --git a/cmd/metrics-v3-system-drive.go b/cmd/metrics-v3-system-drive.go index 1da77bf86..d25a623c4 100644 --- a/cmd/metrics-v3-system-drive.go +++ b/cmd/metrics-v3-system-drive.go @@ -131,7 +131,7 @@ var ( ) func getCurrentDriveIOStats() map[string]madmin.DiskIOStats { - var types madmin.MetricType = madmin.MetricsDisk + types := madmin.MetricsDisk driveRealtimeMetrics := collectLocalMetrics(types, collectMetricsOpts{ hosts: map[string]struct{}{ globalLocalNodeName: {}, diff --git a/cmd/mrf.go b/cmd/mrf.go index f97583ad9..af9b0e03c 100644 --- a/cmd/mrf.go +++ b/cmd/mrf.go @@ -208,8 +208,6 @@ func (m *mrfState) startMRFPersistence() { localDrive.Delete(GlobalContext, minioMetaBucket, pathJoin(healMRFDir, "list.bin"), DeleteOptions{}) break } - - return } var healSleeper = newDynamicSleeper(5, time.Second, false) diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 3f9b1e0c9..1cc6782fb 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -208,8 +208,8 @@ func (o *ObjectOptions) SetDeleteReplicationState(dsc ReplicateDecision, vID str o.DeleteReplication = ReplicationState{ ReplicateDecisionStr: dsc.String(), } - switch { - case o.VersionID == "": + switch o.VersionID { + case "": o.DeleteReplication.ReplicationStatusInternal = dsc.PendingStatus() o.DeleteReplication.Targets = replicationStatusesMap(o.DeleteReplication.ReplicationStatusInternal) default: diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index 45e29c00f..4c29097f0 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -47,7 +47,6 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil" 
"github.com/minio/minio/internal/logger" "github.com/minio/pkg/v3/trie" @@ -146,7 +145,7 @@ func IsValidBucketName(bucket string) bool { allNumbers = allNumbers && !isNotNumber } // Does the bucket name look like an IP address? - return !(len(pieces) == 4 && allNumbers) + return len(pieces) != 4 || !allNumbers } // IsValidObjectName verifies an object name in accordance with Amazon's @@ -885,7 +884,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, h return nil, err } if decryptSkip > 0 { - inputReader = ioutil.NewSkipReader(inputReader, decryptSkip) + inputReader = xioutil.NewSkipReader(inputReader, decryptSkip) } oi.Size = decLength } @@ -970,7 +969,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, h // Apply the skipLen and limit on the // decrypted stream - decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength) + decReader = io.LimitReader(xioutil.NewSkipReader(decReader, skipLen), decRangeLength) // Assemble the GetObjectReader r = &GetObjectReader{ diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index cff95980a..2270fb5a8 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -408,7 +408,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj perr error ) - if (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) && !(gr != nil && gr.ObjInfo.DeleteMarker) { + if (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) && (gr == nil || !gr.ObjInfo.DeleteMarker) { proxytgts := getProxyTargets(ctx, bucket, object, opts) if !proxytgts.Empty() { globalReplicationStats.Load().incProxy(bucket, getObjectAPI, false) @@ -537,8 +537,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj setHeadGetRespHeaders(w, r.Form) - var iw io.Writer - iw = w + var iw io.Writer = w statusCodeWritten := false httpWriter := xioutil.WriteOnClose(iw) @@ -707,8 +706,6 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) - - return } // GetObjectHandler - GET Object diff --git a/cmd/object-multipart-handlers.go b/cmd/object-multipart-handlers.go index dfcafbcdf..c36d0abb0 100644 --- a/cmd/object-multipart-handlers.go +++ b/cmd/object-multipart-handlers.go @@ -130,7 +130,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r break } } - if !(ssecRep && sourceReplReq) { + if !ssecRep || !sourceReplReq { if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -803,7 +803,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http } } - if !(sourceReplReq && crypto.SSEC.IsEncrypted(mi.UserDefined)) { + if !sourceReplReq || !crypto.SSEC.IsEncrypted(mi.UserDefined) { // Calculating object encryption key key, err = decryptObjectMeta(key, bucket, object, mi.UserDefined) if err != nil { diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index 3c8756d00..4a47b4083 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -39,7 +39,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/madmin-go/v3/logger/log" "github.com/minio/minio/internal/bucket/bandwidth" - b "github.com/minio/minio/internal/bucket/bandwidth" "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/grid" xioutil 
"github.com/minio/minio/internal/ioutil" @@ -1035,7 +1034,7 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool { // GetBandwidth gets the bandwidth for the buckets requested. func (s *peerRESTServer) GetBandwidth(params *grid.URLValues) (*bandwidth.BucketBandwidthReport, *grid.RemoteErr) { buckets := params.Values().Get("buckets") - selectBuckets := b.SelectBuckets(buckets) + selectBuckets := bandwidth.SelectBuckets(buckets) return globalBucketMonitor.GetReport(selectBuckets), nil } diff --git a/cmd/perf-tests.go b/cmd/perf-tests.go index 5e2c77be3..33b01722d 100644 --- a/cmd/perf-tests.go +++ b/cmd/perf-tests.go @@ -338,10 +338,7 @@ func netperf(ctx context.Context, duration time.Duration) madmin.NetperfNodeResu time.Sleep(duration) xioutil.SafeClose(r.eof) wg.Wait() - for { - if globalNetPerfRX.ActiveConnections() == 0 { - break - } + for globalNetPerfRX.ActiveConnections() != 0 { time.Sleep(time.Second) } rx := float64(globalNetPerfRX.RXSample) @@ -396,10 +393,7 @@ func siteNetperf(ctx context.Context, duration time.Duration) madmin.SiteNetPerf time.Sleep(duration) xioutil.SafeClose(r.eof) wg.Wait() - for { - if globalSiteNetPerfRX.ActiveConnections() == 0 || contextCanceled(ctx) { - break - } + for globalSiteNetPerfRX.ActiveConnections() != 0 && !contextCanceled(ctx) { time.Sleep(time.Second) } rx := float64(globalSiteNetPerfRX.RXSample) diff --git a/cmd/site-replication.go b/cmd/site-replication.go index f2005f012..81a19b435 100644 --- a/cmd/site-replication.go +++ b/cmd/site-replication.go @@ -37,7 +37,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" - minioClient "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/replication" "github.com/minio/minio-go/v7/pkg/set" @@ -478,8 +477,8 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi var secretKey string var svcCred auth.Credentials sa, _, err := globalIAMSys.getServiceAccount(ctx, siteReplicatorSvcAcc) - switch { - case err == errNoSuchServiceAccount: + switch err { + case errNoSuchServiceAccount: _, secretKey, err = auth.GenerateCredentials() if err != nil { return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err)) @@ -492,7 +491,7 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi if err != nil { return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err)) } - case err == nil: + case nil: svcCred = sa.Credentials secretKey = svcCred.SecretKey default: @@ -738,7 +737,6 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration resultsMu.Lock() results.NodeResults = append(results.NodeResults, result) resultsMu.Unlock() - return }() continue } @@ -756,7 +754,6 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration resultsMu.Lock() results.NodeResults = append(results.NodeResults, result) resultsMu.Unlock() - return }() } wg.Wait() @@ -2625,7 +2622,7 @@ func getAdminClient(endpoint, accessKey, secretKey string) (*madmin.AdminClient, return client, nil } -func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) { +func getS3Client(pc madmin.PeerSite) (*minio.Client, error) { ep, err := url.Parse(pc.Endpoint) if err != nil { return nil, err @@ -2634,7 +2631,7 @@ func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) { return nil, RemoteTargetConnectionErr{Endpoint: 
ep.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", ep.String())} } - return minioClient.New(ep.Host, &minioClient.Options{ + return minio.New(ep.Host, &minio.Options{ Creds: credentials.NewStaticV4(pc.AccessKey, pc.SecretKey, ""), Secure: ep.Scheme == "https", Transport: globalRemoteTargetTransport, @@ -3106,7 +3103,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O var policies []*policy.Policy uPolicyCount := 0 for _, ps := range pslc { - plcy, err := policy.ParseConfig(bytes.NewReader([]byte(ps.SRIAMPolicy.Policy))) + plcy, err := policy.ParseConfig(bytes.NewReader([]byte(ps.Policy))) if err != nil { continue } @@ -3323,7 +3320,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O uRuleCount := 0 for _, rl := range ilmExpRules { var rule lifecycle.Rule - if err := xml.Unmarshal([]byte(rl.ILMExpiryRule.ILMRule), &rule); err != nil { + if err := xml.Unmarshal([]byte(rl.ILMRule), &rule); err != nil { continue } rules = append(rules, &rule) @@ -3600,7 +3597,7 @@ func isILMExpRuleReplicated(cntReplicated, total int, rules []*lifecycle.Rule) b if err != nil { return false } - if !(string(prevRData) == string(rData)) { + if string(prevRData) != string(rData) { return false } } @@ -4416,7 +4413,7 @@ func (c *SiteReplicationSys) healILMExpiryConfig(ctx context.Context, objAPI Obj // If latest peers ILM expiry flags are equal to current peer, no need to heal flagEqual := true for id, peer := range latestPeers { - if !(ps.Peers[id].ReplicateILMExpiry == peer.ReplicateILMExpiry) { + if ps.Peers[id].ReplicateILMExpiry != peer.ReplicateILMExpiry { flagEqual = false break } @@ -5478,12 +5475,12 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, ) for dID, ss := range us { if lastUpdate.IsZero() { - lastUpdate = ss.userInfo.UserInfo.UpdatedAt + lastUpdate = ss.userInfo.UpdatedAt latestID = dID latestUserStat = ss } - if !ss.userInfo.UserInfo.UpdatedAt.IsZero() && ss.userInfo.UserInfo.UpdatedAt.After(lastUpdate) { - lastUpdate = ss.userInfo.UserInfo.UpdatedAt + if !ss.userInfo.UpdatedAt.IsZero() && ss.userInfo.UpdatedAt.After(lastUpdate) { + lastUpdate = ss.userInfo.UpdatedAt latestID = dID latestUserStat = ss } diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index cad47b696..d35dd4259 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -37,7 +37,6 @@ import ( "github.com/minio/minio/internal/cachevalue" "github.com/minio/minio/internal/grid" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/rest" xnet "github.com/minio/pkg/v3/net" @@ -928,7 +927,7 @@ func (client *storageRESTClient) ReadMultiple(ctx context.Context, req ReadMulti pr, pw := io.Pipe() go func() { - pw.CloseWithError(waitForHTTPStream(respBody, ioutil.NewDeadlineWriter(pw, globalDriveConfig.GetMaxTimeout()))) + pw.CloseWithError(waitForHTTPStream(respBody, xioutil.NewDeadlineWriter(pw, globalDriveConfig.GetMaxTimeout()))) }() mr := msgp.NewReader(pr) defer readMsgpReaderPoolPut(mr) diff --git a/cmd/streaming-signature-v4.go b/cmd/streaming-signature-v4.go index 039cb7eec..c02313392 100644 --- a/cmd/streaming-signature-v4.go +++ b/cmd/streaming-signature-v4.go @@ -592,9 +592,10 @@ func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) { if err != nil { // We always know when EOF is coming. // If the caller asked for a line, there should be a line. 
- if err == io.EOF { + switch err { + case io.EOF: err = io.ErrUnexpectedEOF - } else if err == bufio.ErrBufferFull { + case bufio.ErrBufferFull: err = errLineTooLong } return nil, nil, err diff --git a/cmd/sts-handlers_test.go b/cmd/sts-handlers_test.go index 0de49d49c..daa46b053 100644 --- a/cmd/sts-handlers_test.go +++ b/cmd/sts-handlers_test.go @@ -642,7 +642,7 @@ func (s *TestSuiteIAM) TestSTSForRoot(c *check) { gotBuckets := set.NewStringSet() for _, b := range accInfo.Buckets { gotBuckets.Add(b.Name) - if !(b.Access.Read && b.Access.Write) { + if !b.Access.Read || !b.Access.Write { c.Fatalf("root user should have read and write access to bucket: %v", b.Name) } } @@ -1443,7 +1443,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariationsLegacyAPI(c *check) { idx := slices.IndexFunc(policyResult.PolicyMappings, func(e madmin.PolicyEntities) bool { return e.Policy == policy && slices.Contains(e.Groups, actualGroupDN) }) - if !(idx >= 0) { + if idx < 0 { c.Fatalf("expected groupDN (%s) to be present in mapping list: %#v", actualGroupDN, policyResult) } } @@ -1606,7 +1606,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) { idx := slices.IndexFunc(policyResult.PolicyMappings, func(e madmin.PolicyEntities) bool { return e.Policy == policy && slices.Contains(e.Groups, actualGroupDN) }) - if !(idx >= 0) { + if idx < 0 { c.Fatalf("expected groupDN (%s) to be present in mapping list: %#v", actualGroupDN, policyResult) } } diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 3ae977341..e729e6980 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -226,10 +226,7 @@ func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, err for _, sets := range pool.erasureDisks { for _, s := range sets { if !s.IsLocal() { - for { - if s.IsOnline() { - break - } + for !s.IsOnline() { time.Sleep(100 * time.Millisecond) if time.Since(t) > 10*time.Second { return nil, nil, errors.New("timeout waiting for disk to come online") @@ -642,8 +639,8 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi for _, k := range headers { buf.WriteString(k) buf.WriteByte(':') - switch { - case k == "host": + switch k { + case "host": buf.WriteString(req.URL.Host) fallthrough default: @@ -996,8 +993,8 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { for _, k := range headers { buf.WriteString(k) buf.WriteByte(':') - switch { - case k == "host": + switch k { + case "host": buf.WriteString(req.URL.Host) fallthrough default: @@ -1089,8 +1086,8 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek // Save for subsequent use var hashedPayload string var md5Base64 string - switch { - case body == nil: + switch body { + case nil: hashedPayload = getSHA256Hash([]byte{}) default: payloadBytes, err := io.ReadAll(body) @@ -2393,7 +2390,7 @@ func unzipArchive(zipFilePath, targetDir string) error { if err != nil { return err } - for _, file := range zipReader.Reader.File { + for _, file := range zipReader.File { zippedFile, err := file.Open() if err != nil { return err diff --git a/cmd/warm-backend-gcs.go b/cmd/warm-backend-gcs.go index 8cacb71c0..cbfcd4224 100644 --- a/cmd/warm-backend-gcs.go +++ b/cmd/warm-backend-gcs.go @@ -51,9 +51,9 @@ func (gcs *warmBackendGCS) PutWithMeta(ctx context.Context, key string, data io. 
object := gcs.client.Bucket(gcs.Bucket).Object(gcs.getDest(key)) w := object.NewWriter(ctx) if gcs.StorageClass != "" { - w.ObjectAttrs.StorageClass = gcs.StorageClass + w.StorageClass = gcs.StorageClass } - w.ObjectAttrs.Metadata = meta + w.Metadata = meta if _, err := xioutil.Copy(w, data); err != nil { return "", gcsToObjectError(err, gcs.Bucket, key) } diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go index ee5ec6928..11501124a 100644 --- a/cmd/xl-storage-disk-id-check.go +++ b/cmd/xl-storage-disk-id-check.go @@ -159,7 +159,7 @@ func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) { a.Total = atomic.LoadInt64(&old.Total) a.N = atomic.LoadInt64(&old.N) e.mu.Lock() - e.lastMinuteLatency.addAll(t-1, a) + e.addAll(t-1, a) e.mu.Unlock() acc = newAcc } else { @@ -177,7 +177,7 @@ func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) { func (e *lockedLastMinuteLatency) total() AccElem { e.mu.Lock() defer e.mu.Unlock() - return e.lastMinuteLatency.getTotal() + return e.getTotal() } func newXLStorageDiskIDCheck(storage *xlStorage, healthCheck bool) *xlStorageDiskIDCheck { diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index d83324a08..a270d2ff5 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -1716,7 +1716,7 @@ func (s *xlStorage) ReadVersion(ctx context.Context, origvolume, volume, path, v // If written with header we are fine. return fi, nil } - if fi.Size == 0 || !(fi.VersionID != "" && fi.VersionID != nullVersionID) { + if fi.Size == 0 || (fi.VersionID == "" || fi.VersionID == nullVersionID) { // If versioned we have no conflicts. fi.SetInlineData() return fi, nil @@ -3024,7 +3024,7 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum srcIsDir := HasSuffix(srcPath, SlashSeparator) dstIsDir := HasSuffix(dstPath, SlashSeparator) // Either src and dst have to be directories or files, else return error. - if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) { + if (!srcIsDir || !dstIsDir) && (srcIsDir || dstIsDir) { return errFileAccessDenied } srcFilePath := pathutil.Join(srcVolumeDir, srcPath) diff --git a/cmd/xl-storage_test.go b/cmd/xl-storage_test.go index 5933d084d..f4a5d89ad 100644 --- a/cmd/xl-storage_test.go +++ b/cmd/xl-storage_test.go @@ -1194,7 +1194,7 @@ func TestXLStorageReadFile(t *testing.T) { expectErrno = uintptr(errno) } } - if !(expectErrno != 0 && resultErrno != 0 && expectErrno == resultErrno) { + if expectErrno == 0 || resultErrno == 0 || expectErrno != resultErrno { t.Errorf("Case: %d %#v, expected: %s, got: %s", i+1, testCase, testCase.expectedErr, err) } } diff --git a/docs/debugging/xl-meta/main.go b/docs/debugging/xl-meta/main.go index e062bf7ae..4ac25b4b1 100644 --- a/docs/debugging/xl-meta/main.go +++ b/docs/debugging/xl-meta/main.go @@ -68,6 +68,7 @@ FLAGS: {{range .VisibleFlags}}{{.}} {{end}} ` + //nolint:staticcheck isPart := regexp.MustCompile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/part\\.[0-9]+$") app.HideHelpCommand = true @@ -1508,7 +1509,7 @@ func reconPartial(shards [][]byte, k int, parityOK []bool, splitData [][]byte, s } } } - fmt.Println("Reconstructed", reconstructed, "bytes and verified", verified, "bytes of partial shard with config", shardConfig) + fmt.Println("Reconstructed", reconstructed, "bytes and verified", verified, "bytes of partial shard with config", string(shardConfig)) } // bitrot returns a shard beginning at startOffset after doing bitrot checks. 
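NOTE (editor): the `!(a && b)`-style rewrites in cmd/xl-storage.go and cmd/xl-storage_test.go above (and in the internal/bucket/lifecycle files below) are mechanical De Morgan expansions, presumably prompted by staticcheck's De Morgan quickfix (QF1001) or gocritic's boolExprSimplify. A minimal, self-contained sketch of the equivalence — the identifiers are illustrative, not from this patch:

package main

import "fmt"

func main() {
	// Exhaustively check that the rewritten RenameFile condition is
	// equivalent to the original negated form.
	for _, src := range []bool{true, false} {
		for _, dst := range []bool{true, false} {
			before := !(src && dst || !src && !dst) // old form
			after := (!src || !dst) && (src || dst) // De Morgan expansion
			fmt.Println(before == after)            // always true
		}
	}
}

The expanded form is what the linter prefers because each operand reads as a positive assertion instead of a negated compound.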
@@ -1555,6 +1556,7 @@ func shardSize(blockSize, dataBlocks int) (sz int) { return } +//nolint:staticcheck var rePartNum = regexp.MustCompile("/part\\.([0-9]+)/") func getPartNum(s string) int { diff --git a/internal/bucket/lifecycle/expiration.go b/internal/bucket/lifecycle/expiration.go index d33a4301d..8acc20398 100644 --- a/internal/bucket/lifecycle/expiration.go +++ b/internal/bucket/lifecycle/expiration.go @@ -82,7 +82,7 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start hr, m, sec := expDate.Clock() nsec := expDate.Nanosecond() loc := expDate.Location() - if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { + if hr != 0 || m != 0 || sec != 0 || nsec != 0 || loc.String() != time.UTC.String() { return errLifecycleDateNotMidnight } @@ -93,7 +93,7 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start // MarshalXML encodes expiration date if it is non-zero and encodes // empty string otherwise func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if eDate.Time.IsZero() { + if eDate.IsZero() { return nil } return e.EncodeElement(eDate.Format(time.RFC3339), startElement) @@ -202,7 +202,7 @@ func (e Expiration) IsDaysNull() bool { // IsDateNull returns true if date field is null func (e Expiration) IsDateNull() bool { - return e.Date.Time.IsZero() + return e.Date.IsZero() } // IsNull returns true if both date and days fields are null diff --git a/internal/bucket/lifecycle/transition.go b/internal/bucket/lifecycle/transition.go index 96673d5d2..397f4c015 100644 --- a/internal/bucket/lifecycle/transition.go +++ b/internal/bucket/lifecycle/transition.go @@ -53,7 +53,7 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start hr, m, sec := trnDate.Clock() nsec := trnDate.Nanosecond() loc := trnDate.Location() - if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { + if hr != 0 || m != 0 || sec != 0 || nsec != 0 || loc.String() != time.UTC.String() { return errTransitionDateNotMidnight } @@ -64,7 +64,7 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start // MarshalXML encodes expiration date if it is non-zero and encodes // empty string otherwise func (tDate TransitionDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if tDate.Time.IsZero() { + if tDate.IsZero() { return nil } return e.EncodeElement(tDate.Format(time.RFC3339), startElement) @@ -151,7 +151,7 @@ func (t Transition) Validate() error { // IsDateNull returns true if date field is null func (t Transition) IsDateNull() bool { - return t.Date.Time.IsZero() + return t.Date.IsZero() } // IsNull returns true if both date and days fields are null diff --git a/internal/bucket/replication/replication.go b/internal/bucket/replication/replication.go index 8a179ebdd..409e1c191 100644 --- a/internal/bucket/replication/replication.go +++ b/internal/bucket/replication/replication.go @@ -176,7 +176,7 @@ func (c Config) HasExistingObjectReplication(arn string) (hasARN, isEnabled bool // FilterActionableRules returns the rules actions that need to be executed // after evaluating prefix/tag filtering func (c Config) FilterActionableRules(obj ObjectOpts) []Rule { - if obj.Name == "" && !(obj.OpType == ResyncReplicationType || obj.OpType == AllReplicationType) { + if obj.Name == "" && (obj.OpType != ResyncReplicationType && obj.OpType != AllReplicationType) { return nil } var rules []Rule diff --git 
a/internal/config/lambda/event/arn.go b/internal/config/lambda/event/arn.go index 79dc284f4..65a9644d4 100644 --- a/internal/config/lambda/event/arn.go +++ b/internal/config/lambda/event/arn.go @@ -29,7 +29,7 @@ type ARN struct { // String - returns string representation. func (arn ARN) String() string { - if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" { + if arn.ID == "" && arn.Name == "" && arn.region == "" { return "" } diff --git a/internal/etag/reader.go b/internal/etag/reader.go index 9bbbd5036..56da3da95 100644 --- a/internal/etag/reader.go +++ b/internal/etag/reader.go @@ -178,9 +178,7 @@ func (u UUIDHash) Sum(b []byte) []byte { } // Reset - implement hash.Hash Reset -func (u UUIDHash) Reset() { - return -} +func (u UUIDHash) Reset() {} // Size - implement hash.Hash Size func (u UUIDHash) Size() int { diff --git a/internal/event/arn.go b/internal/event/arn.go index 4355338ba..6c2635603 100644 --- a/internal/event/arn.go +++ b/internal/event/arn.go @@ -30,7 +30,7 @@ type ARN struct { // String - returns string representation. func (arn ARN) String() string { - if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" { + if arn.ID == "" && arn.Name == "" && arn.region == "" { return "" } diff --git a/internal/event/target/amqp.go b/internal/event/target/amqp.go index c987088da..0707bae33 100644 --- a/internal/event/target/amqp.go +++ b/internal/event/target/amqp.go @@ -55,9 +55,11 @@ type AMQPArgs struct { QueueLimit uint64 `json:"queueLimit"` } -//lint:file-ignore ST1003 We cannot change these exported names. - // AMQP input constants. +// +// ST1003 We cannot change these exported names. +// +//nolint:staticcheck const ( AmqpQueueDir = "queue_dir" AmqpQueueLimit = "queue_limit" diff --git a/internal/event/target/kafka_scram_client_contrib.go b/internal/event/target/kafka_scram_client_contrib.go index 6bb02ed41..be8eaceaa 100644 --- a/internal/event/target/kafka_scram_client_contrib.go +++ b/internal/event/target/kafka_scram_client_contrib.go @@ -62,11 +62,11 @@ type XDGSCRAMClient struct { // and authzID via the SASLprep algorithm, as recommended by RFC-5802. If // SASLprep fails, the method returns an error. func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { - x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + x.Client, err = x.NewClient(userName, password, authzID) if err != nil { return err } - x.ClientConversation = x.Client.NewConversation() + x.ClientConversation = x.NewConversation() return nil } diff --git a/internal/event/target/mysql.go b/internal/event/target/mysql.go index acccedd89..0f311232a 100644 --- a/internal/event/target/mysql.go +++ b/internal/event/target/mysql.go @@ -375,7 +375,7 @@ func (target *MySQLTarget) initMySQL() error { err = target.db.Ping() if err != nil { - if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) { + if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) { target.loggerOnce(context.Background(), err, target.ID().String()) } } else { diff --git a/internal/event/target/nsq.go b/internal/event/target/nsq.go index a44cae108..ec04d9937 100644 --- a/internal/event/target/nsq.go +++ b/internal/event/target/nsq.go @@ -243,7 +243,7 @@ func (target *NSQTarget) initNSQ() error { err = target.producer.Ping() if err != nil { // To treat "connection refused" errors as errNotConnected. 
- if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) { + if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) { target.loggerOnce(context.Background(), err, target.ID().String()) } target.producer.Stop() diff --git a/internal/event/target/postgresql.go b/internal/event/target/postgresql.go index 1a99db673..9bd9a886f 100644 --- a/internal/event/target/postgresql.go +++ b/internal/event/target/postgresql.go @@ -376,7 +376,7 @@ func (target *PostgreSQLTarget) initPostgreSQL() error { err = target.db.Ping() if err != nil { - if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) { + if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) { target.loggerOnce(context.Background(), err, target.ID().String()) } } else { diff --git a/internal/event/target/redis.go b/internal/event/target/redis.go index 78d6c7555..8f7d42729 100644 --- a/internal/event/target/redis.go +++ b/internal/event/target/redis.go @@ -293,7 +293,7 @@ func (target *RedisTarget) initRedis() error { _, pingErr := conn.Do("PING") if pingErr != nil { - if !(xnet.IsConnRefusedErr(pingErr) || xnet.IsConnResetErr(pingErr)) { + if !xnet.IsConnRefusedErr(pingErr) && !xnet.IsConnResetErr(pingErr) { target.loggerOnce(context.Background(), pingErr, target.ID().String()) } return pingErr diff --git a/internal/event/targetlist.go b/internal/event/targetlist.go index 7d8ae2f0a..3ebe6f0fe 100644 --- a/internal/event/targetlist.go +++ b/internal/event/targetlist.go @@ -114,7 +114,6 @@ func (list *TargetList) incCurrentSendCalls(id TargetID) { stats.currentSendCalls++ list.targetStats[id] = stats - return } func (list *TargetList) decCurrentSendCalls(id TargetID) { @@ -129,7 +128,6 @@ func (list *TargetList) decCurrentSendCalls(id TargetID) { stats.currentSendCalls-- list.targetStats[id] = stats - return } func (list *TargetList) incFailedEvents(id TargetID) { @@ -143,7 +141,6 @@ func (list *TargetList) incFailedEvents(id TargetID) { stats.failedEvents++ list.targetStats[id] = stats - return } func (list *TargetList) incTotalEvents(id TargetID) { @@ -157,7 +154,6 @@ func (list *TargetList) incTotalEvents(id TargetID) { stats.totalEvents++ list.targetStats[id] = stats - return } type asyncEvent struct { diff --git a/internal/grid/benchmark_test.go b/internal/grid/benchmark_test.go index e5cfd0c68..2ecdb5d97 100644 --- a/internal/grid/benchmark_test.go +++ b/internal/grid/benchmark_test.go @@ -466,19 +466,14 @@ func benchmarkGridStreamTwoway(b *testing.B, n int) { // Send 10x requests. Handle: func(ctx context.Context, payload []byte, in <-chan []byte, out chan<- []byte) *RemoteErr { got := 0 - for { - select { - case b, ok := <-in: - if !ok { - if got != messages { - return NewRemoteErrf("wrong number of requests. want %d, got %d", messages, got) - } - return nil - } - out <- b - got++ - } + for b := range in { + out <- b + got++ } + if got != messages { + return NewRemoteErrf("wrong number of requests. 
want %d, got %d", messages, got) + } + return nil }, Subroute: "some-subroute", diff --git a/internal/grid/connection.go b/internal/grid/connection.go index 25f2baef6..b79d6efa9 100644 --- a/internal/grid/connection.go +++ b/internal/grid/connection.go @@ -1511,7 +1511,6 @@ func (c *Connection) handlePing(ctx context.Context, m message) { pong := pongMsg{NotFound: true, T: ping.T} gridLogIf(ctx, c.queueMsg(m, &pong)) } - return } func (c *Connection) handleDisconnectClientMux(m message) { diff --git a/internal/grid/grid.go b/internal/grid/grid.go index b458729a1..42872b063 100644 --- a/internal/grid/grid.go +++ b/internal/grid/grid.go @@ -192,7 +192,7 @@ func bytesOrLength(b []byte) string { if len(b) > 100 { return fmt.Sprintf("%d bytes", len(b)) } - return fmt.Sprint(b) + return fmt.Sprint(string(b)) } // ConnDialer is a function that dials a connection to the given address. diff --git a/internal/grid/types.go b/internal/grid/types.go index c7b0c2826..b24de8bf3 100644 --- a/internal/grid/types.go +++ b/internal/grid/types.go @@ -387,15 +387,14 @@ func (u *URLValues) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (u URLValues) Msgsize() (s int) { s = msgp.MapHeaderSize - if u != nil { - for zb0006, zb0007 := range u { - _ = zb0007 - s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize - for zb0008 := range zb0007 { - s += msgp.StringPrefixSize + len(zb0007[zb0008]) - } + for zb0006, zb0007 := range u { + _ = zb0007 + s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize + for zb0008 := range zb0007 { + s += msgp.StringPrefixSize + len(zb0007[zb0008]) } } + return } diff --git a/internal/http/response-recorder.go b/internal/http/response-recorder.go index 0777eac4f..d6a397dc7 100644 --- a/internal/http/response-recorder.go +++ b/internal/http/response-recorder.go @@ -172,7 +172,7 @@ func (lrw *ResponseRecorder) WriteHeader(code int) { if !lrw.headersLogged { lrw.ttfbHeader = time.Now().UTC().Sub(lrw.StartTime) lrw.StatusCode = code - lrw.writeHeaders(&lrw.headers, code, lrw.ResponseWriter.Header()) + lrw.writeHeaders(&lrw.headers, code, lrw.Header()) lrw.headersLogged = true lrw.ResponseWriter.WriteHeader(code) } diff --git a/internal/http/server.go b/internal/http/server.go index 10b471c65..aa6201fd5 100644 --- a/internal/http/server.go +++ b/internal/http/server.go @@ -129,7 +129,7 @@ func (srv *Server) Init(listenCtx context.Context, listenErrCallback func(listen } serve = func() error { - return srv.Server.Serve(l) + return srv.Serve(l) } return diff --git a/internal/logger/logrotate.go b/internal/logger/logrotate.go index 9bd52d0c4..0f47901c9 100644 --- a/internal/logger/logrotate.go +++ b/internal/logger/logrotate.go @@ -147,8 +147,7 @@ func (w *Writer) compress() error { } defer gw.Close() - var wc io.WriteCloser - wc = gzip.NewWriter(gw) + var wc io.WriteCloser = gzip.NewWriter(gw) if _, err = io.Copy(wc, r); err != nil { return err } diff --git a/internal/logger/reqinfo.go b/internal/logger/reqinfo.go index 85a2af006..0280c8aee 100644 --- a/internal/logger/reqinfo.go +++ b/internal/logger/reqinfo.go @@ -147,7 +147,6 @@ func (r *ReqInfo) PopulateTagsMap(tagsMap map[string]string) { for _, t := range r.tags { tagsMap[t.Key] = t.Val } - return } // SetReqInfo sets ReqInfo in the context. 
diff --git a/internal/logger/target/http/http.go b/internal/logger/target/http/http.go index b3fd2def4..55c933e40 100644 --- a/internal/logger/target/http/http.go +++ b/internal/logger/target/http/http.go @@ -353,9 +353,9 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) { if count < h.batchSize { tickered := false select { - case _ = <-ticker.C: + case <-ticker.C: tickered = true - case entry, _ = <-globalBuffer: + case entry = <-globalBuffer: case entry, ok = <-h.logCh: if !ok { return diff --git a/internal/logger/target/kafka/kafka_scram_client_contrib.go b/internal/logger/target/kafka/kafka_scram_client_contrib.go index 3e11bea3c..e012c3497 100644 --- a/internal/logger/target/kafka/kafka_scram_client_contrib.go +++ b/internal/logger/target/kafka/kafka_scram_client_contrib.go @@ -63,11 +63,11 @@ type XDGSCRAMClient struct { // and authzID via the SASLprep algorithm, as recommended by RFC-5802. If // SASLprep fails, the method returns an error. func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { - x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + x.Client, err = x.NewClient(userName, password, authzID) if err != nil { return err } - x.ClientConversation = x.Client.NewConversation() + x.ClientConversation = x.NewConversation() return nil } diff --git a/internal/rest/client.go b/internal/rest/client.go index d8e0901bc..4cbfb765c 100644 --- a/internal/rest/client.go +++ b/internal/rest/client.go @@ -284,7 +284,6 @@ func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) { } // Returns success. - return } // ErrClientClosed returned when *Client is closed. diff --git a/internal/ringbuffer/ring_buffer.go b/internal/ringbuffer/ring_buffer.go index 314a0d748..76790bb6f 100644 --- a/internal/ringbuffer/ring_buffer.go +++ b/internal/ringbuffer/ring_buffer.go @@ -85,10 +85,8 @@ func (r *RingBuffer) SetBlocking(block bool) *RingBuffer { // A goroutine will be started and run until the provided context is canceled. func (r *RingBuffer) WithCancel(ctx context.Context) *RingBuffer { go func() { - select { - case <-ctx.Done(): - r.CloseWithError(ctx.Err()) - } + <-ctx.Done() + r.CloseWithError(ctx.Err()) }() return r }
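NOTE (editor): the final ringbuffer hunk replaces a single-case select with a plain blocking receive, and the earlier grid benchmark hunk replaces a for/select drain loop with a range over the channel; both match staticcheck's S1000 (use plain channel send or receive instead of a single-case select). A runnable sketch of the two idioms — channel contents and the timeout value are arbitrary:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	done := make(chan struct{})
	go func() {
		// A select with exactly one case and no default is just a
		// blocking receive, so write the receive directly.
		<-ctx.Done()
		fmt.Println("context closed:", ctx.Err())
		close(done)
	}()

	// Draining a channel until it is closed is a range loop; the
	// explicit `v, ok := <-ch` dance inside for/select is redundant.
	ch := make(chan int, 3)
	for i := 1; i <= 3; i++ {
		ch <- i
	}
	close(ch)
	for v := range ch {
		fmt.Println("got", v)
	}

	<-done
}

Both rewrites are behavior-preserving: a one-case select compiles to the same blocking receive, and ranging over a channel terminates exactly when the channel is closed and drained, which is what the removed ok-check expressed by hand.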