From 661b263e7717f01bfbfeb21e7e62af19aba30dc3 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Tue, 16 Nov 2021 09:28:29 -0800 Subject: [PATCH] add gocritic/ruleguard checks back again, cleanup code. (#13665) - remove some duplicated code - reported a bug, separately fixed in #13664 - using strings.ReplaceAll() when needed - using filepath.ToSlash() use when needed - remove all non-Go style comments from the codebase Co-authored-by: Aditya Manthramurthy --- .golangci.yml | 9 +- Makefile | 2 +- cmd/admin-handlers-config-kv.go | 8 +- cmd/admin-handlers-users.go | 7 +- cmd/admin-handlers.go | 5 +- cmd/admin-heal-ops.go | 2 +- cmd/admin-router.go | 9 +- cmd/api-errors.go | 10 +- cmd/api-router.go | 7 +- cmd/bucket-handlers.go | 2 +- cmd/bucket-listobjects-handlers.go | 4 +- cmd/bucket-policy.go | 7 +- cmd/bucket-replication-utils.go | 2 +- cmd/bucket-replication-utils_test.go | 27 +++-- cmd/bucket-replication.go | 2 +- cmd/bucket-replication_test.go | 24 ++-- cmd/bucket-stats.go | 2 +- cmd/bucket-targets.go | 2 +- cmd/common-main.go | 2 + cmd/config-current.go | 8 +- cmd/config-versions.go | 3 - cmd/consolelogger.go | 2 +- cmd/data-scanner.go | 4 +- cmd/data-update-tracker.go | 4 +- cmd/disk-cache-backend.go | 2 +- cmd/dummy-data-generator_test.go | 2 +- cmd/endpoint-ellipses.go | 2 +- cmd/endpoint.go | 1 + cmd/erasure-bucket.go | 2 +- cmd/erasure-coding.go | 2 +- cmd/erasure-decode_test.go | 2 +- cmd/erasure-healing-common_test.go | 6 +- cmd/erasure-healing_test.go | 2 +- cmd/erasure-metadata.go | 26 ++--- cmd/erasure-object.go | 2 +- cmd/fs-v1-helpers.go | 2 +- cmd/fs-v1-rwpool.go | 2 +- cmd/fs-v1.go | 4 +- cmd/gateway/azure/gateway-azure.go | 4 +- cmd/gateway/gcs/gateway-gcs.go | 2 +- cmd/gateway/s3/gateway-s3-sse.go | 2 +- cmd/handler-utils.go | 2 +- cmd/iam-store.go | 6 +- cmd/iam.go | 89 +++++--------- cmd/metacache-entries.go | 12 +- cmd/metacache-entries_test.go | 1 + cmd/metrics-v2.go | 6 +- cmd/net.go | 12 +- cmd/object-api-common.go | 2 +- cmd/object-api-errors.go | 8 +- cmd/object-api-listobjects_test.go | 16 +-- cmd/object-api-multipart_test.go | 28 ++--- cmd/object-api-utils.go | 2 +- cmd/object-handlers.go | 18 +-- cmd/object-handlers_test.go | 16 +-- cmd/object_api_suite_test.go | 4 +- cmd/post-policy_test.go | 12 +- cmd/postpolicyform.go | 12 +- cmd/server_test.go | 6 +- cmd/signature-v4-parser.go | 2 +- cmd/signature-v4.go | 8 +- cmd/storage-rest_test.go | 5 - cmd/streaming-signature-v4.go | 2 +- cmd/test-utils_test.go | 44 +++---- cmd/update_test.go | 4 +- cmd/url_test.go | 4 +- cmd/utils.go | 4 +- cmd/utils_test.go | 6 +- cmd/xl-storage.go | 13 ++- cmd/xl-storage_test.go | 7 +- docs/debugging/inspect/main.go | 12 +- internal/auth/credentials.go | 4 +- internal/bucket/bandwidth/monitor.go | 4 +- internal/bucket/lifecycle/lifecycle.go | 2 +- internal/bucket/object/lock/lock.go | 1 + internal/bucket/object/lock/lock_test.go | 5 + internal/bucket/replication/destination.go | 10 +- internal/bucket/replication/replication.go | 2 +- .../bucket/replication/replication_test.go | 110 +++++++++--------- internal/bucket/replication/rule_test.go | 8 +- internal/config/certs.go | 6 +- internal/config/certsinfo.go | 1 + internal/config/dns/operator_dns.go | 4 +- internal/config/heal/heal.go | 2 +- internal/config/identity/ldap/config.go | 8 +- internal/config/identity/openid/jwks_test.go | 1 + internal/config/legacy.go | 2 +- internal/crypto/key.go | 6 +- internal/crypto/key_test.go | 2 +- internal/crypto/sse-kms.go | 2 +- internal/crypto/sse-s3.go | 2 +- internal/disk/directio_unix.go 
| 6 +- internal/dsync/drwmutex.go | 8 +- internal/dsync/dsync-server_test.go | 6 +- internal/event/config.go | 2 +- internal/event/rules.go | 2 +- .../target/kafka_scram_client_contrib.go | 7 +- internal/event/target/mqtt.go | 6 +- internal/event/target/nats.go | 13 +-- internal/ioutil/append-file_nix.go | 2 +- internal/ioutil/ioutil.go | 6 +- internal/kms/single-key.go | 2 +- internal/logger/logonce.go | 8 +- .../kafka/kafka_scram_client_contrib.go | 7 +- internal/rest/client_test.go | 1 + internal/s3select/csv/reader_contrib_test.go | 3 +- internal/s3select/json/record.go | 2 +- internal/s3select/simdj/reader_amd64_test.go | 2 +- internal/s3select/sql/parser.go | 4 +- internal/s3select/sql/value.go | 2 + internal/smart/smart.go | 6 +- 111 files changed, 409 insertions(+), 450 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 9278ec7ab..80637358b 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -23,12 +23,19 @@ linters: - structcheck - unconvert - varcheck + - gocritic issues: exclude-use-default: false exclude: - should have a package comment - error strings should not be capitalized or end with punctuation or a newline + # todo fix these when we get enough time. + - "singleCaseSwitch: should rewrite switch statement to if statement" + - "unlambda: replace" + - "captLocal:" + - "ifElseChain:" + - "elseif:" service: - golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly + golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly diff --git a/Makefile b/Makefile index 9a4caf551..859e04093 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ help: ## print this help getdeps: ## fetch necessary dependencies @mkdir -p ${GOPATH}/bin - @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1 + @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0 @echo "Installing msgp" && go install -v github.com/tinylib/msgp@latest @echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest diff --git a/cmd/admin-handlers-config-kv.go b/cmd/admin-handlers-config-kv.go index a84822cbf..23c385f56 100644 --- a/cmd/admin-handlers-config-kv.go +++ b/cmd/admin-handlers-config-kv.go @@ -215,11 +215,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r * return } } - } else { - if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } + } else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return } } diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go index f50b4832c..192ff72a0 100644 --- a/cmd/admin-handlers-users.go +++ b/cmd/admin-handlers-users.go @@ -323,11 +323,12 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) status := vars["status"] var err error - if status == statusEnabled { + switch status { + case statusEnabled: err = globalIAMSys.SetGroupStatus(ctx, group, true) - } else if status == statusDisabled { + case statusDisabled: err = globalIAMSys.SetGroupStatus(ctx, group, false) - } else { + default: err = errInvalidArgument } if err != nil { diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 
adb5ecc2b..5c70d5a31 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -1356,6 +1356,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage { ldap := madmin.LDAP{} if globalLDAPConfig.Enabled { ldapConn, err := globalLDAPConfig.Connect() + //nolint:gocritic if err != nil { ldap.Status = string(madmin.ItemOffline) } else if ldapConn == nil { @@ -1636,8 +1637,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque anonymizeCmdLine := func(cmdLine string) string { if !globalIsDistErasure { // FS mode - single server - hard code to `server1` - anonCmdLine := strings.Replace(cmdLine, globalLocalNodeName, "server1", -1) - return strings.Replace(anonCmdLine, globalMinioConsoleHost, "server1", -1) + anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1") + return strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1") } // Server start command regex groups: diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go index d86f16b53..616ba2770 100644 --- a/cmd/admin-heal-ops.go +++ b/cmd/admin-heal-ops.go @@ -491,7 +491,7 @@ func (h *healSequence) getScannedItemsCount() int64 { defer h.mutex.RUnlock() for _, v := range h.scannedItemsMap { - count = count + v + count += v } return count } diff --git a/cmd/admin-router.go b/cmd/admin-router.go index 256def854..3449d3ef6 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -43,8 +43,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { // Admin router adminRouter := router.PathPrefix(adminPathPrefix).Subrouter() - /// Service operations - adminVersions := []string{ adminAPIVersionPrefix, } @@ -71,7 +69,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler))) if globalIsDistErasure || globalIsErasure { - /// Heal operations + // Heal operations // Heal processing endpoint. adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler))) @@ -79,9 +77,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler))) adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler))) - - /// Health operations - } // Profiling operations @@ -106,7 +101,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}") } - /// Config import/export bulk operations + // Config import/export bulk operations if enableConfigOps { // Get config adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler))) diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 7e249a467..fabb2dc8b 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -973,7 +973,7 @@ var errorCodes = errorCodeMap{ HTTPStatusCode: http.StatusNotFound, }, - /// Bucket notification related errors. + // Bucket notification related errors. 
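The anonymizeCmdLine hunk above is one of several in this patch that swap strings.Replace(s, old, new, -1) for strings.ReplaceAll, as called out in the commit message. A minimal sketch of the equivalence; the command line below is a made-up example, not taken from MinIO:

package main

import (
	"fmt"
	"strings"
)

func main() {
	cmdLine := "minio server http://node1/data http://node1/backup"
	// strings.ReplaceAll(s, old, new) is defined as strings.Replace(s, old, new, -1);
	// gocritic flags the explicit -1 form now that Go (1.12+) provides ReplaceAll.
	before := strings.Replace(cmdLine, "node1", "server1", -1)
	after := strings.ReplaceAll(cmdLine, "node1", "server1")
	fmt.Println(before == after) // true
}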
ErrEventNotification: { Code: "InvalidArgument", Description: "A specified event is not supported for notifications.", @@ -1120,14 +1120,14 @@ var errorCodes = errorCodeMap{ HTTPStatusCode: http.StatusForbidden, }, - /// S3 extensions. + // S3 extensions. ErrContentSHA256Mismatch: { Code: "XAmzContentSHA256Mismatch", Description: "The provided 'x-amz-content-sha256' header does not match what was computed.", HTTPStatusCode: http.StatusBadRequest, }, - /// MinIO extensions. + // MinIO extensions. ErrStorageFull: { Code: "XMinioStorageFull", Description: "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.", @@ -1370,7 +1370,7 @@ var errorCodes = errorCodeMap{ Description: "The continuation token provided is incorrect", HTTPStatusCode: http.StatusBadRequest, }, - //S3 Select API Errors + // S3 Select API Errors ErrEmptyRequestBody: { Code: "EmptyRequestBody", Description: "Request body cannot be empty.", @@ -2074,6 +2074,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) { default: var ie, iw int // This work-around is to handle the issue golang/go#30648 + //nolint:gocritic if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()), "request declared a Content-Length of %d but only wrote %d bytes", &ie, &iw); ferr != nil { @@ -2229,6 +2230,7 @@ func toAPIError(ctx context.Context, err error) APIError { } // Add more Gateway SDKs here if any in future. default: + //nolint:gocritic if errors.Is(err, errMalformedEncoding) { apiErr = APIError{ Code: "BadRequest", diff --git a/cmd/api-router.go b/cmd/api-router.go index 920cac177..5f51a5dee 100644 --- a/cmd/api-router.go +++ b/cmd/api-router.go @@ -301,7 +301,8 @@ func registerAPIRouter(router *mux.Router) { router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc( collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "") - /// Bucket operations + // Bucket operations + // GetBucketLocation router.Methods(http.MethodGet).HandlerFunc( collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "") @@ -355,7 +356,7 @@ func registerAPIRouter(router *mux.Router) { // GetBucketTaggingHandler router.Methods(http.MethodGet).HandlerFunc( collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "") - //DeleteBucketWebsiteHandler + // DeleteBucketWebsiteHandler router.Methods(http.MethodDelete).HandlerFunc( collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "") // DeleteBucketTaggingHandler @@ -452,7 +453,7 @@ func registerAPIRouter(router *mux.Router) { collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler))))) } - /// Root operation + // Root operation // ListenNotification apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc( diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 3c115483b..5a5801b3a 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -903,7 +903,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") { // S3 feature to replace ${filename} found in Key form field // by the filename attribute passed in multipart - formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1)) + formValues.Set("Key", 
strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName)) } object := trimLeadingSlash(formValues.Get("Key")) diff --git a/cmd/bucket-listobjects-handlers.go b/cmd/bucket-listobjects-handlers.go index 27b01d83e..3ffe118df 100644 --- a/cmd/bucket-listobjects-handlers.go +++ b/cmd/bucket-listobjects-handlers.go @@ -59,8 +59,8 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int } if encodingType != "" { - // Only url encoding type is supported - if strings.ToLower(encodingType) != "url" { + // AWS S3 spec only supports 'url' encoding type + if !strings.EqualFold(encodingType, "url") { return ErrInvalidEncodingMethod } } diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index e8db0ac17..b16c5c2d4 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -172,11 +172,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[ vStr, ok := v.(string) if ok { // Special case for AD/LDAP STS users - if k == ldapUser { + switch k { + case ldapUser: args["user"] = []string{vStr} - } else if k == ldapUserN { + case ldapUserN: args["username"] = []string{vStr} - } else { + default: args[k] = []string{vStr} } } diff --git a/cmd/bucket-replication-utils.go b/cmd/bucket-replication-utils.go index e3a2965e0..2557762e0 100644 --- a/cmd/bucket-replication-utils.go +++ b/cmd/bucket-replication-utils.go @@ -172,7 +172,7 @@ func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.Sta type replicateTargetDecision struct { Replicate bool // Replicate to this target Synchronous bool // Synchronous replication configured. - Arn string //ARN of replication target + Arn string // ARN of replication target ID string } diff --git a/cmd/bucket-replication-utils_test.go b/cmd/bucket-replication-utils_test.go index 8737c5c23..c1b821ff8 100644 --- a/cmd/bucket-replication-utils_test.go +++ b/cmd/bucket-replication-utils_test.go @@ -32,7 +32,7 @@ var replicatedInfosTests = []struct { expectedOpType replication.Type expectedAction replicationAction }{ - { //1. empty tgtInfos slice + { // 1. empty tgtInfos slice name: "no replicated targets", tgtInfos: []replicatedTargetInfo{}, expectedCompletedSize: 0, @@ -41,7 +41,7 @@ var replicatedInfosTests = []struct { expectedOpType: replication.UnsetReplicationType, expectedAction: replicateNone, }, - { //2. replication completed to single target + { // 2. replication completed to single target name: "replication completed to single target", tgtInfos: []replicatedTargetInfo{ { @@ -59,7 +59,7 @@ var replicatedInfosTests = []struct { expectedOpType: replication.ObjectReplicationType, expectedAction: replicateAll, }, - { //3. replication completed to single target; failed to another + { // 3. replication completed to single target; failed to another name: "replication completed to single target", tgtInfos: []replicatedTargetInfo{ { @@ -84,7 +84,7 @@ var replicatedInfosTests = []struct { expectedOpType: replication.ObjectReplicationType, expectedAction: replicateAll, }, - { //4. replication pending on one target; failed to another + { // 4. replication pending on one target; failed to another name: "replication completed to single target", tgtInfos: []replicatedTargetInfo{ { @@ -137,7 +137,7 @@ var parseReplicationDecisionTest = []struct { expDsc ReplicateDecision expErr error }{ - { //1. + { // 1. name: "empty string", dsc: "", expDsc: ReplicateDecision{ @@ -146,7 +146,7 @@ var parseReplicationDecisionTest = []struct { expErr: nil, }, - { //2. + { // 2. 
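Most of the hunks in these test tables only insert a space after the slashes; gocritic's commentFormatting checker flags //comment written without one. A small runnable sketch of the two forms:

package main

import "fmt"

// gocritic commentFormatting only checks lexical shape:
//flagged: no space between the slashes and the text
// accepted: a single space after the slashes
func greet() { fmt.Println("hello") }

func main() { greet() }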
name: "replicate decision for one target", dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id", expErr: nil, @@ -156,7 +156,7 @@ var parseReplicationDecisionTest = []struct { }, }, }, - { //3. + { // 3. name: "replicate decision for multiple targets", dsc: "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2", expErr: nil, @@ -167,7 +167,7 @@ var parseReplicationDecisionTest = []struct { }, }, }, - { //4. + { // 4. name: "invalid format replicate decision for one target", dsc: "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id", expErr: errInvalidReplicateDecisionFormat, @@ -181,7 +181,6 @@ var parseReplicationDecisionTest = []struct { func TestParseReplicateDecision(t *testing.T) { for i, test := range parseReplicationDecisionTest { - //dsc, err := parseReplicateDecision(test.dsc) dsc, err := parseReplicateDecision(test.expDsc.String()) if err != nil { @@ -208,22 +207,22 @@ var replicationStateTest = []struct { arn string expStatus replication.StatusType }{ - { //1. no replication status header + { // 1. no replication status header name: "no replicated targets", rs: ReplicationState{}, expStatus: replication.StatusType(""), }, - { //2. replication status for one target + { // 2. replication status for one target name: "replication status for one target", rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}}, expStatus: replication.Pending, }, - { //3. replication status for one target - incorrect format + { // 3. replication status for one target - incorrect format name: "replication status for one target", rs: ReplicationState{ReplicationStatusInternal: "arn1=PENDING"}, expStatus: replication.StatusType(""), }, - { //4. replication status for 3 targets, one of them failed + { // 4. replication status for 3 targets, one of them failed name: "replication status for 3 targets - one failed", rs: ReplicationState{ ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;", @@ -231,7 +230,7 @@ var replicationStateTest = []struct { }, expStatus: replication.Failed, }, - { //5. replication status for replica version + { // 5. replication status for replica version name: "replication status for replica version", rs: ReplicationState{ReplicationStatusInternal: string(replication.Replica)}, expStatus: replication.Replica, diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 85ac08e1e..6b3d73427 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -1740,7 +1740,7 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim } rs, ok := oi.UserDefined[targetResetHeader(arn)] if !ok { - rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] //for backward compatibility + rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] // for backward compatibility } if !ok { // existing object replication is enabled and object version is unreplicated so far. 
if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested diff --git a/cmd/bucket-replication_test.go b/cmd/bucket-replication_test.go index aee42ad98..8ffae9723 100644 --- a/cmd/bucket-replication_test.go +++ b/cmd/bucket-replication_test.go @@ -55,25 +55,25 @@ var replicationConfigTests = []struct { tgtStatuses map[string]replication.StatusType expectedSync bool }{ - { //1. no replication config + { // 1. no replication config name: "no replication config", info: ObjectInfo{Size: 100}, rcfg: replicationConfig{Config: nil}, expectedSync: false, }, - { //2. existing object replication config enabled, no versioning + { // 2. existing object replication config enabled, no versioning name: "existing object replication config enabled, no versioning", info: ObjectInfo{Size: 100}, rcfg: replicationConfig{Config: &configs[0]}, expectedSync: false, }, - { //3. existing object replication config enabled, versioning suspended + { // 3. existing object replication config enabled, versioning suspended name: "existing object replication config enabled, versioning suspended", info: ObjectInfo{Size: 100, VersionID: nullVersionID}, rcfg: replicationConfig{Config: &configs[0]}, expectedSync: false, }, - { //4. existing object replication enabled, versioning enabled; no reset in progress + { // 4. existing object replication enabled, versioning enabled; no reset in progress name: "existing object replication enabled, versioning enabled; no reset in progress", info: ObjectInfo{Size: 100, ReplicationStatus: replication.Completed, @@ -130,7 +130,7 @@ var replicationConfigTests2 = []struct { }}}}, expectedSync: true, }, - { //3. replication status unset + { // 3. replication status unset name: "existing object replication on pre-existing unreplicated object", info: ObjectInfo{Size: 100, ReplicationStatus: replication.StatusType(""), @@ -142,7 +142,7 @@ var replicationConfigTests2 = []struct { dsc: ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}}, expectedSync: true, }, - { //4. replication status Complete + { // 4. replication status Complete name: "existing object replication on object in Completed replication status", info: ObjectInfo{Size: 100, ReplicationStatusInternal: "arn1:COMPLETED", @@ -155,7 +155,7 @@ var replicationConfigTests2 = []struct { }}}}, expectedSync: false, }, - { //5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present + { // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present name: "existing object replication with reset in progress and object in Pending status", info: ObjectInfo{Size: 100, ReplicationStatusInternal: "arn1:PENDING;", @@ -172,7 +172,7 @@ var replicationConfigTests2 = []struct { }}}, }, }, - { //6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present + { // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present name: "existing object replication with reset in progress and object in Failed status", info: ObjectInfo{Size: 100, ReplicationStatusInternal: "arn1:FAILED;", @@ -189,7 +189,7 @@ var replicationConfigTests2 = []struct { }, expectedSync: true, }, - { //7. existing object replication enabled, versioning enabled, replication status unset & reset ID present + { // 7. 
existing object replication enabled, versioning enabled, replication status unset & reset ID present name: "existing object replication with reset in progress and object never replicated before", info: ObjectInfo{Size: 100, ReplicationStatus: replication.StatusType(""), @@ -207,7 +207,7 @@ var replicationConfigTests2 = []struct { expectedSync: true, }, - { //8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present + { // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present name: "existing object replication enabled - reset in progress for an object in Completed status", info: ObjectInfo{Size: 100, ReplicationStatusInternal: "arn1:COMPLETED;", @@ -224,7 +224,7 @@ var replicationConfigTests2 = []struct { }}}, }, }, - { //9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different + { // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different name: "existing object replication enabled, newer reset in progress on object in Pending replication status", info: ObjectInfo{Size: 100, ReplicationStatusInternal: "arn1:PENDING;", @@ -243,7 +243,7 @@ var replicationConfigTests2 = []struct { }}}, }, }, - { //10. existing object replication enabled, versioning enabled, replication status Complete & reset done + { // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done name: "reset done on object in Completed Status - ineligbile for re-replication", info: ObjectInfo{Size: 100, ReplicationStatusInternal: "arn1:COMPLETED;", diff --git a/cmd/bucket-stats.go b/cmd/bucket-stats.go index 40144688f..bf6d60b2e 100644 --- a/cmd/bucket-stats.go +++ b/cmd/bucket-stats.go @@ -56,7 +56,7 @@ func (brs BucketReplicationStats) Clone() BucketReplicationStats { c := BucketReplicationStats{ Stats: make(map[string]*BucketReplicationStat, len(brs.Stats)), } - //this is called only by replicationStats cache and already holds a read lock before calling Clone() + // This is called only by replicationStats cache and already holds a read lock before calling Clone() for arn, st := range brs.Stats { c.Stats[arn] = &BucketReplicationStat{ FailedSize: atomic.LoadInt64(&st.FailedSize), diff --git a/cmd/bucket-targets.go b/cmd/bucket-targets.go index 124588523..5e38c9ea1 100644 --- a/cmd/bucket-targets.go +++ b/cmd/bucket-targets.go @@ -444,6 +444,6 @@ type TargetClient struct { StorageClass string // storage class on remote disableProxy bool healthCancelFn context.CancelFunc // cancellation function for client healthcheck - ARN string //ARN to uniquely identify remote target + ARN string // ARN to uniquely identify remote target ResetID string } diff --git a/cmd/common-main.go b/cmd/common-main.go index b2f2d9956..d7638ce77 100644 --- a/cmd/common-main.go +++ b/cmd/common-main.go @@ -526,6 +526,7 @@ func handleCommonEnvVars() { // Warn user if deprecated environment variables, // "MINIO_ACCESS_KEY" and "MINIO_SECRET_KEY", are defined // Check all error conditions first + //nolint:gocritic if !env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) { logger.Fatal(config.ErrMissingEnvCredentialRootUser(nil), "Unable to start MinIO") } else if env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) { @@ -544,6 +545,7 @@ func handleCommonEnvVars() { var user, password string haveRootCredentials := false haveAccessCredentials := false + //nolint:gocritic if 
env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) { user = env.Get(config.EnvRootUser, "") password = env.Get(config.EnvRootPassword, "") diff --git a/cmd/config-current.go b/cmd/config-current.go index 0d37dccaf..283d2c92f 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -696,9 +696,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) { // to list the ENV, for regular k/v EnableKey is // implicit, for ENVs we cannot make it implicit. if subSysHelp.MultipleTargets { - envK := config.EnvPrefix + strings.Join([]string{ - strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey), - }, config.EnvWordDelimiter) + envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(madmin.EnableKey) envHelp = append(envHelp, config.HelpKV{ Key: envK, Description: fmt.Sprintf("enable %s target, default is 'off'", subSys), @@ -707,9 +705,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) { }) } for _, hkv := range h { - envK := config.EnvPrefix + strings.Join([]string{ - strings.ToTitle(subSys), strings.ToTitle(hkv.Key), - }, config.EnvWordDelimiter) + envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(hkv.Key) envHelp = append(envHelp, config.HelpKV{ Key: envK, Description: hkv.Description, diff --git a/cmd/config-versions.go b/cmd/config-versions.go index 2c2298c19..370026f2e 100644 --- a/cmd/config-versions.go +++ b/cmd/config-versions.go @@ -34,14 +34,12 @@ import ( "github.com/minio/pkg/quick" ) -/////////////////// Config V1 /////////////////// type configV1 struct { Version string `json:"version"` AccessKey string `json:"accessKeyId"` SecretKey string `json:"secretAccessKey"` } -/////////////////// Config V2 /////////////////// type configV2 struct { Version string `json:"version"` Credentials struct { @@ -63,7 +61,6 @@ type configV2 struct { } `json:"fileLogger"` } -/////////////////// Config V3 /////////////////// // backendV3 type. 
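The GetHelp hunks just above replace strings.Join over a two-element slice with plain concatenation when building env keys; the result is byte-for-byte identical and skips the slice allocation. A sketch with hypothetical stand-ins for config.EnvPrefix and config.EnvWordDelimiter:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const envPrefix, delim = "MINIO_", "_"
	subSys, key := "NOTIFY", "ENABLE"
	// The old form built a throwaway two-element slice just to join it.
	joined := envPrefix + strings.Join([]string{strings.ToTitle(subSys), strings.ToTitle(key)}, delim)
	// The new form concatenates directly.
	concat := envPrefix + strings.ToTitle(subSys) + delim + strings.ToTitle(key)
	fmt.Println(joined == concat, concat) // true MINIO_NOTIFY_ENABLE
}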
type backendV3 struct { Type string `json:"type"` diff --git a/cmd/consolelogger.go b/cmd/consolelogger.go index 04f54c8f2..fd8f31570 100644 --- a/cmd/consolelogger.go +++ b/cmd/consolelogger.go @@ -32,7 +32,7 @@ import ( // number of log messages to buffer const defaultLogBufferCount = 10000 -//HTTPConsoleLoggerSys holds global console logger state +// HTTPConsoleLoggerSys holds global console logger state type HTTPConsoleLoggerSys struct { sync.RWMutex pubsub *pubsub.PubSub diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go index 5bcff0044..dbb94d65e 100644 --- a/cmd/data-scanner.go +++ b/cmd/data-scanner.go @@ -775,7 +775,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int if flat.Objects < dataScannerCompactLeastObject { if f.dataUsageScannerDebug && flat.Objects > 1 { // Disabled, rather chatty: - //console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat) + // console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat) } compact = true } else { @@ -791,7 +791,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int } if f.dataUsageScannerDebug && compact { // Disabled, rather chatty: - //console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat) + // console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat) } } if compact { diff --git a/cmd/data-update-tracker.go b/cmd/data-update-tracker.go index 069e497e4..0e5870e7a 100644 --- a/cmd/data-update-tracker.go +++ b/cmd/data-update-tracker.go @@ -203,7 +203,7 @@ func (d *dataUpdateTracker) latestWithDir(dir string) uint64 { // start a saver goroutine. // All of these will exit when the context is canceled. func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) { - if len(drives) <= 0 { + if len(drives) == 0 { logger.LogIf(ctx, errors.New("dataUpdateTracker.start: No drives specified")) return } @@ -220,7 +220,7 @@ func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) { // If no valid data usage tracker can be found d will remain unchanged. // If object is shared the caller should lock it. 
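The dataUpdateTracker hunks here change len(drives) <= 0 to len(drives) == 0; len can never return a negative value, so the equality form states the intent exactly. A minimal sketch:

package main

import (
	"errors"
	"fmt"
)

func validateDrives(drives []string) error {
	// len() is always >= 0, so == 0 is the idiomatic empty check.
	if len(drives) == 0 {
		return errors.New("no drives specified")
	}
	return nil
}

func main() { fmt.Println(validateDrives(nil)) }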
func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) { - if len(drives) <= 0 { + if len(drives) == 0 { logger.LogIf(ctx, errors.New("dataUpdateTracker.load: No drives specified")) return } diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go index ef8593470..2a202aa51 100644 --- a/cmd/disk-cache-backend.go +++ b/cmd/disk-cache-backend.go @@ -773,7 +773,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma return nil, err } - reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()}) + reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey, MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()}) if err != nil { return nil, crypto.ErrInvalidCustomerKey } diff --git a/cmd/dummy-data-generator_test.go b/cmd/dummy-data-generator_test.go index 857bbd55e..1038bf45f 100644 --- a/cmd/dummy-data-generator_test.go +++ b/cmd/dummy-data-generator_test.go @@ -61,7 +61,7 @@ func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker { panic("Negative rotations are not allowed") } - skipOffset = skipOffset % int64(len(alphabets)) + skipOffset %= int64(len(alphabets)) as := make([]byte, 2*len(alphabets)) copy(as, alphabets) copy(as[len(alphabets):], alphabets) diff --git a/cmd/endpoint-ellipses.go b/cmd/endpoint-ellipses.go index 1fbb787dc..9feb48a03 100644 --- a/cmd/endpoint-ellipses.go +++ b/cmd/endpoint-ellipses.go @@ -242,7 +242,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 { for _, argPattern := range argPatterns { var totalSize uint64 = 1 for _, p := range argPattern { - totalSize = totalSize * uint64(len(p.Seq)) + totalSize *= uint64(len(p.Seq)) } totalSizes = append(totalSizes, totalSize) } diff --git a/cmd/endpoint.go b/cmd/endpoint.go index c622f9c67..e6381d464 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -497,6 +497,7 @@ func NewEndpoints(args ...string) (endpoints Endpoints, err error) { } // All endpoints have to be same type and scheme if applicable. + //nolint:gocritic if i == 0 { endpointType = endpoint.Type() scheme = endpoint.Scheme diff --git a/cmd/erasure-bucket.go b/cmd/erasure-bucket.go index 22f451781..ea218ffca 100644 --- a/cmd/erasure-bucket.go +++ b/cmd/erasure-bucket.go @@ -32,7 +32,7 @@ var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnform // list all errors that can be ignored in a bucket metadata operation. var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound) -/// Bucket operations +// Bucket operations // MakeBucket - make a bucket. func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error { diff --git a/cmd/erasure-coding.go b/cmd/erasure-coding.go index f6f76b9c1..145300d50 100644 --- a/cmd/erasure-coding.go +++ b/cmd/erasure-coding.go @@ -95,7 +95,7 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) // It returns an error if the decoding failed. 
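The DecodeDataBlocks change that follows, like the sio.Config and erasure test hunks above, drops a redundant full-slice expression: x[:] on something that is already a slice is a no-op (gocritic's unslice check). The [:] form remains necessary when converting an array to a slice, as this sketch illustrates:

package main

import "fmt"

func useKey(k []byte) { fmt.Println(len(k)) }

func main() {
	data := [][]byte{[]byte("a"), nil}
	// data is already a slice; data[:] would be a no-op re-slice that unslice flags.
	for _, b := range data {
		fmt.Println(len(b))
	}

	var key [32]byte
	// For an array, [:] is meaningful: it converts the array to a slice.
	useKey(key[:])
}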
func (e *Erasure) DecodeDataBlocks(data [][]byte) error { var isZero = 0 - for _, b := range data[:] { + for _, b := range data { if len(b) == 0 { isZero++ break diff --git a/cmd/erasure-decode_test.go b/cmd/erasure-decode_test.go index 608811d86..e0fd4f901 100644 --- a/cmd/erasure-decode_test.go +++ b/cmd/erasure-decode_test.go @@ -110,7 +110,7 @@ func TestErasureDecode(t *testing.T) { for i, disk := range disks { writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize()) } - n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1) + n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1) closeBitrotWriters(writers) if err != nil { setup.Remove() diff --git a/cmd/erasure-healing-common_test.go b/cmd/erasure-healing-common_test.go index f563ef56b..fde516f88 100644 --- a/cmd/erasure-healing-common_test.go +++ b/cmd/erasure-healing-common_test.go @@ -235,7 +235,7 @@ func TestListOnlineDisks(t *testing.T) { if err != nil { t.Fatalf("Failed to open %s: %s\n", filePath, err) } - f.Write([]byte("oops")) // Will cause bitrot error + f.WriteString("oops") // Will cause bitrot error f.Close() break } @@ -414,7 +414,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { if err != nil { t.Fatalf("Failed to open %s: %s\n", filePath, err) } - f.Write([]byte("oops")) // Will cause bitrot error + f.WriteString("oops") // Will cause bitrot error f.Close() break } @@ -563,7 +563,7 @@ func TestDisksWithAllParts(t *testing.T) { if err != nil { t.Fatalf("Failed to open %s: %s\n", filePath, err) } - f.Write([]byte("oops")) // Will cause bitrot error + f.WriteString("oops") // Will cause bitrot error f.Close() } } diff --git a/cmd/erasure-healing_test.go b/cmd/erasure-healing_test.go index 4c10ae6c2..ecc82b54f 100644 --- a/cmd/erasure-healing_test.go +++ b/cmd/erasure-healing_test.go @@ -163,7 +163,7 @@ func TestHealingDanglingObject(t *testing.T) { t.Fatal(err) } - //defer removeRoots(fsDirs) + defer removeRoots(fsDirs) // Everything is fine, should return nil objLayer, disks, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...)) diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go index dd3c88cad..a1a1da380 100644 --- a/cmd/erasure-metadata.go +++ b/cmd/erasure-metadata.go @@ -292,25 +292,25 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time. 
for i, meta := range metaArr { if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir { for _, part := range meta.Parts { - h.Write([]byte(fmt.Sprintf("part.%d", part.Number))) + fmt.Fprintf(h, "part.%d", part.Number) } - h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution))) + fmt.Fprintf(h, "%v", meta.Erasure.Distribution) // make sure that length of Data is same - h.Write([]byte(fmt.Sprintf("%v", len(meta.Data)))) + fmt.Fprintf(h, "%v", len(meta.Data)) // ILM transition fields - h.Write([]byte(meta.TransitionStatus)) - h.Write([]byte(meta.TransitionTier)) - h.Write([]byte(meta.TransitionedObjName)) - h.Write([]byte(meta.TransitionVersionID)) + fmt.Fprint(h, meta.TransitionStatus) + fmt.Fprint(h, meta.TransitionTier) + fmt.Fprint(h, meta.TransitionedObjName) + fmt.Fprint(h, meta.TransitionVersionID) // Server-side replication fields - h.Write([]byte(fmt.Sprintf("%v", meta.MarkDeleted))) - h.Write([]byte(meta.Metadata[string(meta.ReplicationState.ReplicaStatus)])) - h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)])) - h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)])) - h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationStatusInternal])) - h.Write([]byte(meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal])) + fmt.Fprintf(h, "%v", meta.MarkDeleted) + fmt.Fprint(h, meta.Metadata[string(meta.ReplicationState.ReplicaStatus)]) + fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)]) + fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)]) + fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationStatusInternal]) + fmt.Fprint(h, meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal]) metaHashes[i] = hex.EncodeToString(h.Sum(nil)) h.Reset() diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index 56cbd283a..2ef61e00a 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -46,7 +46,7 @@ import ( // list all errors which can be ignored in object operations. var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk) -/// Object Operations +// Object Operations func countOnlineDisks(onlineDisks []StorageAPI) (online int) { for _, onlineDisk := range onlineDisks { diff --git a/cmd/fs-v1-helpers.go b/cmd/fs-v1-helpers.go index 6c95ff093..1a996beff 100644 --- a/cmd/fs-v1-helpers.go +++ b/cmd/fs-v1-helpers.go @@ -327,7 +327,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc flags := os.O_CREATE | os.O_WRONLY if globalFSOSync { - flags = flags | os.O_SYNC + flags |= os.O_SYNC } writer, err := lock.Open(filePath, flags, 0666) if err != nil { diff --git a/cmd/fs-v1-rwpool.go b/cmd/fs-v1-rwpool.go index 07e6e31ac..3c3fe412d 100644 --- a/cmd/fs-v1-rwpool.go +++ b/cmd/fs-v1-rwpool.go @@ -109,7 +109,7 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) { } } - /// Save new reader on the map. + // Save new reader on the map. // It is possible by this time due to concurrent // i/o we might have another lock present. 
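The findFileInfoInQuorum hunks above stream formatted output straight into the hash with fmt.Fprintf instead of h.Write([]byte(fmt.Sprintf(...))); hash.Hash implements io.Writer, so the intermediate string and []byte round trip is unnecessary. A minimal sketch:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	h := sha256.New()
	// fmt.Fprintf accepts any io.Writer; hash.Hash qualifies,
	// so no []byte(fmt.Sprintf(...)) conversion is needed.
	fmt.Fprintf(h, "part.%d", 1)
	fmt.Fprint(h, "some-metadata")
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}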
Lookup diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index e0796f8d7..e83ef2848 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -398,7 +398,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs return cache, err } -/// Bucket operations +// Bucket operations // getBucketDir - will convert incoming bucket names to // corresponding valid bucket names on the backend in a platform @@ -601,7 +601,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, opts Delet return nil } -/// Object Operations +// Object Operations // CopyObject - copy object source object to destination object. // if source object and destination object are same we only diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go index 44f28ff6b..22bba7721 100644 --- a/cmd/gateway/azure/gateway-azure.go +++ b/cmd/gateway/azure/gateway-azure.go @@ -274,7 +274,7 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string) encodeKey := func(key string) string { tokens := strings.Split(key, "_") for i := range tokens { - tokens[i] = strings.Replace(tokens[i], "-", "_", -1) + tokens[i] = strings.ReplaceAll(tokens[i], "-", "_") } return strings.Join(tokens, "__") } @@ -367,7 +367,7 @@ func azurePropertiesToS3Meta(meta azblob.Metadata, props azblob.BlobHTTPHeaders, decodeKey := func(key string) string { tokens := strings.Split(key, "__") for i := range tokens { - tokens[i] = strings.Replace(tokens[i], "_", "-", -1) + tokens[i] = strings.ReplaceAll(tokens[i], "_", "-") } return strings.Join(tokens, "_") } diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go index 998dbba69..7054d9e9a 100644 --- a/cmd/gateway/gcs/gateway-gcs.go +++ b/cmd/gateway/gcs/gateway-gcs.go @@ -531,7 +531,7 @@ func toGCSPageToken(name string) string { byte(length & 0xFF), } - length = length >> 7 + length >>= 7 if length > 0 { b = append(b, byte(length&0xFF)) } diff --git a/cmd/gateway/s3/gateway-s3-sse.go b/cmd/gateway/s3/gateway-s3-sse.go index c0e42ba13..bfaa86516 100644 --- a/cmd/gateway/s3/gateway-s3-sse.go +++ b/cmd/gateway/s3/gateway-s3-sse.go @@ -668,7 +668,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje return oi, e } - //delete any unencrypted version of object that might be on the backend + // delete any unencrypted version of object that might be on the backend defer l.s3Objects.DeleteObject(ctx, bucket, object, opts) // Save the final object size and modtime. diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index ecff4ec50..592d92662 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -289,7 +289,7 @@ func validateFormFieldSize(ctx context.Context, formValues http.Header) error { // Extract form fields and file data from a HTTP POST Policy func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) { - /// HTML Form values + // HTML Form values fileName = "" // Canonicalize the form values into http.Header. diff --git a/cmd/iam-store.go b/cmd/iam-store.go index bccc2207c..b34e75730 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -171,7 +171,7 @@ func newMappedPolicy(policy string) MappedPolicy { // key options type options struct { - ttl int64 //expiry in seconds + ttl int64 // expiry in seconds } type iamWatchEvent struct { @@ -558,9 +558,7 @@ func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, mem // exist. 
gi = newGroupInfo(members) } else { - mergedMembers := append(gi.Members, members...) - uniqMembers := set.CreateStringSet(mergedMembers...).ToSlice() - gi.Members = uniqMembers + gi.Members = set.CreateStringSet(append(gi.Members, members...)...).ToSlice() } if err := store.saveGroupInfo(ctx, group, gi); err != nil { diff --git a/cmd/iam.go b/cmd/iam.go index 42ec9ba31..021777101 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -351,67 +351,34 @@ func (sys *IAMSys) loadWatchedEvent(ctx context.Context, event iamWatchEvent) (e ctx, cancel := context.WithTimeout(ctx, defaultContextTimeout) defer cancel() - if event.isCreated { - switch { - case usersPrefix: - accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix)) - err = sys.store.UserNotificationHandler(ctx, accessKey, regUser) - case stsPrefix: - accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix)) - err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser) - case svcPrefix: - accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix)) - err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser) - case groupsPrefix: - group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix)) - err = sys.store.GroupNotificationHandler(ctx, group) - case policyPrefix: - policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix)) - err = sys.store.PolicyNotificationHandler(ctx, policyName) - case policyDBUsersPrefix: - policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix) - user := strings.TrimSuffix(policyMapFile, ".json") - err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser) - case policyDBSTSUsersPrefix: - policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix) - user := strings.TrimSuffix(policyMapFile, ".json") - err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser) - case policyDBGroupsPrefix: - policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix) - user := strings.TrimSuffix(policyMapFile, ".json") - err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser) - } - } else { - // delete event - switch { - case usersPrefix: - accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix)) - err = sys.store.UserNotificationHandler(ctx, accessKey, regUser) - case stsPrefix: - accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix)) - err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser) - case svcPrefix: - accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix)) - err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser) - case groupsPrefix: - group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix)) - err = sys.store.GroupNotificationHandler(ctx, group) - case policyPrefix: - policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix)) - err = sys.store.PolicyNotificationHandler(ctx, policyName) - case policyDBUsersPrefix: - policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix) - user := strings.TrimSuffix(policyMapFile, ".json") - err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser) - case policyDBSTSUsersPrefix: - policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix) - user := strings.TrimSuffix(policyMapFile, ".json") - err = sys.store.PolicyMappingNotificationHandler(ctx, user, 
false, stsUser) - case policyDBGroupsPrefix: - policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix) - user := strings.TrimSuffix(policyMapFile, ".json") - err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser) - } + switch { + case usersPrefix: + accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix)) + err = sys.store.UserNotificationHandler(ctx, accessKey, regUser) + case stsPrefix: + accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix)) + err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser) + case svcPrefix: + accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix)) + err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser) + case groupsPrefix: + group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix)) + err = sys.store.GroupNotificationHandler(ctx, group) + case policyPrefix: + policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix)) + err = sys.store.PolicyNotificationHandler(ctx, policyName) + case policyDBUsersPrefix: + policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix) + user := strings.TrimSuffix(policyMapFile, ".json") + err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser) + case policyDBSTSUsersPrefix: + policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix) + user := strings.TrimSuffix(policyMapFile, ".json") + err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser) + case policyDBGroupsPrefix: + policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix) + user := strings.TrimSuffix(policyMapFile, ".json") + err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser) } return err } diff --git a/cmd/metacache-entries.go b/cmd/metacache-entries.go index 802e03d43..3df3a0ac6 100644 --- a/cmd/metacache-entries.go +++ b/cmd/metacache-entries.go @@ -620,11 +620,9 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan< } best = other bestIdx = otherIdx - } else { + } else if err := selectFrom(otherIdx); err != nil { // Keep best, replace "other" - if err := selectFrom(otherIdx); err != nil { - return err - } + return err } continue } @@ -636,10 +634,8 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan< if best.name > last { out <- *best last = best.name - } else { - if serverDebugLog { - console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last) - } + } else if serverDebugLog { + console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last) } // Replace entry we just sent. if err := selectFrom(bestIdx); err != nil { diff --git a/cmd/metacache-entries_test.go b/cmd/metacache-entries_test.go index 518bd12b6..30995d7c6 100644 --- a/cmd/metacache-entries_test.go +++ b/cmd/metacache-entries_test.go @@ -81,6 +81,7 @@ func Test_metaCacheEntries_merge(t *testing.T) { } // Merge b into a a.merge(b, -1) + //nolint:gocritic want := append(loadMetacacheSampleNames, loadMetacacheSampleNames...) 
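The mergeEntryChannels hunks here, and the sameLocalAddrs ones below, collapse else { if cond { ... } } into else if cond { ... }, removing one nesting level without changing behavior. A sketch of the same shape; ensure and its error text are illustrative only:

package main

import (
	"errors"
	"fmt"
	"os"
)

func ensure(path string) error {
	if path == "" {
		return errors.New("empty path")
	} else if _, err := os.Stat(path); err != nil {
		// Before the cleanup this Stat check sat one level deeper, inside a bare else block.
		return err
	}
	return nil
}

func main() { fmt.Println(ensure("/tmp")) }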
sort.Strings(want) got := a.entries().names() diff --git a/cmd/metrics-v2.go b/cmd/metrics-v2.go index 8b0300283..212615b2e 100644 --- a/cmd/metrics-v2.go +++ b/cmd/metrics-v2.go @@ -1623,20 +1623,18 @@ func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) { continue } for k, v := range metric.Histogram { - l := append(labels, metric.HistogramBucketLabel) - lv := append(values, k) out <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(string(metric.Description.Namespace), string(metric.Description.Subsystem), string(metric.Description.Name)), metric.Description.Help, - l, + append(labels, metric.HistogramBucketLabel), metric.StaticLabels, ), prometheus.GaugeValue, float64(v), - lv...) + append(values, k)...) } continue } diff --git a/cmd/net.go b/cmd/net.go index 1371e442f..ab4ec9c25 100644 --- a/cmd/net.go +++ b/cmd/net.go @@ -341,21 +341,17 @@ func sameLocalAddrs(addr1, addr2 string) (bool, error) { if host1 == "" { // If empty host means it is localhost addr1Local = true - } else { + } else if addr1Local, err = isLocalHost(host1, port1, port1); err != nil { // Host not empty, check if it is local - if addr1Local, err = isLocalHost(host1, port1, port1); err != nil { - return false, err - } + return false, err } if host2 == "" { // If empty host means it is localhost addr2Local = true - } else { + } else if addr2Local, err = isLocalHost(host2, port2, port2); err != nil { // Host not empty, check if it is local - if addr2Local, err = isLocalHost(host2, port2, port2); err != nil { - return false, err - } + return false, err } // If both of addresses point to the same machine, check if diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index baaf2bada..46c47ae52 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -52,7 +52,7 @@ var globalObjLayerMutex sync.RWMutex // Global object layer, only accessed by globalObjectAPI. var globalObjectAPI ObjectLayer -//Global cacheObjects, only accessed by newCacheObjectsFn(). +// Global cacheObjects, only accessed by newCacheObjectsFn(). var globalCacheObjectAPI CacheObjectLayer // Checks if the object is a directory, this logic uses diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index 2692797c6..34cfc5dd8 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -312,7 +312,7 @@ func (e ObjectExistsAsDirectory) Error() string { return "Object exists on : " + e.Bucket + " as directory " + e.Object } -//PrefixAccessDenied object access is denied. +// PrefixAccessDenied object access is denied. type PrefixAccessDenied GenericError func (e PrefixAccessDenied) Error() string { @@ -484,7 +484,7 @@ func (e InvalidObjectState) Error() string { return "The operation is not valid for the current state of the object " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")" } -/// Bucket related errors. +// Bucket related errors. // BucketNameInvalid - bucketname provided is invalid. type BucketNameInvalid GenericError @@ -494,7 +494,7 @@ func (e BucketNameInvalid) Error() string { return "Bucket name invalid: " + e.Bucket } -/// Object related errors. +// Object related errors. // ObjectNameInvalid - object name provided is invalid. type ObjectNameInvalid GenericError @@ -569,7 +569,7 @@ func (e OperationTimedOut) Error() string { return "Operation timed out" } -/// Multipart related errors. +// Multipart related errors. // MalformedUploadID malformed upload id. 
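The minioClusterCollector hunk above drops the single-use temporaries l and lv and passes the append results directly to the call; each result is consumed immediately, so the meaning is unchanged. A sketch with made-up label data:

package main

import "fmt"

func emit(labels []string, values ...string) {
	fmt.Println(labels, values)
}

func main() {
	labels := []string{"server", "pool"}
	values := []string{"s1", "p0"}
	bucketLabel, bucket := "le", "0.5"
	// Passing the append results inline removes the throwaway variables;
	// each result feeds straight into the call, exactly as the temporaries did.
	emit(append(labels, bucketLabel), append(values, bucket)...)
}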
type MalformedUploadID struct { diff --git a/cmd/object-api-listobjects_test.go b/cmd/object-api-listobjects_test.go index 240ff4202..f8917e042 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -205,7 +205,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { }, // ListObjectsResult-9. // Used for asserting the case with marker, but without prefix. - //marker is set to "newPrefix0" in the testCase, (testCase 33). + // marker is set to "newPrefix0" in the testCase, (testCase 33). { IsTruncated: false, Objects: []ObjectInfo{ @@ -217,7 +217,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { }, }, // ListObjectsResult-10. - //marker is set to "newPrefix1" in the testCase, (testCase 34). + // marker is set to "newPrefix1" in the testCase, (testCase 34). { IsTruncated: false, Objects: []ObjectInfo{ @@ -228,7 +228,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { }, }, // ListObjectsResult-11. - //marker is set to "obj0" in the testCase, (testCase 35). + // marker is set to "obj0" in the testCase, (testCase 35). { IsTruncated: false, Objects: []ObjectInfo{ @@ -548,7 +548,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { {"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true}, // Testing for all 10 objects in the bucket (18). {"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true}, - //Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (19). + // Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (19). {"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true}, // Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (20). {"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true}, @@ -905,7 +905,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand }, // ListObjectsResult-9. // Used for asserting the case with marker, but without prefix. - //marker is set to "newPrefix0" in the testCase, (testCase 33). + // marker is set to "newPrefix0" in the testCase, (testCase 33). { IsTruncated: false, Objects: []ObjectInfo{ @@ -917,7 +917,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand }, }, // ListObjectsResult-10. - //marker is set to "newPrefix1" in the testCase, (testCase 34). + // marker is set to "newPrefix1" in the testCase, (testCase 34). { IsTruncated: false, Objects: []ObjectInfo{ @@ -928,7 +928,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand }, }, // ListObjectsResult-11. - //marker is set to "obj0" in the testCase, (testCase 35). + // marker is set to "obj0" in the testCase, (testCase 35). { IsTruncated: false, Objects: []ObjectInfo{ @@ -1223,7 +1223,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand {"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true}, // Testing for all 10 objects in the bucket (16). {"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true}, - //Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (17). + // Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (17). {"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true}, // Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (18). 
{"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true}, diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index d2390491d..40b21f10f 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -1065,7 +1065,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan {bucketNames[0], "Asia", "", "", "", 2, listMultipartResults[13], nil, true}, // setting delimiter (Test number 27). {bucketNames[0], "", "", "", SlashSeparator, 2, listMultipartResults[14], nil, true}, - //Test case with multiple uploadID listing for given object (Test number 28). + // Test case with multiple uploadID listing for given object (Test number 28). {bucketNames[1], "", "", "", "", 100, listMultipartResults[15], nil, true}, // Test case with multiple uploadID listing for given object, but uploadID marker set. // Testing whether the marker entry is skipped (Test number 29-30). @@ -1088,29 +1088,29 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // Test case with `Prefix` and `UploadIDMarker` (Test number 37). {bucketNames[1], "min", "minio-object-1.txt", uploadIDs[1], "", 10, listMultipartResults[24], nil, true}, // Test case for bucket with multiple objects in it. - // Bucket used : `bucketNames[2]`. - // Objects used: `objectNames[1-5]`. + // Bucket used : `bucketNames[2]`. + // Objects used: `objectNames[1-5]`. // UploadId's used: uploadIds[4-8]. // (Test number 39). {bucketNames[2], "", "", "", "", 100, listMultipartResults[25], nil, true}, - //Test cases with prefixes. - //Testing listing with prefix set to "min" (Test number 40) . + // Test cases with prefixes. + // Testing listing with prefix set to "min" (Test number 40) . {bucketNames[2], "min", "", "", "", 100, listMultipartResults[26], nil, true}, - //Testing listing with prefix set to "ney" (Test number 41). + // Testing listing with prefix set to "ney" (Test number 41). {bucketNames[2], "ney", "", "", "", 100, listMultipartResults[27], nil, true}, - //Testing listing with prefix set to "par" (Test number 42). + // Testing listing with prefix set to "par" (Test number 42). {bucketNames[2], "parrot", "", "", "", 100, listMultipartResults[28], nil, true}, - //Testing listing with prefix set to object name "neymar.jpeg" (Test number 43). + // Testing listing with prefix set to object name "neymar.jpeg" (Test number 43). {bucketNames[2], "neymar.jpeg", "", "", "", 100, listMultipartResults[29], nil, true}, - // Testing listing with `MaxUploads` set to 3 (Test number 44). + // Testing listing with `MaxUploads` set to 3 (Test number 44). {bucketNames[2], "", "", "", "", 3, listMultipartResults[30], nil, true}, // In case of bucketNames[2], there are 6 entries (Test number 45). // Since all available entries are listed, IsTruncated is expected to be false // and NextMarkers are expected to empty. {bucketNames[2], "", "", "", "", 6, listMultipartResults[31], nil, true}, - // Test case with `KeyMarker` (Test number 47). + // Test case with `KeyMarker` (Test number 47). {bucketNames[2], "", objectNames[3], "", "", 10, listMultipartResults[33], nil, true}, - // Test case with `prefix` and `KeyMarker` (Test number 48). + // Test case with `prefix` and `KeyMarker` (Test number 48). 
{bucketNames[2], "minio-object", objectNames[1], "", "", 10, listMultipartResults[34], nil, true}, } @@ -1694,9 +1694,9 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T {bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))}, {bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))}, // Part with size larger than 5Mb. - {bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))}, - {bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))}, - {bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(string(validPart)))}, + {bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))}, + {bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))}, + {bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(validPart))}, } sha256sum := "" var opts ObjectOptions diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index d6944ed09..08ede6053 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -790,7 +790,7 @@ func (g *GetObjectReader) Close() error { return nil } -//SealMD5CurrFn seals md5sum with object encryption key and returns sealed +// SealMD5CurrFn seals md5sum with object encryption key and returns sealed // md5sum type SealMD5CurrFn func([]byte) []byte diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 0587a7f70..8350d4f29 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -1574,7 +1574,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req return } - /// if Content-Length is unknown/missing, deny the request + // if Content-Length is unknown/missing, deny the request size := r.ContentLength rAuthType := getRequestAuthType(r) if rAuthType == authTypeStreamingSigned { @@ -1595,7 +1595,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req return } - /// maximum Upload size for objects in a single operation + // maximum Upload size for objects in a single operation if isMaxObjectSize(size) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) return @@ -1924,7 +1924,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h return } - /// if Content-Length is unknown/missing, deny the request + // if Content-Length is unknown/missing, deny the request size := r.ContentLength rAuthType := getRequestAuthType(r) if rAuthType == authTypeStreamingSigned { @@ -1946,7 +1946,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h return } - /// maximum Upload size for objects in a single operation + // maximum Upload size for objects in a single operation if isMaxObjectSize(size) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) return @@ -2155,7 +2155,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h writeSuccessResponseHeadersOnly(w) } -/// Multipart objectAPIHandlers +// Multipart objectAPIHandlers // NewMultipartUploadHandler - New multipart upload. 
// Notice: The S3 client can send secret keys in headers for encryption related jobs, @@ -2478,7 +2478,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt return } - /// maximum copy size for multipart objects in a single operation + // maximum copy size for multipart objects in a single operation if isMaxAllowedPartSize(length) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) return @@ -2670,7 +2670,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http return } - /// if Content-Length is unknown/missing, throw away + // if Content-Length is unknown/missing, throw away size := r.ContentLength rAuthType := getRequestAuthType(r) @@ -2693,7 +2693,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http return } - /// maximum Upload size for multipart objects in a single operation + // maximum Upload size for multipart objects in a single operation if isMaxAllowedPartSize(size) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) return @@ -3319,7 +3319,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite } } -/// Delete objectAPIHandlers +// Delete objectAPIHandlers // DeleteObjectHandler - delete an object func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index c5d67d42b..a04253201 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -2706,13 +2706,13 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s {bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))}, {bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))}, // Part with size larger than 5 MiB. - {bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))}, - {bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))}, + {bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))}, + {bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))}, // Part with size larger than 5 MiB. // Parts uploaded for anonymous/unsigned API handler test. - {bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))}, - {bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))}, + {bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))}, + {bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))}, } // Iterating over createPartCases to generate multipart chunks. for _, part := range parts { @@ -3077,13 +3077,13 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri {bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))}, {bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))}, // Part with size larger than 5 MiB.
- {bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))}, - {bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))}, + {bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))}, + {bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))}, // Part with size larger than 5 MiB. // Parts uploaded for anonymous/unsigned API handler test. - {bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))}, - {bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))}, + {bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))}, + {bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))}, } // Iterating over createPartCases to generate multipart chunks. for _, part := range parts { diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index 6ad35ab6e..fc8c7c0cb 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -150,7 +150,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan randomPerm := rand.Perm(10) randomString := "" for _, num := range randomPerm { - randomString = randomString + strconv.Itoa(num) + randomString += strconv.Itoa(num) } expectedETaghex := getMD5Hash([]byte(randomString)) @@ -189,7 +189,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH randomPerm := rand.Perm(100) randomString := "" for _, num := range randomPerm { - randomString = randomString + strconv.Itoa(num) + randomString += strconv.Itoa(num) } expectedETaghex := getMD5Hash([]byte(randomString)) diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 321000f5a..213595dfe 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -61,8 +61,8 @@ func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey stri keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) retStr := "{" retStr = retStr + expirationStr + "," - retStr = retStr + conditionStr - retStr = retStr + "}" + retStr += conditionStr + retStr += "}" return []byte(retStr) } @@ -89,8 +89,8 @@ func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration t conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) retStr := "{" retStr = retStr + expirationStr + "," - retStr = retStr + conditionStr - retStr = retStr + "}" + retStr += conditionStr + retStr += "}" return []byte(retStr) } @@ -108,8 +108,8 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) [] conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr) retStr := "{" retStr = retStr + expirationStr + "," - retStr = retStr + conditionStr - retStr = retStr + "}" + retStr += conditionStr + retStr += "}" return []byte(retStr) } diff --git a/cmd/postpolicyform.go b/cmd/postpolicyform.go index 34c88b51a..f6bfb9574 100644 --- a/cmd/postpolicyform.go +++ b/cmd/postpolicyform.go @@ -303,14 +303,12 @@ func checkPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) erro if !condPassed { return fmt.Errorf("Invalid according to Policy: Policy 
Condition failed") } - } else { + } else if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") { // This covers all conditions X-Amz-Meta-* and X-Amz-* - if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") { - // Check if policy condition is satisfied - condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) - if !condPassed { - return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value) - } + // Check if policy condition is satisfied + condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) + if !condPassed { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value) } } } diff --git a/cmd/server_test.go b/cmd/server_test.go index a635c49cc..29c5fb4cf 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -365,7 +365,7 @@ func (s *TestSuiteCommon) TestBucketPolicy(c *check) { // assert the http response status code. c.Assert(response.StatusCode, http.StatusOK) - /// Put a new bucket policy. + // Put a new bucket policy. request, err = newTestSignedRequest(http.MethodPut, getPutPolicyURL(s.endPoint, bucketName), int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), s.accessKey, s.secretKey, s.signer) c.Assert(err, nil) @@ -980,7 +980,7 @@ func (s *TestSuiteCommon) TestPutBucket(c *check) { wg.Wait() bucketName = getRandomBucketName() - //Block 2: testing for correctness of the functionality + // Block 2: testing for correctness of the functionality // HTTP request to create the bucket. request, err := newTestSignedRequest(http.MethodPut, getMakeBucketURL(s.endPoint, bucketName), 0, nil, s.accessKey, s.secretKey, s.signer) @@ -1273,7 +1273,7 @@ func (s *TestSuiteCommon) TestPutObjectLongName(c *check) { c.Assert(err, nil) c.Assert(response.StatusCode, http.StatusOK) - //make long object name. + // make long object name. longObjName = fmt.Sprintf("%0255d/%0255d/%0255d/%0255d/%0255d", 1, 1, 1, 1, 1) if IsDocker() || IsKubernetes() { longObjName = fmt.Sprintf("%0242d/%0242d/%0242d/%0242d/%0242d", 1, 1, 1, 1, 1) diff --git a/cmd/signature-v4-parser.go b/cmd/signature-v4-parser.go index a19e6ad1e..1ee156971 100644 --- a/cmd/signature-v4-parser.go +++ b/cmd/signature-v4-parser.go @@ -261,7 +261,7 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // Replace all spaced strings, some clients can send spaced // parameters and some won't. So we pro-actively remove any spaces // to make parsing easier. - v4Auth = strings.Replace(v4Auth, " ", "", -1) + v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { return sv, ErrAuthHeaderEmpty } diff --git a/cmd/signature-v4.go b/cmd/signature-v4.go index 044da6d4e..bda7535a6 100644 --- a/cmd/signature-v4.go +++ b/cmd/signature-v4.go @@ -103,7 +103,7 @@ func getSignedHeaders(signedHeaders http.Header) string { // // func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { - rawQuery := strings.Replace(queryStr, "+", "%20", -1) + rawQuery := strings.ReplaceAll(queryStr, "+", "%20") encodedPath := s3utils.EncodePath(urlPath) canonicalRequest := strings.Join([]string{ method, @@ -130,9 +130,9 @@ func getScope(t time.Time, region string) string { // getStringToSign a string based on selected query values. 
func getStringToSign(canonicalRequest string, t time.Time, scope string) string { stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" - stringToSign = stringToSign + scope + "\n" + stringToSign += scope + "\n" canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) - stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) + stringToSign += hex.EncodeToString(canonicalRequestBytes[:]) return stringToSign } @@ -306,7 +306,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s return ErrInvalidToken } - /// Verify finally if signature is same. + // Verify finally if signature is same. // Get canonical request. presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) diff --git a/cmd/storage-rest_test.go b/cmd/storage-rest_test.go index b501db5d3..2696b27c6 100644 --- a/cmd/storage-rest_test.go +++ b/cmd/storage-rest_test.go @@ -29,13 +29,8 @@ import ( xnet "github.com/minio/pkg/net" ) -/////////////////////////////////////////////////////////////////////////////// -// // Storage REST server, storageRESTReceiver and StorageRESTClient are // inter-dependent, below test functions are sufficient to test all of them. -// -/////////////////////////////////////////////////////////////////////////////// - func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) { testCases := []struct { expectErr bool diff --git a/cmd/streaming-signature-v4.go b/cmd/streaming-signature-v4.go index c749bc1e0..06680ac25 100644 --- a/cmd/streaming-signature-v4.go +++ b/cmd/streaming-signature-v4.go @@ -436,7 +436,7 @@ func parseHexUint(v []byte) (n uint64, err error) { for i, b := range v { switch { case '0' <= b && b <= '9': - b = b - '0' + b -= '0' case 'a' <= b && b <= 'f': b = b - 'a' + 10 case 'A' <= b && b <= 'F': diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 2d5ef6985..59d13d574 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -119,19 +119,19 @@ func TestMain(m *testing.M) { // concurrency level for certain parallel tests. const testConcurrencyLevel = 10 -/// -/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes problems with generating pre-signed URLs -/// (that are executed by other agents) or when customers pass requests through proxies, which may -/// modify the user-agent. -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// +// +// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 +// +// User-Agent: +// +// This is ignored from signing because signing this causes problems with generating pre-signed URLs +// (that are executed by other agents) or when customers pass requests through proxies, which may +// modify the user-agent. +// +// Authorization: +// +// Is skipped for obvious reasons +// var ignoredHeaders = map[string]bool{ "Authorization": true, "User-Agent": true, @@ -633,7 +633,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi signedHeaders := strings.Join(headers, ";") // Get canonical query string. - req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") // Get canonical URI. 
canonicalURI := s3utils.EncodePath(req.URL.Path) @@ -665,8 +665,8 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi }, SlashSeparator) stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" - stringToSign = stringToSign + scope + "\n" - stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) + stringToSign += scope + "\n" + stringToSign += getSHA256Hash([]byte(canonicalRequest)) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) region := sumHMAC(date, []byte(globalMinioDefaultRegion)) @@ -749,7 +749,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in stringToSign = stringToSign + scope + "\n" stringToSign = stringToSign + signature + "\n" stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256("")) - stringToSign = stringToSign + getSHA256Hash(buffer[:n]) + stringToSign += getSHA256Hash(buffer[:n]) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) region := sumHMAC(date, []byte(regionStr)) @@ -851,7 +851,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i extractedSignedHeaders := make(http.Header) extractedSignedHeaders.Set("host", req.Host) - queryStr := strings.Replace(query.Encode(), "+", "%20", -1) + queryStr := strings.ReplaceAll(query.Encode(), "+", "%20") canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) stringToSign := getStringToSign(canonicalRequest, date, scope) signingKey := getSigningKey(secretAccessKey, date, region, serviceS3) @@ -988,7 +988,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { signedHeaders := strings.Join(headers, ";") // Get canonical query string. - req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) + req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20") // Get canonical URI. canonicalURI := s3utils.EncodePath(req.URL.Path) @@ -1021,7 +1021,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" stringToSign = stringToSign + scope + "\n" - stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) + stringToSign += getSHA256Hash([]byte(canonicalRequest)) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) regionHMAC := sumHMAC(date, []byte(region)) @@ -1220,7 +1220,7 @@ func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url. urlStr = urlStr + bucketName + SlashSeparator } if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) + urlStr += s3utils.EncodePath(objectName) } if len(queryValues) > 0 { urlStr = urlStr + "?" + queryValues.Encode() @@ -1504,7 +1504,7 @@ func removeRoots(roots []string) { } } -//removeDiskN - removes N disks from supplied disk slice. +// removeDiskN - removes N disks from supplied disk slice. 
func removeDiskN(disks []string, n int) { if n > len(disks) { n = len(disks) diff --git a/cmd/update_test.go b/cmd/update_test.go index 02b254b19..7d6b6c9d8 100644 --- a/cmd/update_test.go +++ b/cmd/update_test.go @@ -161,7 +161,7 @@ func TestUserAgent(t *testing.T) { str := getUserAgent(testCase.mode) expectedStr := testCase.expectedStr if IsDocker() { - expectedStr = strings.Replace(expectedStr, "; source", "; docker; source", -1) + expectedStr = strings.ReplaceAll(expectedStr, "; source", "; docker; source") } if str != expectedStr { t.Errorf("Test %d: expected: %s, got: %s", i+1, expectedStr, str) @@ -216,7 +216,7 @@ func TestGetHelmVersion(t *testing.T) { if err != nil { t.Fatalf("Unable to create temporary file. %s", err) } - if _, err = tmpfile.Write([]byte(content)); err != nil { + if _, err = tmpfile.WriteString(content); err != nil { t.Fatalf("Unable to create temporary file. %s", err) } if err = tmpfile.Close(); err != nil { diff --git a/cmd/url_test.go b/cmd/url_test.go index 6e1efc7ef..c5fd7781a 100644 --- a/cmd/url_test.go +++ b/cmd/url_test.go @@ -23,7 +23,7 @@ import ( ) func BenchmarkURLQueryForm(b *testing.B) { - req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil) + req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody) if err != nil { b.Fatal(err) } @@ -49,7 +49,7 @@ func BenchmarkURLQueryForm(b *testing.B) { // BenchmarkURLQuery - benchmark URL memory allocations func BenchmarkURLQuery(b *testing.B) { - req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil) + req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody) if err != nil { b.Fatal(err) } diff --git a/cmd/utils.go b/cmd/utils.go index 644bc730d..f2b32a31a 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -160,7 +160,7 @@ func hasContentMD5(h http.Header) bool { return ok } -/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html +// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html const ( // Maximum object size per PUT request is 5TB. // This is a divergence from S3 limit on purpose to support @@ -409,7 +409,7 @@ func dumpRequest(r *http.Request) string { header.Set("Host", r.Host) // Replace all '%' to '%%' so that printer format parser // to ignore URL encoded values. - rawURI := strings.Replace(r.RequestURI, "%", "%%", -1) + rawURI := strings.ReplaceAll(r.RequestURI, "%", "%%") req := struct { Method string `json:"method"` RequestURI string `json:"reqURI"` diff --git a/cmd/utils_test.go b/cmd/utils_test.go index 01ccd9216..e6499cf91 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -238,9 +238,8 @@ func TestDumpRequest(t *testing.T) { RequestURI string `json:"reqURI"` Header http.Header `json:"header"` } - jsonReq = strings.Replace(jsonReq, "%%", "%", -1) res := jsonResult{} - if err = json.Unmarshal([]byte(jsonReq), &res); err != nil { + if err = json.Unmarshal([]byte(strings.ReplaceAll(jsonReq, "%%", "%")), &res); err != nil { t.Fatal(err) } @@ -399,7 +398,6 @@ func TestCeilFrac(t *testing.T) { // Test if isErrIgnored works correctly. 
func TestIsErrIgnored(t *testing.T) { var errIgnored = fmt.Errorf("ignored error") - ignoredErrs := append(baseIgnoredErrs, errIgnored) var testCases = []struct { err error ignored bool @@ -418,7 +416,7 @@ func TestIsErrIgnored(t *testing.T) { }, } for i, testCase := range testCases { - if ok := IsErrIgnored(testCase.err, ignoredErrs...); ok != testCase.ignored { + if ok := IsErrIgnored(testCase.err, append(baseIgnoredErrs, errIgnored)...); ok != testCase.ignored { t.Errorf("Test: %d, Expected %t, got %t", i+1, testCase.ignored, ok) } } diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index 13abf1285..a9c2158c2 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -276,7 +276,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) { if err != nil { return p, err } - if _, err = w.Write(alignedBuf[:]); err != nil { + if _, err = w.Write(alignedBuf); err != nil { w.Close() return p, err } @@ -2394,10 +2394,13 @@ func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string, glob if err != nil { name = filePath } - if os.PathSeparator != '/' { - name = strings.Replace(name, string(os.PathSeparator), "/", -1) - } - stat = append(stat, StatInfo{ModTime: st.ModTime(), Size: st.Size(), Name: name, Dir: st.IsDir(), Mode: uint32(st.Mode())}) + stat = append(stat, StatInfo{ + Name: filepath.ToSlash(name), + Size: st.Size(), + Dir: st.IsDir(), + Mode: uint32(st.Mode()), + ModTime: st.ModTime(), + }) } return stat, nil } diff --git a/cmd/xl-storage_test.go b/cmd/xl-storage_test.go index e752698b4..aaf5f2e10 100644 --- a/cmd/xl-storage_test.go +++ b/cmd/xl-storage_test.go @@ -447,7 +447,7 @@ func TestXLStorageReadAll(t *testing.T) { continue } if err == nil { - if string(dataRead) != string([]byte("Hello, World")) { + if !bytes.Equal(dataRead, []byte("Hello, World")) { t.Errorf("TestXLStorage %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) } } @@ -1227,7 +1227,10 @@ func TestXLStorageReadFile(t *testing.T) { t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err) } // Expected error returned, proceed further to validate the returned results.
- if err == nil && err == testCase.expectedErr { + if err != nil && testCase.expectedErr == nil { + t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err) + } + if err == nil { if !bytes.Equal(testCase.expectedBuf, buf) { t.Errorf("Case: %d %#v, expected: \"%s\", got: \"%s\"", i+1, testCase, string(testCase.expectedBuf), string(buf[:testCase.bufSize])) } diff --git a/docs/debugging/inspect/main.go b/docs/debugging/inspect/main.go index 7570b53f2..0678c59b8 100644 --- a/docs/debugging/inspect/main.go +++ b/docs/debugging/inspect/main.go @@ -56,12 +56,13 @@ func main() { fatalErr(json.Unmarshal(got, &input)) r, err := os.Open(input.File) fatalErr(err) - defer r.Close() dstName := strings.TrimSuffix(input.File, ".enc") + ".zip" w, err := os.Create(dstName) fatalErr(err) - defer w.Close() + decrypt(input.Key, r, w) + r.Close() + w.Close() fmt.Println("Output decrypted to", dstName) return } @@ -78,14 +79,13 @@ func main() { case 1: r, err := os.Open(args[0]) fatalErr(err) - defer r.Close() if len(*key) == 0 { reader := bufio.NewReader(os.Stdin) fmt.Print("Enter Decryption Key: ") text, _ := reader.ReadString('\n') // convert CRLF to LF - *key = strings.Replace(text, "\n", "", -1) + *key = strings.ReplaceAll(text, "\n", "") } *key = strings.TrimSpace(*key) fatalIf(len(*key) != 72, "Unexpected key length: %d, want 72", len(*key)) @@ -93,9 +93,11 @@ func main() { dstName := strings.TrimSuffix(args[0], ".enc") + ".zip" w, err := os.Create(dstName) fatalErr(err) - defer w.Close() decrypt(*key, r, w) + r.Close() + w.Close() + fmt.Println("Output decrypted to", dstName) return default: diff --git a/internal/auth/credentials.go b/internal/auth/credentials.go index 23d1cb432..654f79da5 100644 --- a/internal/auth/credentials.go +++ b/internal/auth/credentials.go @@ -222,8 +222,8 @@ func GenerateCredentials() (accessKey, secretKey string, err error) { return "", "", err } - secretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]), - "/", "+", -1) + secretKey = strings.ReplaceAll(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]), + "/", "+") return accessKey, secretKey, nil } diff --git a/internal/bucket/bandwidth/monitor.go b/internal/bucket/bandwidth/monitor.go index 57367eca8..bd50c94d3 100644 --- a/internal/bucket/bandwidth/monitor.go +++ b/internal/bucket/bandwidth/monitor.go @@ -42,7 +42,7 @@ type Monitor struct { NodeCount uint64 } -//NewMonitor returns a monitor with defaults. +// NewMonitor returns a monitor with defaults. func NewMonitor(ctx context.Context, numNodes uint64) *Monitor { m := &Monitor{ activeBuckets: make(map[string]*bucketMeasurement), @@ -63,7 +63,7 @@ func (m *Monitor) updateMeasurement(bucket string, bytes uint64) { } } -//SelectionFunction for buckets +// SelectionFunction for buckets type SelectionFunction func(bucket string) bool // SelectBuckets will select all the buckets passed in. 
diff --git a/internal/bucket/lifecycle/lifecycle.go b/internal/bucket/lifecycle/lifecycle.go index 9e1f43ec8..1ad24d2bf 100644 --- a/internal/bucket/lifecycle/lifecycle.go +++ b/internal/bucket/lifecycle/lifecycle.go @@ -57,7 +57,7 @@ const ( DeleteVersionAction // TransitionAction transitions a particular object after evaluating lifecycle transition rules TransitionAction - //TransitionVersionAction transitions a particular object version after evaluating lifecycle transition rules + // TransitionVersionAction transitions a particular object version after evaluating lifecycle transition rules TransitionVersionAction // DeleteRestoredAction means the temporarily restored object needs to be removed after evaluating lifecycle rules DeleteRestoredAction diff --git a/internal/bucket/object/lock/lock.go b/internal/bucket/object/lock/lock.go index 79a524b6b..6401cdffa 100644 --- a/internal/bucket/object/lock/lock.go +++ b/internal/bucket/object/lock/lock.go @@ -193,6 +193,7 @@ func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement) return fmt.Errorf("either Days or Years must be specified, not both") } + //nolint:gocritic if retention.Days != nil { if *retention.Days == 0 { return fmt.Errorf("Default retention period must be a positive integer value for 'Days'") diff --git a/internal/bucket/object/lock/lock_test.go b/internal/bucket/object/lock/lock_test.go index f1bf2c604..a85392fcf 100644 --- a/internal/bucket/object/lock/lock_test.go +++ b/internal/bucket/object/lock/lock_test.go @@ -137,6 +137,7 @@ func TestUnmarshalDefaultRetention(t *testing.T) { } var dr DefaultRetention err = xml.Unmarshal(d, &dr) + //nolint:gocritic if tt.expectedErr == nil { if err != nil { t.Fatalf("error: expected = , got = %v", err) @@ -173,6 +174,7 @@ func TestParseObjectLockConfig(t *testing.T) { } for _, tt := range tests { _, err := ParseObjectLockConfig(strings.NewReader(tt.value)) + //nolint:gocritic if tt.expectedErr == nil { if err != nil { t.Fatalf("error: expected = , got = %v", err) @@ -209,6 +211,7 @@ func TestParseObjectRetention(t *testing.T) { } for _, tt := range tests { _, err := ParseObjectRetention(strings.NewReader(tt.value)) + //nolint:gocritic if tt.expectedErr == nil { if err != nil { t.Fatalf("error: expected = , got = %v", err) @@ -367,6 +370,7 @@ func TestParseObjectLockRetentionHeaders(t *testing.T) { for i, tt := range tests { _, _, err := ParseObjectLockRetentionHeaders(tt.header) + //nolint:gocritic if tt.expectedErr == nil { if err != nil { t.Fatalf("Case %d error: expected = , got = %v", i, err) @@ -494,6 +498,7 @@ func TestParseObjectLegalHold(t *testing.T) { } for i, tt := range tests { _, err := ParseObjectLegalHold(strings.NewReader(tt.value)) + //nolint:gocritic if tt.expectedErr == nil { if err != nil { t.Fatalf("Case %d error: expected = , got = %v", i, err) diff --git a/internal/bucket/replication/destination.go b/internal/bucket/replication/destination.go index a106c1b2b..0557bed82 100644 --- a/internal/bucket/replication/destination.go +++ b/internal/bucket/replication/destination.go @@ -37,7 +37,7 @@ type Destination struct { Bucket string `xml:"Bucket" json:"Bucket"` StorageClass string `xml:"StorageClass" json:"StorageClass"` ARN string - //EncryptionConfiguration TODO: not needed for MinIO + // EncryptionConfiguration TODO: not needed for MinIO } func (d Destination) isValidStorageClass() bool { @@ -57,14 +57,14 @@ func (d Destination) String() string { } -//LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was used -// prior 
to multi-destination +// LegacyArn returns true if the ARN has the prefix "arn:aws:s3:::", which was +// used prior to multi-destination func (d Destination) LegacyArn() bool { return strings.HasPrefix(d.ARN, DestinationARNPrefix) } -//TargetArn returns true if arn format has prefix "arn:minio:replication:::" used -// for multi-destination targets +// TargetArn returns true if the ARN has the prefix "arn:minio:replication:::", +// used for multi-destination targets func (d Destination) TargetArn() bool { return strings.HasPrefix(d.ARN, DestinationARNMinIOPrefix) } diff --git a/internal/bucket/replication/replication.go b/internal/bucket/replication/replication.go index 8d81405b2..94354c66b 100644 --- a/internal/bucket/replication/replication.go +++ b/internal/bucket/replication/replication.go @@ -175,7 +175,7 @@ func (c Config) FilterActionableRules(obj ObjectOpts) []Rule { rules = append(rules, rule) } } - sort.Slice(rules[:], func(i, j int) bool { + sort.Slice(rules, func(i, j int) bool { return rules[i].Priority > rules[j].Priority && rules[i].Destination.String() == rules[j].Destination.String() }) diff --git a/internal/bucket/replication/replication_test.go b/internal/bucket/replication/replication_test.go index 19ba309ef..d1e882af6 100644 --- a/internal/bucket/replication/replication_test.go +++ b/internal/bucket/replication/replication_test.go @@ -31,28 +31,28 @@ func TestParseAndValidateReplicationConfig(t *testing.T) { destBucket string sameTarget bool }{ - { //1 Invalid delete marker status in replication config + { // 1 Invalid delete marker status in replication config inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledstringkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errInvalidDeleteMarkerReplicationStatus, }, - //2 Invalid delete replication status in replication config + // 2 Invalid delete replication status in replication config {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errDeleteReplicationMissing, }, - //3 valid replication config + // 3 valid replication config {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: nil, }, - //4 missing role in config and destination ARN is in legacy format + // 4 missing role in config and destination ARN is in legacy format {inputConfig: `EnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, // destination bucket in config different from bucket specified destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errDestinationArnMissing, }, - //5 replication destination in different rules not identical + // 5 replication destination in different rules not identical {inputConfig: `EnabledDisabledDisabledkey-prefixarn:minio:replication:::destinationbucketEnabled3DisabledDisabledkey-prefixarn:minio:replication:::destinationbucket2`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: nil, }, - //6 missing rule status in replication config + // 6 missing rule status in replication config {inputConfig:
`arn:aws:iam::AcctID:role/role-nameDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errEmptyRuleStatus, }, - //7 invalid rule status in replication config + // 7 invalid rule status in replication config {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnssabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errInvalidRuleStatus, }, - //8 invalid rule id exceeds length allowed in replication config + // 8 invalid rule id exceeds length allowed in replication config {inputConfig: `arn:aws:iam::AcctID:role/role-namevsUVERgOc8zZYagLSzSa5lE8qeI6nh1lyLNS4R9W052yfecrhhepGboswSWMMNO8CPcXM4GM3nKyQ72EadlMzzZBFoYWKn7ju5GoE5w9c57a0piHR1vexpdd9FrMquiruvAJ0MTGVupm0EegMVxoIOdjx7VgZhGrmi2XDvpVEFT7WmYMA9fSK297XkTHWyECaNHBySJ1Qp4vwX8tPNauKpfHx4kzUpnKe1PZbptGMWbY5qTcwlNuMhVSmgFffShqEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errInvalidRuleID, }, - //9 invalid priority status in replication config + // 9 invalid priority status in replication config {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucket`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errReplicationUniquePriority, }, - //10 no rule in replication config + // 10 no rule in replication config {inputConfig: `arn:aws:iam::AcctID:role/role-name`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: nil, expectedValidationErr: errReplicationNoRule, }, - //11 no destination in replication config + // 11 no destination in replication config {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefix`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: Errorf("invalid destination '%v'", ""), expectedValidationErr: nil, }, - //12 destination not matching ARN in replication config + // 12 destination not matching ARN in replication config {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixdestinationbucket2`, destBucket: "destinationbucket", sameTarget: false, expectedParsingErr: fmt.Errorf("invalid destination '%v'", "destinationbucket2"), expectedValidationErr: nil, }, - //13 missing role in config and destination ARN has target ARN + // 13 missing role in config and destination ARN has target ARN {inputConfig: `EnabledDisabledDisabledkey-prefixarn:minio:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket`, // destination bucket in config different from bucket specified destBucket: "destinationbucket", @@ -124,7 +124,7 @@ func TestParseAndValidateReplicationConfig(t *testing.T) { expectedParsingErr: nil, expectedValidationErr: nil, }, - //14 role absent in config and destination ARN has target ARN in invalid format + // 14 role absent in config and destination ARN has target ARN in invalid format {inputConfig: `EnabledDisabledDisabledkey-prefixarn:xx:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket`, // destination bucket in config different from bucket specified destBucket: "destinationbucket", @@ 
-232,53 +232,53 @@ func TestReplicate(t *testing.T) { expectedResult bool }{ // using config 1 - no filters, all replication enabled - {ObjectOpts{}, cfgs[0], false}, //1. invalid ObjectOpts missing object name - {ObjectOpts{Name: "c1test"}, cfgs[0], true}, //2. valid ObjectOpts passing empty Filter - {ObjectOpts{Name: "c1test", VersionID: "vid"}, cfgs[0], true}, //3. valid ObjectOpts passing empty Filter + {ObjectOpts{}, cfgs[0], false}, // 1. invalid ObjectOpts missing object name + {ObjectOpts{Name: "c1test"}, cfgs[0], true}, // 2. valid ObjectOpts passing empty Filter + {ObjectOpts{Name: "c1test", VersionID: "vid"}, cfgs[0], true}, // 3. valid ObjectOpts passing empty Filter - {ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, //4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status - {ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, //5. permanent delete of version, matches DeleteReplication status - valid case - {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, //6. permanent delete of version, matches DeleteReplication status - {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, //7. permanent delete of version, disqualified by SSE-C - {ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, //8. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C - {ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], false}, //9. replication of SSE-C encrypted object, disqualified + {ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status + {ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, // 5. permanent delete of version, matches DeleteReplication status - valid case + {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 6. permanent delete of version, matches DeleteReplication status + {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 7. permanent delete of version, disqualified by SSE-C + {ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 8. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C + {ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], false}, // 9. replication of SSE-C encrypted object, disqualified // using config 2 - no filters, only replication of object, metadata enabled - {ObjectOpts{Name: "c2test"}, cfgs[1], true}, //10. valid ObjectOpts passing empty Filter - {ObjectOpts{Name: "c2test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, //11. DeleteMarker version replication not allowed due to DeleteMarkerReplication status - {ObjectOpts{Name: "c2test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[1], false}, //12. permanent delete of version, disallowed by DeleteReplication status - {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, //13. 
permanent delete of DeleteMarker version, disallowed by DeleteReplication status - {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, //14. permanent delete of version, disqualified by SSE-C & DeleteReplication status - {ObjectOpts{Name: "c2test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, //15. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C & DeleteMarkerReplication status - {ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], false}, //16. replication of SSE-C encrypted object, disqualified by default + {ObjectOpts{Name: "c2test"}, cfgs[1], true}, // 10. valid ObjectOpts passing empty Filter + {ObjectOpts{Name: "c2test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 11. DeleteMarker version replication not allowed due to DeleteMarkerReplication status + {ObjectOpts{Name: "c2test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[1], false}, // 12. permanent delete of version, disallowed by DeleteReplication status + {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 13. permanent delete of DeleteMarker version, disallowed by DeleteReplication status + {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 14. permanent delete of version, disqualified by SSE-C & DeleteReplication status + {ObjectOpts{Name: "c2test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 15. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C & DeleteMarkerReplication status + {ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], false}, // 16. replication of SSE-C encrypted object, disqualified by default // using config 2 - has more than one rule with overlapping prefixes - {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v1"}, cfgs[2], true}, //17. matches rule 1 for replication of content/metadata - {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1"}, cfgs[2], true}, //18. matches rule 1 for replication of content/metadata - {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], false}, //19. matches rule 1 - DeleteMarker replication disallowed by rule - {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, //20. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker - {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, //21. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version - {ObjectOpts{Name: "xyz/c3test"}, cfgs[2], true}, //22. matches rule 2 for replication of content/metadata - {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v2"}, cfgs[2], false}, //23. does not match rule1 because tag value does not pass filter - {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], true}, //24. matches rule 2 - DeleteMarker replication allowed by rule - {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, //25. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker - {ObjectOpts{Name: "xyz/c3test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, //26. 
matches rule 1 - DeleteReplication disallowed by rule for permanent delete of version - {ObjectOpts{Name: "abc/c3test"}, cfgs[2], false}, //27. matches no rule because object prefix does not match + {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 17. matches rule 1 for replication of content/metadata + {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 18. matches rule 1 for replication of content/metadata + {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], false}, // 19. matches rule 1 - DeleteMarker replication disallowed by rule + {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, // 20. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker + {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, // 21. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version + {ObjectOpts{Name: "xyz/c3test"}, cfgs[2], true}, // 22. matches rule 2 for replication of content/metadata + {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v2"}, cfgs[2], false}, // 23. does not match rule1 because tag value does not pass filter + {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], true}, // 24. matches rule 2 - DeleteMarker replication allowed by rule + {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, // 25. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker + {ObjectOpts{Name: "xyz/c3test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, // 26. matches rule 1 - DeleteReplication disallowed by rule for permanent delete of version + {ObjectOpts{Name: "abc/c3test"}, cfgs[2], false}, // 27. matches no rule because object prefix does not match // using config 3 - has no overlapping rules - {ObjectOpts{Name: "xy/c4test", UserTags: "k1=v1"}, cfgs[3], true}, //28. matches rule 1 for replication of content/metadata - {ObjectOpts{Name: "xa/c4test", UserTags: "k1=v1"}, cfgs[3], false}, //29. no rule match object prefix not in rules - {ObjectOpts{Name: "xyz/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, //30. rule 1 not matched because of tags filter - {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, //31. matches rule 1 - DeleteMarker replication disallowed by rule - {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, //32. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker - {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, //33. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version - {ObjectOpts{Name: "abc/c4test"}, cfgs[3], true}, //34. matches rule 2 for replication of content/metadata - {ObjectOpts{Name: "abc/c4test", UserTags: "k1=v2"}, cfgs[3], true}, //35. matches rule 2 for replication of content/metadata - {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], true}, //36. 
matches rule 2 - DeleteMarker replication allowed by rule - {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, //37. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker - {ObjectOpts{Name: "abc/c4test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, //38. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of version + {ObjectOpts{Name: "xy/c4test", UserTags: "k1=v1"}, cfgs[3], true}, // 28. matches rule 1 for replication of content/metadata + {ObjectOpts{Name: "xa/c4test", UserTags: "k1=v1"}, cfgs[3], false}, // 29. no rule match object prefix not in rules + {ObjectOpts{Name: "xyz/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, // 30. rule 1 not matched because of tags filter + {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, // 31. matches rule 1 - DeleteMarker replication disallowed by rule + {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, // 32. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker + {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, // 33. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version + {ObjectOpts{Name: "abc/c4test"}, cfgs[3], true}, // 34. matches rule 2 for replication of content/metadata + {ObjectOpts{Name: "abc/c4test", UserTags: "k1=v2"}, cfgs[3], true}, // 35. matches rule 2 for replication of content/metadata + {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], true}, // 36. matches rule 2 - DeleteMarker replication allowed by rule + {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, // 37. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker + {ObjectOpts{Name: "abc/c4test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, // 38. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of version // using config 4 - with replica modification sync disabled. - {ObjectOpts{Name: "xy/c5test", UserTags: "k1=v1", Replica: true}, cfgs[4], false}, //39. replica syncing disabled, this object is a replica - {ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, //40. replica syncing disabled, this object is NOT a replica + {ObjectOpts{Name: "xy/c5test", UserTags: "k1=v1", Replica: true}, cfgs[4], false}, // 39. replica syncing disabled, this object is a replica + {ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, // 40. 
replica syncing disabled, this object is NOT a replica } for i, testCase := range testCases { @@ -322,7 +322,7 @@ func TestHasActiveRules(t *testing.T) { expectedNonRec: false, expectedRec: true, }, - //case 5 - has filter with prefix and tags, here we are not matching on tags + // case 5 - has filter with prefix and tags, here we are not matching on tags {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabled key-prefixkey1value1key2value2arn:aws:s3:::destinationbucket`, prefix: "testdir/", diff --git a/internal/bucket/replication/rule_test.go b/internal/bucket/replication/rule_test.go index c390e1ed6..d51456330 100644 --- a/internal/bucket/replication/rule_test.go +++ b/internal/bucket/replication/rule_test.go @@ -31,23 +31,23 @@ func TestMetadataReplicate(t *testing.T) { }{ // case 1 - rule with replica modification enabled; not a replica {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabled`, - opts: ObjectOpts{Name: "c1test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, //1. Replica mod sync enabled; not a replica + opts: ObjectOpts{Name: "c1test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync enabled; not a replica expectedResult: true, }, // case 2 - rule with replica modification disabled; a replica {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketDisabled`, - opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: true}, //1. Replica mod sync enabled; a replica + opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: true}, // 1. Replica mod sync enabled; a replica expectedResult: false, }, // case 3 - rule with replica modification disabled; not a replica {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketDisabled`, - opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, //1. Replica mod sync disabled; not a replica + opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync disabled; not a replica expectedResult: true, }, // case 4 - rule with replica modification enabled; a replica {inputConfig: `arn:aws:iam::AcctID:role/role-nameEnabledDisabledDisabledkey-prefixarn:aws:s3:::destinationbucketEnabled`, - opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: MetadataReplicationType, Replica: true}, //1. Replica mod sync enabled; a replica + opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: MetadataReplicationType, Replica: true}, // 1. 
diff --git a/internal/config/certs.go b/internal/config/certs.go
index 5903fcc09..b80946c3c 100644
--- a/internal/config/certs.go
+++ b/internal/config/certs.go
@@ -117,9 +117,9 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
 }
 
 // EnsureCertAndKey checks if both client certificate and key paths are provided
-func EnsureCertAndKey(ClientCert, ClientKey string) error {
-	if (ClientCert != "" && ClientKey == "") ||
-		(ClientCert == "" && ClientKey != "") {
+func EnsureCertAndKey(clientCert, clientKey string) error {
+	if (clientCert != "" && clientKey == "") ||
+		(clientCert == "" && clientKey != "") {
 		return errors.New("cert and key must be specified as a pair")
 	}
 	return nil
diff --git a/internal/config/certsinfo.go b/internal/config/certsinfo.go
index edc0ea7aa..013bd1d10 100644
--- a/internal/config/certsinfo.go
+++ b/internal/config/certsinfo.go
@@ -38,6 +38,7 @@ func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []strin
 	values := []string{}
 	for _, name := range names {
 		oid := name.Type
+		//nolint:gocritic
 		if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
 			switch oid[3] {
 			case 3:
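
The `//nolint:gocritic` directives added throughout this patch are golangci-lint suppressions: placed directly above a statement, they silence only the named linter at that one spot while the check stays active everywhere else. A minimal sketch with hypothetical code:

package main

import "fmt"

func classify(n int) {
	//nolint:gocritic // keep the if-else chain; a switch would not be clearer here
	if n < 0 {
		fmt.Println("negative")
	} else if n == 0 {
		fmt.Println("zero")
	} else {
		fmt.Println("positive")
	}
}

func main() { classify(0) }
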
diff --git a/internal/config/dns/operator_dns.go b/internal/config/dns/operator_dns.go
index 47759ccb3..fd0e06444 100644
--- a/internal/config/dns/operator_dns.go
+++ b/internal/config/dns/operator_dns.go
@@ -201,9 +201,9 @@ func Authentication(username, password string) OperatorOption {
 }
 
 // RootCAs - add custom trust certs pool
-func RootCAs(CAs *x509.CertPool) OperatorOption {
+func RootCAs(certPool *x509.CertPool) OperatorOption {
 	return func(args *OperatorDNS) {
-		args.rootCAs = CAs
+		args.rootCAs = certPool
 	}
 }
diff --git a/internal/config/heal/heal.go b/internal/config/heal/heal.go
index 9cd1db8ce..a98ccd06e 100644
--- a/internal/config/heal/heal.go
+++ b/internal/config/heal/heal.go
@@ -86,7 +86,7 @@ func (opts Config) Wait(currentIO func() int, systemIO func() int) {
 		} else {
 			time.Sleep(waitTick)
 		}
-		tmpMaxWait = tmpMaxWait - waitTick
+		tmpMaxWait -= waitTick
 	}
 	if tmpMaxWait <= 0 {
 		return
diff --git a/internal/config/identity/ldap/config.go b/internal/config/identity/ldap/config.go
index c7409d08e..5bd029215 100644
--- a/internal/config/identity/ldap/config.go
+++ b/internal/config/identity/ldap/config.go
@@ -186,7 +186,7 @@ func (l *Config) lookupBind(conn *ldap.Conn) error {
 // assumed to be using the lookup bind service account. It is required that the
 // search result in at most one result.
 func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
-	filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
+	filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username))
 	searchRequest := ldap.NewSearchRequest(
 		l.UserDNSearchBaseDN,
 		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
@@ -213,8 +213,8 @@ func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) (
 	var groups []string
 	if l.GroupSearchFilter != "" {
 		for _, groupSearchBase := range l.GroupSearchBaseDistNames {
-			filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
-			filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
+			filter := strings.ReplaceAll(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username))
+			filter = strings.ReplaceAll(filter, "%d", ldap.EscapeFilter(bindDN))
 			searchRequest := ldap.NewSearchRequest(
 				groupSearchBase,
 				ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
@@ -393,7 +393,7 @@ func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string,
 	}
 
 	// Evaluate the filter again with generic wildcard instead of specific values
-	filter := strings.Replace(l.UserDNSearchFilter, "%s", "*", -1)
+	filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", "*")
 
 	nonExistentUsers := []string{}
 	for _, dn := range userDistNames {
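
strings.ReplaceAll(s, old, new), added in Go 1.12, is defined as strings.Replace(s, old, new, -1), so the rewrites above are behavior-preserving. A quick demonstration, using a hypothetical LDAP-style filter template rather than MinIO's actual configuration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// hypothetical search-filter template with two placeholders
	tmpl := "(&(objectClass=user)(uid=%s)(cn=%s))"

	a := strings.Replace(tmpl, "%s", "jsmith", -1) // old spelling
	b := strings.ReplaceAll(tmpl, "%s", "jsmith")  // new spelling
	fmt.Println(a == b)                            // true: both replace every occurrence
}
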
diff --git a/internal/config/identity/openid/jwks_test.go b/internal/config/identity/openid/jwks_test.go
index 33e569343..fa2895bbf 100644
--- a/internal/config/identity/openid/jwks_test.go
+++ b/internal/config/identity/openid/jwks_test.go
@@ -85,6 +85,7 @@ func TestPublicKey(t *testing.T) {
 		}
 	}
 
+	//nolint:gocritic
 	if key0, ok := keys[0].(*ecdsa.PublicKey); !ok {
 		t.Fatalf("Expected ECDSA key[0], got %T", keys[0])
 	} else if key1, ok := keys[1].(*rsa.PublicKey); !ok {
diff --git a/internal/config/legacy.go b/internal/config/legacy.go
index 5afde7a8d..45b134ce4 100644
--- a/internal/config/legacy.go
+++ b/internal/config/legacy.go
@@ -19,7 +19,7 @@ package config
 
 import "github.com/minio/minio/internal/auth"
 
-//// One time migration code section
+// One time migration code section
 
 // SetCredentials - One time migration code needed, for migrating from older config to new for server credentials.
 func SetCredentials(c Config, cred auth.Credentials) {
diff --git a/internal/crypto/key.go b/internal/crypto/key.go
index 3fc247386..8e81c165d 100644
--- a/internal/crypto/key.go
+++ b/internal/crypto/key.go
@@ -90,7 +90,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
 		sealingKey   [32]byte
 		encryptedKey bytes.Buffer
 	)
-	mac := hmac.New(sha256.New, extKey[:])
+	mac := hmac.New(sha256.New, extKey)
 	mac.Write(iv[:])
 	mac.Write([]byte(domain))
 	mac.Write([]byte(SealAlgorithm))
@@ -118,7 +118,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
 	default:
 		return Errorf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm)
 	case SealAlgorithm:
-		mac := hmac.New(sha256.New, extKey[:])
+		mac := hmac.New(sha256.New, extKey)
 		mac.Write(sealedKey.IV[:])
 		mac.Write([]byte(domain))
 		mac.Write([]byte(SealAlgorithm))
@@ -126,7 +126,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
 		unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
 	case InsecureSealAlgorithm:
 		sha := sha256.New()
-		sha.Write(extKey[:])
+		sha.Write(extKey)
 		sha.Write(sealedKey.IV[:])
 		unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
 	}
diff --git a/internal/crypto/key_test.go b/internal/crypto/key_test.go
index f924fa438..ca1aa7ec9 100644
--- a/internal/crypto/key_test.go
+++ b/internal/crypto/key_test.go
@@ -164,7 +164,7 @@ func TestDerivePartKey(t *testing.T) {
 			t.Fatalf("Test %d failed to decode expected part-key: %v", i, err)
 		}
 		partKey := key.DerivePartKey(test.PartID)
-		if !bytes.Equal(partKey[:], expectedPartKey[:]) {
+		if !bytes.Equal(partKey[:], expectedPartKey) {
 			t.Errorf("Test %d derives wrong part-key: got '%s' want: '%s'", i, hex.EncodeToString(partKey[:]), test.PartKey)
 		}
 	}
diff --git a/internal/crypto/sse-kms.go b/internal/crypto/sse-kms.go
index 0ee23e466..4dfec6f05 100644
--- a/internal/crypto/sse-kms.go
+++ b/internal/crypto/sse-kms.go
@@ -109,7 +109,7 @@ func (s3 ssekms) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket
 	if err != nil {
 		return key, err
 	}
-	err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
+	err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
 	return key, err
 }
diff --git a/internal/crypto/sse-s3.go b/internal/crypto/sse-s3.go
index 9352d5fb5..60c3b81d4 100644
--- a/internal/crypto/sse-s3.go
+++ b/internal/crypto/sse-s3.go
@@ -80,7 +80,7 @@ func (s3 sses3) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket,
 	if err != nil {
 		return key, err
 	}
-	err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
+	err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
 	return key, err
 }
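
The dropped `[:]` suffixes are gocritic's unslice check: re-slicing a value that is already a slice is a no-op. It still matters for arrays, which is why `iv[:]` and `sealedKey.IV[:]` stay; per the signatures above, `iv` is a `[32]byte` array while `extKey` and `unsealKey` are `[]byte` slices. A short demonstration:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	key := []byte("already-a-slice") // slice: key[:] is redundant
	var iv [4]byte                   // array: iv[:] is required to get a slice
	copy(iv[:], "abcd")

	fmt.Println(bytes.Equal(key, key[:])) // true, same backing array either way
	fmt.Println(string(iv[:]))            // "abcd"
}
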
diff --git a/internal/disk/directio_unix.go b/internal/disk/directio_unix.go
index ba659e52b..703584ca4 100644
--- a/internal/disk/directio_unix.go
+++ b/internal/disk/directio_unix.go
@@ -40,12 +40,12 @@ func DisableDirectIO(f *os.File) error {
 	if err != nil {
 		return err
 	}
-	flag = flag & ^(syscall.O_DIRECT)
+	flag &= ^(syscall.O_DIRECT)
 	_, err = unix.FcntlInt(fd, unix.F_SETFL, flag)
 	return err
 }
 
 // AlignedBlock - pass through to directio implementation.
-func AlignedBlock(BlockSize int) []byte {
-	return directio.AlignedBlock(BlockSize)
+func AlignedBlock(blockSize int) []byte {
+	return directio.AlignedBlock(blockSize)
 }
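
`flag &= ^(syscall.O_DIRECT)` clears the O_DIRECT bit in place; Go also has a dedicated AND NOT assignment, `&^=`, with the same effect. A sketch using a hypothetical constant standing in for syscall.O_DIRECT:

package main

import "fmt"

const oDirect = 0x4000 // hypothetical stand-in for syscall.O_DIRECT

func main() {
	flag := oDirect | 0x1 // the direct-I/O bit plus one unrelated bit

	flag &= ^oDirect          // clear the bit, keep the rest
	fmt.Printf("%#x\n", flag) // 0x1

	flag |= oDirect           // set it again
	flag &^= oDirect          // AND NOT assignment, equivalent form
	fmt.Printf("%#x\n", flag) // 0x1
}
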
diff --git a/internal/dsync/drwmutex.go b/internal/dsync/drwmutex.go
index 2fbb12258..50c553aa6 100644
--- a/internal/dsync/drwmutex.go
+++ b/internal/dsync/drwmutex.go
@@ -199,9 +199,9 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), i
 
 			// If success, copy array to object
 			if isReadLock {
-				copy(dm.readLocks, locks[:])
+				copy(dm.readLocks, locks)
 			} else {
-				copy(dm.writeLocks, locks[:])
+				copy(dm.writeLocks, locks)
 			}
 
 			dm.m.Unlock()
@@ -579,7 +579,7 @@ func (dm *DRWMutex) Unlock() {
 		}
 
 		// Copy write locks to stack array
-		copy(locks, dm.writeLocks[:])
+		copy(locks, dm.writeLocks)
 	}
 
 	// Tolerance is not set, defaults to half of the locker clients.
@@ -620,7 +620,7 @@ func (dm *DRWMutex) RUnlock() {
 		}
 
 		// Copy write locks to stack array
-		copy(locks, dm.readLocks[:])
+		copy(locks, dm.readLocks)
 	}
 
 	// Tolerance is not set, defaults to half of the locker clients.
diff --git a/internal/dsync/dsync-server_test.go b/internal/dsync/dsync-server_test.go
index 7a1a1b337..09a539e80 100644
--- a/internal/dsync/dsync-server_test.go
+++ b/internal/dsync/dsync-server_test.go
@@ -94,10 +94,8 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
 	if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply {
 		l.lockMap[args.Resources[0]] = ReadLock // No locks held on the given name, so claim (first) read lock
 		*reply = true
-	} else {
-		if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
-			l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
-		}
+	} else if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
+		l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
 	}
 	return nil
 }
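
The dsync change is gocritic's elseif rewrite: a lone if nested inside an else collapses to `else if` with identical behavior and one less indent level. Before/after, sketched with hypothetical lock-state booleans:

package main

import "fmt"

func grantNested(held, writeLocked bool) {
	if !held {
		fmt.Println("granted: first reader")
	} else {
		if !writeLocked { // lone if inside else: flagged
			fmt.Println("granted: another reader")
		}
	}
}

func grantFlat(held, writeLocked bool) {
	if !held {
		fmt.Println("granted: first reader")
	} else if !writeLocked { // same control flow, flatter
		fmt.Println("granted: another reader")
	}
}

func main() {
	grantNested(true, false)
	grantFlat(true, false)
}
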
diff --git a/internal/event/config.go b/internal/event/config.go
index cbe0a1987..dc3990184 100644
--- a/internal/event/config.go
+++ b/internal/event/config.go
@@ -318,7 +318,7 @@ func ParseConfig(reader io.Reader, region string, targetList *TargetList) (*Conf
 	}
 
 	config.SetRegion(region)
-	//If xml namespace is empty, set a default value before returning.
+	// If xml namespace is empty, set a default value before returning.
 	if config.XMLNS == "" {
 		config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
 	}
diff --git a/internal/event/rules.go b/internal/event/rules.go
index b7a59cd1a..4df4c32a9 100644
--- a/internal/event/rules.go
+++ b/internal/event/rules.go
@@ -41,7 +41,7 @@ func NewPattern(prefix, suffix string) (pattern string) {
 		pattern += suffix
 	}
 
-	pattern = strings.Replace(pattern, "**", "*", -1)
+	pattern = strings.ReplaceAll(pattern, "**", "*")
 
 	return pattern
 }
diff --git a/internal/event/target/kafka_scram_client_contrib.go b/internal/event/target/kafka_scram_client_contrib.go
index 3ef5acb43..b1fce03f8 100644
--- a/internal/event/target/kafka_scram_client_contrib.go
+++ b/internal/event/target/kafka_scram_client_contrib.go
@@ -25,13 +25,14 @@ import (
 )
 
 func initScramClient(args KafkaArgs, config *sarama.Config) {
-	if args.SASL.Mechanism == "sha512" {
+	switch args.SASL.Mechanism {
+	case "sha512":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA512} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
-	} else if args.SASL.Mechanism == "sha256" {
+	case "sha256":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA256} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
-	} else {
+	default:
 		// default to PLAIN
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypePlaintext)
 	}
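
Rewriting the mechanism checks as a switch follows gocritic's ifElseChain suggestion: repeated equality tests against one value read better as cases with an explicit default. The shape of the rewrite, with hypothetical return values in place of the sarama configuration:

package main

import "fmt"

func mechanismName(m string) string {
	switch m {
	case "sha512":
		return "SCRAM-SHA-512"
	case "sha256":
		return "SCRAM-SHA-256"
	default:
		// default to PLAIN, as the patched code does
		return "PLAIN"
	}
}

func main() {
	fmt.Println(mechanismName("sha256")) // SCRAM-SHA-256
	fmt.Println(mechanismName(""))       // PLAIN
}
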
diff --git a/internal/event/target/mqtt.go b/internal/event/target/mqtt.go
index eda005de6..d9c04bc62 100644
--- a/internal/event/target/mqtt.go
+++ b/internal/event/target/mqtt.go
@@ -272,10 +272,8 @@ func NewMQTTTarget(id string, args MQTTArgs, doneCh <-chan struct{}, loggerOnce
 			// Start replaying events from the store.
 			go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce)
 		}
-	} else {
-		if token.Wait() && token.Error() != nil {
-			return target, token.Error()
-		}
+	} else if token.Wait() && token.Error() != nil {
+		return target, token.Error()
 	}
 	return target, nil
 }
diff --git a/internal/event/target/nats.go b/internal/event/target/nats.go
index dbfc38f0e..f89adc5f7 100644
--- a/internal/event/target/nats.go
+++ b/internal/event/target/nats.go
@@ -172,6 +172,7 @@ func (n NATSArgs) connectStan() (stan.Conn, error) {
 	}
 
 	var addressURL string
+	//nolint:gocritic
 	if n.Username != "" && n.Password != "" {
 		addressURL = scheme + "://" + n.Username + ":" + n.Password + "@" + n.Address.String()
 	} else if n.Token != "" {
@@ -219,18 +220,14 @@ func (target *NATSTarget) IsActive() (bool, error) {
 	if target.args.Streaming.Enable {
 		if target.stanConn == nil || target.stanConn.NatsConn() == nil {
 			target.stanConn, connErr = target.args.connectStan()
-		} else {
-			if !target.stanConn.NatsConn().IsConnected() {
-				return false, errNotConnected
-			}
+		} else if !target.stanConn.NatsConn().IsConnected() {
+			return false, errNotConnected
 		}
 	} else {
 		if target.natsConn == nil {
 			target.natsConn, connErr = target.args.connectNats()
-		} else {
-			if !target.natsConn.IsConnected() {
-				return false, errNotConnected
-			}
+		} else if !target.natsConn.IsConnected() {
+			return false, errNotConnected
 		}
 	}
diff --git a/internal/ioutil/append-file_nix.go b/internal/ioutil/append-file_nix.go
index cd1de6082..757d2251b 100644
--- a/internal/ioutil/append-file_nix.go
+++ b/internal/ioutil/append-file_nix.go
@@ -29,7 +29,7 @@ import (
 func AppendFile(dst string, src string, osync bool) error {
 	flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE
 	if osync {
-		flags = flags | os.O_SYNC
+		flags |= os.O_SYNC
 	}
 	appendFile, err := os.OpenFile(dst, flags, 0666)
 	if err != nil {
diff --git a/internal/ioutil/ioutil.go b/internal/ioutil/ioutil.go
index ab50b6b49..0623e0dce 100644
--- a/internal/ioutil/ioutil.go
+++ b/internal/ioutil/ioutil.go
@@ -136,7 +136,7 @@ func (w *LimitWriter) Write(p []byte) (n int, err error) {
 	var n1 int
 	if w.skipBytes > 0 {
 		if w.skipBytes >= int64(len(p)) {
-			w.skipBytes = w.skipBytes - int64(len(p))
+			w.skipBytes -= int64(len(p))
 			return n, nil
 		}
 		p = p[w.skipBytes:]
@@ -147,11 +147,11 @@ func (w *LimitWriter) Write(p []byte) (n int, err error) {
 	}
 	if w.wLimit < int64(len(p)) {
 		n1, err = w.Writer.Write(p[:w.wLimit])
-		w.wLimit = w.wLimit - int64(n1)
+		w.wLimit -= int64(n1)
 		return n, err
 	}
 	n1, err = w.Writer.Write(p)
-	w.wLimit = w.wLimit - int64(n1)
+	w.wLimit -= int64(n1)
 	return n, err
 }
diff --git a/internal/kms/single-key.go b/internal/kms/single-key.go
index d03de45f1..29d7f8713 100644
--- a/internal/kms/single-key.go
+++ b/internal/kms/single-key.go
@@ -191,7 +191,7 @@ func (kms secretKey) DecryptKey(keyID string, ciphertext []byte, context Context
 	mac.Write(encryptedKey.IV)
 	sealingKey := mac.Sum(nil)
 
-	block, err := aes.NewCipher(sealingKey[:])
+	block, err := aes.NewCipher(sealingKey)
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/logger/logonce.go b/internal/logger/logonce.go
index 63ad3e87a..55a2b062c 100644
--- a/internal/logger/logonce.go
+++ b/internal/logger/logonce.go
@@ -42,11 +42,9 @@ func (l *logOnceType) logOnceIf(ctx context.Context, err error, id interface{},
 	if prevErr == nil {
 		l.IDMap[id] = err
 		shouldLog = true
-	} else {
-		if prevErr.Error() != err.Error() {
-			l.IDMap[id] = err
-			shouldLog = true
-		}
+	} else if prevErr.Error() != err.Error() {
+		l.IDMap[id] = err
+		shouldLog = true
 	}
 	l.Unlock()
diff --git a/internal/logger/target/kafka/kafka_scram_client_contrib.go b/internal/logger/target/kafka/kafka_scram_client_contrib.go
index e8e78e62a..516f2b6d0 100644
--- a/internal/logger/target/kafka/kafka_scram_client_contrib.go
+++ b/internal/logger/target/kafka/kafka_scram_client_contrib.go
@@ -26,13 +26,14 @@ import (
 )
 
 func initScramClient(cfg Config, config *sarama.Config) {
-	if cfg.SASL.Mechanism == "sha512" {
+	switch cfg.SASL.Mechanism {
+	case "sha512":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA512} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
-	} else if cfg.SASL.Mechanism == "sha256" {
+	case "sha256":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA256} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
-	} else {
+	default:
 		// default to PLAIN
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypePlaintext)
 	}
diff --git a/internal/rest/client_test.go b/internal/rest/client_test.go
index 9c17a96f2..56de9dfc2 100644
--- a/internal/rest/client_test.go
+++ b/internal/rest/client_test.go
@@ -54,6 +54,7 @@ func TestNetworkError_Unwrap(t *testing.T) {
 			n := &NetworkError{
 				Err: tt.err,
 			}
+			//nolint:gocritic
 			if tt.target == nil {
 				var netErrInterface net.Error
 				if errors.As(n, &netErrInterface) != tt.want {
diff --git a/internal/s3select/csv/reader_contrib_test.go b/internal/s3select/csv/reader_contrib_test.go
index 51ef863b2..903b24582 100644
--- a/internal/s3select/csv/reader_contrib_test.go
+++ b/internal/s3select/csv/reader_contrib_test.go
@@ -22,7 +22,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"path/filepath"
 	"reflect"
 	"strings"
 	"testing"
@@ -89,7 +88,7 @@ type tester interface {
 }
 
 func openTestFile(t tester, file string) []byte {
-	f, err := ioutil.ReadFile(filepath.Join("testdata/testdata.zip"))
+	f, err := ioutil.ReadFile("testdata/testdata.zip")
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/internal/s3select/json/record.go b/internal/s3select/json/record.go
index 1047fedf7..be8d0d8a9 100644
--- a/internal/s3select/json/record.go
+++ b/internal/s3select/json/record.go
@@ -103,7 +103,7 @@ func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) {
 		return nil, fmt.Errorf("unsupported sql value %v and type %v", value, value.GetTypeString())
 	}
 
-	name = strings.Replace(name, "*", "__ALL__", -1)
+	name = strings.ReplaceAll(name, "*", "__ALL__")
 	r.KVS = append(r.KVS, jstream.KV{Key: name, Value: v})
 	return r, nil
 }
diff --git a/internal/s3select/simdj/reader_amd64_test.go b/internal/s3select/simdj/reader_amd64_test.go
index 9b320f80f..be6c770a6 100644
--- a/internal/s3select/simdj/reader_amd64_test.go
+++ b/internal/s3select/simdj/reader_amd64_test.go
@@ -87,7 +87,7 @@ func TestNDJSON(t *testing.T) {
 			if false {
 				t.Log(string(b))
 			}
-			//_ = ioutil.WriteFile(filepath.Join("testdata", tt.name+".json"), b, os.ModePerm)
+			// _ = ioutil.WriteFile(filepath.Join("testdata", tt.name+".json"), b, os.ModePerm)
 
 		parser:
 			for {
diff --git a/internal/s3select/sql/parser.go b/internal/s3select/sql/parser.go
index 2b35ae35d..8c7b2991a 100644
--- a/internal/s3select/sql/parser.go
+++ b/internal/s3select/sql/parser.go
@@ -44,7 +44,7 @@ func (ls *LiteralString) Capture(values []string) error {
 	n := len(values[0])
 	r := values[0][1 : n-1]
 	// Translate doubled quotes
-	*ls = LiteralString(strings.Replace(r, "''", "'", -1))
+	*ls = LiteralString(strings.ReplaceAll(r, "''", "'"))
 	return nil
 }
@@ -78,7 +78,7 @@ func (qi *QuotedIdentifier) Capture(values []string) error {
 	r := values[0][1 : n-1]
 
 	// Translate doubled quotes
-	*qi = QuotedIdentifier(strings.Replace(r, `""`, `"`, -1))
+	*qi = QuotedIdentifier(strings.ReplaceAll(r, `""`, `"`))
 	return nil
 }
diff --git a/internal/s3select/sql/value.go b/internal/s3select/sql/value.go
index 57a1b7b74..720323ba0 100644
--- a/internal/s3select/sql/value.go
+++ b/internal/s3select/sql/value.go
@@ -231,6 +231,7 @@ func (v Value) ToArray() (val []Value, ok bool) {
 
 // IsNull - checks if value is missing.
 func (v Value) IsNull() bool {
+	//nolint:gocritic
 	switch v.value.(type) {
 	case nil:
 		return true
@@ -245,6 +246,7 @@ func (v Value) IsArray() (ok bool) {
 }
 
 func (v Value) isNumeric() bool {
+	//nolint:gocritic
 	switch v.value.(type) {
 	case int64, float64:
 		return true
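
The two parser call sites unescape SQL quoting conventions, where a doubled quote inside a literal or identifier stands for a single one; ReplaceAll states that intent directly. A quick demonstration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// body of the single-quoted SQL literal 'O''Brien'
	fmt.Println(strings.ReplaceAll("O''Brien", "''", "'")) // O'Brien

	// body of the double-quoted SQL identifier "say ""hi"""
	fmt.Println(strings.ReplaceAll(`say ""hi""`, `""`, `"`)) // say "hi"
}
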
diff --git a/internal/smart/smart.go b/internal/smart/smart.go
index aaf4eb667..ddfecdcb5 100644
--- a/internal/smart/smart.go
+++ b/internal/smart/smart.go
@@ -44,7 +44,7 @@ func GetInfo(device string) (madmin.SmartInfo, error) {
 	}
 
 	var db drivedb.DriveDb
-	dec := yaml.NewDecoder(bytes.NewBuffer(MustAsset("drivedb.yaml")))
+	dec := yaml.NewDecoder(bytes.NewReader(MustAsset("drivedb.yaml")))
 
 	err := dec.Decode(&db)
 	if err != nil {
@@ -108,7 +108,7 @@ func getNvmeInfo(d *NVMeDevice) (*madmin.SmartNvmeInfo, error) {
 	}
 
 	var controller nvmeIdentController
-	binary.Read(bytes.NewBuffer(buf[:]), utils.NativeEndian, &controller)
+	binary.Read(bytes.NewReader(buf), utils.NativeEndian, &controller)
 
 	nvmeInfo.VendorID = strings.TrimSpace(fmt.Sprintf("%#04x", controller.VendorID))
 	nvmeInfo.ModelNum = strings.TrimSpace(fmt.Sprintf("%s", controller.ModelNumber))
@@ -124,7 +124,7 @@ func getNvmeInfo(d *NVMeDevice) (*madmin.SmartNvmeInfo, error) {
 	}
 
 	var sl nvmeSMARTLog
-	binary.Read(bytes.NewBuffer(buf2[:]), utils.NativeEndian, &sl)
+	binary.Read(bytes.NewReader(buf2), utils.NativeEndian, &sl)
 
 	unitsRead := le128ToBigInt(sl.DataUnitsRead)
 	unitsWritten := le128ToBigInt(sl.DataUnitsWritten)
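
bytes.NewReader is the better fit than bytes.NewBuffer when a byte slice is only read: both satisfy io.Reader, but a Reader carries no write-side bookkeeping and cannot mutate the data, and binary.Read and yaml.NewDecoder need only the read side. A sketch with a hypothetical fixed-layout record in place of the NVMe structures:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// hdr is a hypothetical fixed-size record, decoded the same way the
// smart.go code decodes its controller and SMART-log pages.
type hdr struct {
	Magic uint32
	Count uint16
}

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x02}

	var h hdr
	// bytes.NewReader wraps the slice read-only; no Buffer bookkeeping needed.
	if err := binary.Read(bytes.NewReader(raw), binary.BigEndian, &h); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("magic=%#x count=%d\n", h.Magic, h.Count) // magic=0xdeadbeef count=2
}
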