add gocritic/ruleguard checks back again, cleanup code. (#13665)

- remove some duplicated code
- reported a bug, separately fixed in #13664
- using strings.ReplaceAll() where needed
- using filepath.ToSlash() where needed (both shown in the sketch below)
- remove all non-Go style comments from the codebase
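
A minimal sketch of the two stdlib idioms this cleanup standardizes on; the strings and paths below are invented for illustration:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// strings.ReplaceAll(s, old, new) is equivalent to
	// strings.Replace(s, old, new, -1), with the intent spelled out.
	fmt.Println(strings.ReplaceAll("a+b+c", "+", "%20")) // a%20b%20c

	// filepath.ToSlash rewrites OS-specific separators to '/', so paths
	// serialize consistently across platforms (on Windows `dir\file`
	// becomes `dir/file`; on Unix it is a no-op).
	fmt.Println(filepath.ToSlash(filepath.Join("dir", "file"))) // dir/file
}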

Co-authored-by: Aditya Manthramurthy <donatello@users.noreply.github.com>
Harshavardhana 2021-11-16 09:28:29 -08:00 committed by GitHub
parent 07c5e72cdb
commit 661b263e77
111 changed files with 409 additions and 450 deletions

@@ -23,12 +23,19 @@ linters:
   - structcheck
   - unconvert
   - varcheck
+  - gocritic

 issues:
   exclude-use-default: false
   exclude:
     - should have a package comment
     - error strings should not be capitalized or end with punctuation or a newline
+    # todo fix these when we get enough time.
+    - "singleCaseSwitch: should rewrite switch statement to if statement"
+    - "unlambda: replace"
+    - "captLocal:"
+    - "ifElseChain:"
+    - "elseif:"

 service:
-  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
+  golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly
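
For context on the excluded checks: gocritic's ifElseChain asks for an if/else-if chain over a single value to be rewritten as a switch. A sketch with a hypothetical helper (not code from this commit); the SetGroupStatus hunk below applies the same rewrite:

package main

import (
	"errors"
	"fmt"
)

// statusFlag maps a status string to a boolean, written the way
// ifElseChain suggests: a switch instead of if/else-if/else.
func statusFlag(status string) (bool, error) {
	switch status {
	case "enabled":
		return true, nil
	case "disabled":
		return false, nil
	default:
		return false, errors.New("invalid status")
	}
}

func main() {
	fmt.Println(statusFlag("enabled"))
}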

@@ -19,7 +19,7 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
 	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@latest
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

@@ -215,13 +215,11 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
 				return
 			}
 		}
-	} else {
-		if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
-			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-			return
-		}
+	} else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
 	}
 }

 // RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
 func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {

@@ -323,11 +323,12 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
 	status := vars["status"]

 	var err error
-	if status == statusEnabled {
+	switch status {
+	case statusEnabled:
 		err = globalIAMSys.SetGroupStatus(ctx, group, true)
-	} else if status == statusDisabled {
+	case statusDisabled:
 		err = globalIAMSys.SetGroupStatus(ctx, group, false)
-	} else {
+	default:
 		err = errInvalidArgument
 	}
 	if err != nil {

@@ -1356,6 +1356,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
 	ldap := madmin.LDAP{}
 	if globalLDAPConfig.Enabled {
 		ldapConn, err := globalLDAPConfig.Connect()
+		//nolint:gocritic
 		if err != nil {
 			ldap.Status = string(madmin.ItemOffline)
 		} else if ldapConn == nil {
@@ -1636,8 +1637,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 	anonymizeCmdLine := func(cmdLine string) string {
 		if !globalIsDistErasure {
 			// FS mode - single server - hard code to `server1`
-			anonCmdLine := strings.Replace(cmdLine, globalLocalNodeName, "server1", -1)
-			return strings.Replace(anonCmdLine, globalMinioConsoleHost, "server1", -1)
+			anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
+			return strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
 		}

 		// Server start command regex groups:

@@ -491,7 +491,7 @@ func (h *healSequence) getScannedItemsCount() int64 {
 	defer h.mutex.RUnlock()

 	for _, v := range h.scannedItemsMap {
-		count = count + v
+		count += v
 	}
 	return count
 }

@@ -43,8 +43,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 	// Admin router
 	adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()

-	/// Service operations
-
 	adminVersions := []string{
 		adminAPIVersionPrefix,
 	}
@@ -71,7 +69,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))

 		if globalIsDistErasure || globalIsErasure {
-			/// Heal operations
+			// Heal operations

 			// Heal processing endpoint.
 			adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
@@ -79,9 +77,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 			adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))

 			adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler)))
-
-			/// Health operations
-
 		}

 		// Profiling operations
@@ -106,7 +101,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 			adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
 		}

-		/// Config import/export bulk operations
+		// Config import/export bulk operations
 		if enableConfigOps {
 			// Get config
 			adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler)))

@@ -973,7 +973,7 @@ var errorCodes = errorCodeMap{
 		HTTPStatusCode: http.StatusNotFound,
 	},

-	/// Bucket notification related errors.
+	// Bucket notification related errors.
 	ErrEventNotification: {
 		Code:           "InvalidArgument",
 		Description:    "A specified event is not supported for notifications.",
@@ -1120,14 +1120,14 @@ var errorCodes = errorCodeMap{
 		HTTPStatusCode: http.StatusForbidden,
 	},

-	/// S3 extensions.
+	// S3 extensions.
 	ErrContentSHA256Mismatch: {
 		Code:           "XAmzContentSHA256Mismatch",
 		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},

-	/// MinIO extensions.
+	// MinIO extensions.
 	ErrStorageFull: {
 		Code:           "XMinioStorageFull",
 		Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
@@ -2074,6 +2074,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	default:
 		var ie, iw int
 		// This work-around is to handle the issue golang/go#30648
+		//nolint:gocritic
 		if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()),
 			"request declared a Content-Length of %d but only wrote %d bytes",
 			&ie, &iw); ferr != nil {
@@ -2229,6 +2230,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 		}
 		// Add more Gateway SDKs here if any in future.
 	default:
+		//nolint:gocritic
 		if errors.Is(err, errMalformedEncoding) {
 			apiErr = APIError{
 				Code:           "BadRequest",

@@ -301,7 +301,8 @@ func registerAPIRouter(router *mux.Router) {
 		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")

-		/// Bucket operations
+		// Bucket operations
+
 		// GetBucketLocation
 		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")
@@ -452,7 +453,7 @@ func registerAPIRouter(router *mux.Router) {
 				collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
 	}

-	/// Root operation
+	// Root operation

 	// ListenNotification
 	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(

@@ -903,7 +903,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
 		// S3 feature to replace ${filename} found in Key form field
 		// by the filename attribute passed in multipart
-		formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
+		formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName))
 	}

 	object := trimLeadingSlash(formValues.Get("Key"))

@@ -59,8 +59,8 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
 	}

 	if encodingType != "" {
-		// Only url encoding type is supported
-		if strings.ToLower(encodingType) != "url" {
+		// AWS S3 spec only supports 'url' encoding type
+		if !strings.EqualFold(encodingType, "url") {
 			return ErrInvalidEncodingMethod
 		}
 	}
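
The EqualFold change works because strings.EqualFold compares two strings under Unicode case-folding without allocating a lowered copy of either one; a standalone sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same result as strings.ToLower(a) == strings.ToLower(b) for this
	// input, but allocation-free.
	fmt.Println(strings.EqualFold("URL", "url")) // true
	fmt.Println(strings.EqualFold("URL", "xml")) // false
}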

@@ -172,11 +172,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
 		vStr, ok := v.(string)
 		if ok {
 			// Special case for AD/LDAP STS users
-			if k == ldapUser {
+			switch k {
+			case ldapUser:
 				args["user"] = []string{vStr}
-			} else if k == ldapUserN {
+			case ldapUserN:
 				args["username"] = []string{vStr}
-			} else {
+			default:
 				args[k] = []string{vStr}
 			}
 		}

@@ -181,7 +181,6 @@ var parseReplicationDecisionTest = []struct {
 func TestParseReplicateDecision(t *testing.T) {
 	for i, test := range parseReplicationDecisionTest {
-		//dsc, err := parseReplicateDecision(test.dsc)
 		dsc, err := parseReplicateDecision(test.expDsc.String())

 		if err != nil {

@@ -56,7 +56,7 @@ func (brs BucketReplicationStats) Clone() BucketReplicationStats {
 	c := BucketReplicationStats{
 		Stats: make(map[string]*BucketReplicationStat, len(brs.Stats)),
 	}
-	//this is called only by replicationStats cache and already holds a read lock before calling Clone()
+	// This is called only by replicationStats cache and already holds a read lock before calling Clone()
 	for arn, st := range brs.Stats {
 		c.Stats[arn] = &BucketReplicationStat{
 			FailedSize: atomic.LoadInt64(&st.FailedSize),

@@ -526,6 +526,7 @@ func handleCommonEnvVars() {
 	// Warn user if deprecated environment variables,
 	// "MINIO_ACCESS_KEY" and "MINIO_SECRET_KEY", are defined
 	// Check all error conditions first
+	//nolint:gocritic
 	if !env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
 		logger.Fatal(config.ErrMissingEnvCredentialRootUser(nil), "Unable to start MinIO")
 	} else if env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) {
@@ -544,6 +545,7 @@
 	var user, password string
 	haveRootCredentials := false
 	haveAccessCredentials := false
+	//nolint:gocritic
 	if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
 		user = env.Get(config.EnvRootUser, "")
 		password = env.Get(config.EnvRootPassword, "")

@@ -696,9 +696,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
 		// to list the ENV, for regular k/v EnableKey is
 		// implicit, for ENVs we cannot make it implicit.
 		if subSysHelp.MultipleTargets {
-			envK := config.EnvPrefix + strings.Join([]string{
-				strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
-			}, config.EnvWordDelimiter)
+			envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(madmin.EnableKey)
 			envHelp = append(envHelp, config.HelpKV{
 				Key:         envK,
 				Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
@@ -707,9 +705,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
 			})
 		}
 		for _, hkv := range h {
-			envK := config.EnvPrefix + strings.Join([]string{
-				strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
-			}, config.EnvWordDelimiter)
+			envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(hkv.Key)
 			envHelp = append(envHelp, config.HelpKV{
 				Key:         envK,
 				Description: hkv.Description,

@@ -34,14 +34,12 @@ import (
 	"github.com/minio/pkg/quick"
 )

-/////////////////// Config V1 ///////////////////
 type configV1 struct {
 	Version   string `json:"version"`
 	AccessKey string `json:"accessKeyId"`
 	SecretKey string `json:"secretAccessKey"`
 }

-/////////////////// Config V2 ///////////////////
 type configV2 struct {
 	Version     string `json:"version"`
 	Credentials struct {
@@ -63,7 +61,6 @@ type configV2 struct {
 	} `json:"fileLogger"`
 }

-/////////////////// Config V3 ///////////////////
 // backendV3 type.
 type backendV3 struct {
 	Type  string `json:"type"`

@@ -203,7 +203,7 @@ func (d *dataUpdateTracker) latestWithDir(dir string) uint64 {
 // start a saver goroutine.
 // All of these will exit when the context is canceled.
 func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
-	if len(drives) <= 0 {
+	if len(drives) == 0 {
 		logger.LogIf(ctx, errors.New("dataUpdateTracker.start: No drives specified"))
 		return
 	}
@@ -220,7 +220,7 @@ func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
 // If no valid data usage tracker can be found d will remain unchanged.
 // If object is shared the caller should lock it.
 func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
-	if len(drives) <= 0 {
+	if len(drives) == 0 {
 		logger.LogIf(ctx, errors.New("dataUpdateTracker.load: No drives specified"))
 		return
 	}

@@ -773,7 +773,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma
 		return nil, err
 	}

-	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
+	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey, MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
 	if err != nil {
 		return nil, crypto.ErrInvalidCustomerKey
 	}

@@ -61,7 +61,7 @@ func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
 		panic("Negative rotations are not allowed")
 	}

-	skipOffset = skipOffset % int64(len(alphabets))
+	skipOffset %= int64(len(alphabets))
 	as := make([]byte, 2*len(alphabets))
 	copy(as, alphabets)
 	copy(as[len(alphabets):], alphabets)

@@ -242,7 +242,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
 	for _, argPattern := range argPatterns {
 		var totalSize uint64 = 1
 		for _, p := range argPattern {
-			totalSize = totalSize * uint64(len(p.Seq))
+			totalSize *= uint64(len(p.Seq))
 		}
 		totalSizes = append(totalSizes, totalSize)
 	}

@@ -497,6 +497,7 @@ func NewEndpoints(args ...string) (endpoints Endpoints, err error) {
 		}

 		// All endpoints have to be same type and scheme if applicable.
+		//nolint:gocritic
 		if i == 0 {
 			endpointType = endpoint.Type()
 			scheme = endpoint.Scheme

@@ -32,7 +32,7 @@ var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnform
 // list all errors that can be ignored in a bucket metadata operation.
 var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

-/// Bucket operations
+// Bucket operations

 // MakeBucket - make a bucket.
 func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {

@@ -95,7 +95,7 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
 // It returns an error if the decoding failed.
 func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
 	var isZero = 0
-	for _, b := range data[:] {
+	for _, b := range data {
 		if len(b) == 0 {
 			isZero++
 			break

@@ -110,7 +110,7 @@ func TestErasureDecode(t *testing.T) {
 		for i, disk := range disks {
 			writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
 		}
-		n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
+		n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
 		closeBitrotWriters(writers)
 		if err != nil {
 			setup.Remove()

@@ -235,7 +235,7 @@ func TestListOnlineDisks(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 			break
 		}
@@ -414,7 +414,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 			break
 		}
@@ -563,7 +563,7 @@ func TestDisksWithAllParts(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 		}
 	}

@@ -163,7 +163,7 @@ func TestHealingDanglingObject(t *testing.T) {
 		t.Fatal(err)
 	}
-	//defer removeRoots(fsDirs)
+	defer removeRoots(fsDirs)

 	// Everything is fine, should return nil
 	objLayer, disks, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))

@@ -292,25 +292,25 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
 	for i, meta := range metaArr {
 		if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
 			for _, part := range meta.Parts {
-				h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
+				fmt.Fprintf(h, "part.%d", part.Number)
 			}
-			h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
+			fmt.Fprintf(h, "%v", meta.Erasure.Distribution)

 			// make sure that length of Data is same
-			h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
+			fmt.Fprintf(h, "%v", len(meta.Data))

 			// ILM transition fields
-			h.Write([]byte(meta.TransitionStatus))
-			h.Write([]byte(meta.TransitionTier))
-			h.Write([]byte(meta.TransitionedObjName))
-			h.Write([]byte(meta.TransitionVersionID))
+			fmt.Fprint(h, meta.TransitionStatus)
+			fmt.Fprint(h, meta.TransitionTier)
+			fmt.Fprint(h, meta.TransitionedObjName)
+			fmt.Fprint(h, meta.TransitionVersionID)

 			// Server-side replication fields
-			h.Write([]byte(fmt.Sprintf("%v", meta.MarkDeleted)))
-			h.Write([]byte(meta.Metadata[string(meta.ReplicationState.ReplicaStatus)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationStatusInternal]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal]))
+			fmt.Fprintf(h, "%v", meta.MarkDeleted)
+			fmt.Fprint(h, meta.Metadata[string(meta.ReplicationState.ReplicaStatus)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationStatusInternal])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal])

 			metaHashes[i] = hex.EncodeToString(h.Sum(nil))
 			h.Reset()

@@ -46,7 +46,7 @@ import (
 // list all errors which can be ignored in object operations.
 var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

-/// Object Operations
+// Object Operations

 func countOnlineDisks(onlineDisks []StorageAPI) (online int) {
 	for _, onlineDisk := range onlineDisks {

@@ -327,7 +327,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc
 	flags := os.O_CREATE | os.O_WRONLY

 	if globalFSOSync {
-		flags = flags | os.O_SYNC
+		flags |= os.O_SYNC
 	}

 	writer, err := lock.Open(filePath, flags, 0666)
 	if err != nil {

@@ -109,7 +109,7 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {
 		}
 	}

-	/// Save new reader on the map.
+	// Save new reader on the map.

 	// It is possible by this time due to concurrent
 	// i/o we might have another lock present. Lookup

@@ -398,7 +398,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
 	return cache, err
 }

-/// Bucket operations
+// Bucket operations

 // getBucketDir - will convert incoming bucket names to
 // corresponding valid bucket names on the backend in a platform
@@ -601,7 +601,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, opts Delet
 	return nil
 }

-/// Object Operations
+// Object Operations

 // CopyObject - copy object source object to destination object.
 // if source object and destination object are same we only

@@ -274,7 +274,7 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string)
 	encodeKey := func(key string) string {
 		tokens := strings.Split(key, "_")
 		for i := range tokens {
-			tokens[i] = strings.Replace(tokens[i], "-", "_", -1)
+			tokens[i] = strings.ReplaceAll(tokens[i], "-", "_")
 		}
 		return strings.Join(tokens, "__")
 	}
@@ -367,7 +367,7 @@ func azurePropertiesToS3Meta(meta azblob.Metadata, props azblob.BlobHTTPHeaders,
 	decodeKey := func(key string) string {
 		tokens := strings.Split(key, "__")
 		for i := range tokens {
-			tokens[i] = strings.Replace(tokens[i], "_", "-", -1)
+			tokens[i] = strings.ReplaceAll(tokens[i], "_", "-")
 		}
 		return strings.Join(tokens, "_")
 	}

@@ -531,7 +531,7 @@ func toGCSPageToken(name string) string {
 		byte(length & 0xFF),
 	}

-	length = length >> 7
+	length >>= 7
 	if length > 0 {
 		b = append(b, byte(length&0xFF))
 	}

@@ -289,7 +289,7 @@ func validateFormFieldSize(ctx context.Context, formValues http.Header) error {
 // Extract form fields and file data from a HTTP POST Policy
 func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
-	/// HTML Form values
+	// HTML Form values
 	fileName = ""

 	// Canonicalize the form values into http.Header.

@@ -558,9 +558,7 @@ func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, mem
 		// exist.
 		gi = newGroupInfo(members)
 	} else {
-		mergedMembers := append(gi.Members, members...)
-		uniqMembers := set.CreateStringSet(mergedMembers...).ToSlice()
-		gi.Members = uniqMembers
+		gi.Members = set.CreateStringSet(append(gi.Members, members...)...).ToSlice()
 	}

 	if err := store.saveGroupInfo(ctx, group, gi); err != nil {

@@ -351,7 +351,6 @@ func (sys *IAMSys) loadWatchedEvent(ctx context.Context, event iamWatchEvent) (e
 	ctx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
 	defer cancel()

-	if event.isCreated {
 	switch {
 	case usersPrefix:
 		accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
@@ -381,38 +380,6 @@ func (sys *IAMSys) loadWatchedEvent(ctx context.Context, event iamWatchEvent) (e
 		user := strings.TrimSuffix(policyMapFile, ".json")
 		err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
 	}
-	} else {
-		// delete event
-		switch {
-		case usersPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
-		case stsPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
-		case svcPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
-		case groupsPrefix:
-			group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
-			err = sys.store.GroupNotificationHandler(ctx, group)
-		case policyPrefix:
-			policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
-			err = sys.store.PolicyNotificationHandler(ctx, policyName)
-		case policyDBUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
-		case policyDBSTSUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
-		case policyDBGroupsPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
-		}
-	}

 	return err
 }

@@ -620,12 +620,10 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan<
 			}
 			best = other
 			bestIdx = otherIdx
-		} else {
-			// Keep best, replace "other"
-			if err := selectFrom(otherIdx); err != nil {
-				return err
-			}
+		} else if err := selectFrom(otherIdx); err != nil {
+			// Keep best, replace "other"
+			return err
 		}
 		continue
 	}
 	if best.name > other.name {
@@ -636,11 +634,9 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan<
 		if best.name > last {
 			out <- *best
 			last = best.name
-		} else {
-			if serverDebugLog {
-				console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last)
-			}
+		} else if serverDebugLog {
+			console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last)
 		}
 		// Replace entry we just sent.
 		if err := selectFrom(bestIdx); err != nil {
 			return err

@@ -81,6 +81,7 @@ func Test_metaCacheEntries_merge(t *testing.T) {
 	}
 	// Merge b into a
 	a.merge(b, -1)
+	//nolint:gocritic
 	want := append(loadMetacacheSampleNames, loadMetacacheSampleNames...)
 	sort.Strings(want)
 	got := a.entries().names()

@@ -1623,20 +1623,18 @@ func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) {
 			continue
 		}
 		for k, v := range metric.Histogram {
-			l := append(labels, metric.HistogramBucketLabel)
-			lv := append(values, k)
 			out <- prometheus.MustNewConstMetric(
 				prometheus.NewDesc(
 					prometheus.BuildFQName(string(metric.Description.Namespace),
 						string(metric.Description.Subsystem),
 						string(metric.Description.Name)),
 					metric.Description.Help,
-					l,
+					append(labels, metric.HistogramBucketLabel),
 					metric.StaticLabels,
 				),
 				prometheus.GaugeValue,
 				float64(v),
-				lv...)
+				append(values, k)...)
 		}
 		continue
 	}

@@ -341,22 +341,18 @@ func sameLocalAddrs(addr1, addr2 string) (bool, error) {
 	if host1 == "" {
 		// If empty host means it is localhost
 		addr1Local = true
-	} else {
-		// Host not empty, check if it is local
-		if addr1Local, err = isLocalHost(host1, port1, port1); err != nil {
-			return false, err
-		}
+	} else if addr1Local, err = isLocalHost(host1, port1, port1); err != nil {
+		// Host not empty, check if it is local
+		return false, err
 	}

 	if host2 == "" {
 		// If empty host means it is localhost
 		addr2Local = true
-	} else {
-		// Host not empty, check if it is local
-		if addr2Local, err = isLocalHost(host2, port2, port2); err != nil {
-			return false, err
-		}
+	} else if addr2Local, err = isLocalHost(host2, port2, port2); err != nil {
+		// Host not empty, check if it is local
+		return false, err
 	}

 	// If both of addresses point to the same machine, check if
 	// have the same port

@@ -484,7 +484,7 @@ func (e InvalidObjectState) Error() string {
 	return "The operation is not valid for the current state of the object " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")"
 }

-/// Bucket related errors.
+// Bucket related errors.

 // BucketNameInvalid - bucketname provided is invalid.
 type BucketNameInvalid GenericError
@@ -494,7 +494,7 @@ func (e BucketNameInvalid) Error() string {
 	return "Bucket name invalid: " + e.Bucket
 }

-/// Object related errors.
+// Object related errors.

 // ObjectNameInvalid - object name provided is invalid.
 type ObjectNameInvalid GenericError
@@ -569,7 +569,7 @@ func (e OperationTimedOut) Error() string {
 	return "Operation timed out"
 }

-/// Multipart related errors.
+// Multipart related errors.

 // MalformedUploadID malformed upload id.
 type MalformedUploadID struct {

@@ -1694,9 +1694,9 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 		{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
 		{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
 		// Part with size larger than 5Mb.
-		{bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
-		{bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},
-		{bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(string(validPart)))},
+		{bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
+		{bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},
+		{bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(validPart))},
 	}
 	sha256sum := ""
 	var opts ObjectOptions

@@ -1574,7 +1574,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		return
 	}

-	/// if Content-Length is unknown/missing, deny the request
+	// if Content-Length is unknown/missing, deny the request
 	size := r.ContentLength
 	rAuthType := getRequestAuthType(r)
 	if rAuthType == authTypeStreamingSigned {
@@ -1595,7 +1595,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
 		return
 	}

-	/// maximum Upload size for objects in a single operation
+	// maximum Upload size for objects in a single operation
 	if isMaxObjectSize(size) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
 		return
@@ -1924,7 +1924,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 		return
 	}

-	/// if Content-Length is unknown/missing, deny the request
+	// if Content-Length is unknown/missing, deny the request
 	size := r.ContentLength
 	rAuthType := getRequestAuthType(r)
 	if rAuthType == authTypeStreamingSigned {
@@ -1946,7 +1946,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 		return
 	}

-	/// maximum Upload size for objects in a single operation
+	// maximum Upload size for objects in a single operation
 	if isMaxObjectSize(size) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
 		return
@@ -2155,7 +2155,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
 	writeSuccessResponseHeadersOnly(w)
 }

-/// Multipart objectAPIHandlers
+// Multipart objectAPIHandlers

 // NewMultipartUploadHandler - New multipart upload.
 // Notice: The S3 client can send secret keys in headers for encryption related jobs,
@@ -2478,7 +2478,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
 		return
 	}

-	/// maximum copy size for multipart objects in a single operation
+	// maximum copy size for multipart objects in a single operation
 	if isMaxAllowedPartSize(length) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
 		return
@@ -2670,7 +2670,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		return
 	}

-	/// if Content-Length is unknown/missing, throw away
+	// if Content-Length is unknown/missing, throw away
 	size := r.ContentLength

@@ -2693,7 +2693,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
 		return
 	}

-	/// maximum Upload size for multipart objects in a single operation
+	// maximum Upload size for multipart objects in a single operation
 	if isMaxAllowedPartSize(size) {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
 		return
@@ -3319,7 +3319,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 	}
 }

-/// Delete objectAPIHandlers
+// Delete objectAPIHandlers

 // DeleteObjectHandler - delete an object
 func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {

@@ -2706,13 +2706,13 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
 		{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
 		{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
 		// Part with size larger than 5 MiB.
-		{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
-		{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},
+		{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
+		{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},
 		// Part with size larger than 5 MiB.
 		// Parts uploaded for anonymous/unsigned API handler test.
-		{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))},
-		{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))},
+		{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))},
+		{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))},
 	}

 	// Iterating over creatPartCases to generate multipart chunks.
 	for _, part := range parts {
@@ -3077,13 +3077,13 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
 		{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
 		{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
 		// Part with size larger than 5 MiB.
-		{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
-		{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},
+		{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
+		{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},
 		// Part with size larger than 5 MiB.
 		// Parts uploaded for anonymous/unsigned API handler test.
-		{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))},
-		{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))},
+		{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))},
+		{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))},
 	}

 	// Iterating over createPartCases to generate multipart chunks.
 	for _, part := range parts {

@@ -150,7 +150,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
 	randomPerm := rand.Perm(10)
 	randomString := ""
 	for _, num := range randomPerm {
-		randomString = randomString + strconv.Itoa(num)
+		randomString += strconv.Itoa(num)
 	}

 	expectedETaghex := getMD5Hash([]byte(randomString))
@@ -189,7 +189,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
 	randomPerm := rand.Perm(100)
 	randomString := ""
 	for _, num := range randomPerm {
-		randomString = randomString + strconv.Itoa(num)
+		randomString += strconv.Itoa(num)
 	}

 	expectedETaghex := getMD5Hash([]byte(randomString))

@@ -61,8 +61,8 @@ func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey stri
 		keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)

 	retStr := "{"
 	retStr = retStr + expirationStr + ","
-	retStr = retStr + conditionStr
-	retStr = retStr + "}"
+	retStr += conditionStr
+	retStr += "}"

 	return []byte(retStr)
 }
@@ -89,8 +89,8 @@ func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration t
 	conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)

 	retStr := "{"
 	retStr = retStr + expirationStr + ","
-	retStr = retStr + conditionStr
-	retStr = retStr + "}"
+	retStr += conditionStr
+	retStr += "}"

 	return []byte(retStr)
 }
@@ -108,8 +108,8 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []
 	conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr)

 	retStr := "{"
 	retStr = retStr + expirationStr + ","
-	retStr = retStr + conditionStr
-	retStr = retStr + "}"
+	retStr += conditionStr
+	retStr += "}"

 	return []byte(retStr)
 }

@@ -303,9 +303,8 @@ func checkPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) erro
 			if !condPassed {
 				return fmt.Errorf("Invalid according to Policy: Policy Condition failed")
 			}
-		} else {
-			// This covers all conditions X-Amz-Meta-* and X-Amz-*
-			if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") {
+		} else if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") {
+			// This covers all conditions X-Amz-Meta-* and X-Amz-*
 			// Check if policy condition is satisfied
 			condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
 			if !condPassed {
@@ -313,7 +312,6 @@
 			}
 		}
 	}
-	}

 	return nil
 }

@@ -365,7 +365,7 @@ func (s *TestSuiteCommon) TestBucketPolicy(c *check) {
 	// assert the http response status code.
 	c.Assert(response.StatusCode, http.StatusOK)

-	/// Put a new bucket policy.
+	// Put a new bucket policy.
 	request, err = newTestSignedRequest(http.MethodPut, getPutPolicyURL(s.endPoint, bucketName),
 		int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), s.accessKey, s.secretKey, s.signer)
 	c.Assert(err, nil)

@@ -261,7 +261,7 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues
 	// Replace all spaced strings, some clients can send spaced
 	// parameters and some won't. So we pro-actively remove any spaces
 	// to make parsing easier.
-	v4Auth = strings.Replace(v4Auth, " ", "", -1)
+	v4Auth = strings.ReplaceAll(v4Auth, " ", "")
 	if v4Auth == "" {
 		return sv, ErrAuthHeaderEmpty
 	}

@@ -103,7 +103,7 @@ func getSignedHeaders(signedHeaders http.Header) string {
 //	<HashedPayload>
 //
 func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
-	rawQuery := strings.Replace(queryStr, "+", "%20", -1)
+	rawQuery := strings.ReplaceAll(queryStr, "+", "%20")
 	encodedPath := s3utils.EncodePath(urlPath)
 	canonicalRequest := strings.Join([]string{
 		method,
@@ -130,9 +130,9 @@ func getScope(t time.Time, region string) string {
 // getStringToSign a string based on selected query values.
 func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
 	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
-	stringToSign = stringToSign + scope + "\n"
+	stringToSign += scope + "\n"
 	canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
-	stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
+	stringToSign += hex.EncodeToString(canonicalRequestBytes[:])
 	return stringToSign
 }
@@ -306,7 +306,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s
 		return ErrInvalidToken
 	}

-	/// Verify finally if signature is same.
+	// Verify finally if signature is same.

 	// Get canonical request.
 	presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)

@@ -29,13 +29,8 @@ import (
 	xnet "github.com/minio/pkg/net"
 )

-///////////////////////////////////////////////////////////////////////////////
-//
 // Storage REST server, storageRESTReceiver and StorageRESTClient are
 // inter-dependent, below test functions are sufficient to test all of them.
-//
-///////////////////////////////////////////////////////////////////////////////
-
 func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) {
 	testCases := []struct {
 		expectErr bool

@@ -436,7 +436,7 @@ func parseHexUint(v []byte) (n uint64, err error) {
 	for i, b := range v {
 		switch {
 		case '0' <= b && b <= '9':
-			b = b - '0'
+			b -= '0'
 		case 'a' <= b && b <= 'f':
 			b = b - 'a' + 10
 		case 'A' <= b && b <= 'F':

@@ -119,19 +119,19 @@ func TestMain(m *testing.M) {
 // concurrency level for certain parallel tests.
 const testConcurrencyLevel = 10
-///
-/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
-///
-/// User-Agent:
-///
-/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
-/// (that are executed by other agents) or when customers pass requests through proxies, which may
-/// modify the user-agent.
-///
-/// Authorization:
-///
-/// Is skipped for obvious reasons
-///
+//
+// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
+//
+// User-Agent:
+//
+// This is ignored from signing because signing this causes problems with generating pre-signed URLs
+// (that are executed by other agents) or when customers pass requests through proxies, which may
+// modify the user-agent.
+//
+// Authorization:
+//
+// Is skipped for obvious reasons
+//
 var ignoredHeaders = map[string]bool{
 	"Authorization": true,
 	"User-Agent":    true,
@@ -633,7 +633,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
 	signedHeaders := strings.Join(headers, ";")
 	// Get canonical query string.
-	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
 	// Get canonical URI.
 	canonicalURI := s3utils.EncodePath(req.URL.Path)
@@ -665,8 +665,8 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
 	}, SlashSeparator)
 	stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
-	stringToSign = stringToSign + scope + "\n"
-	stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
+	stringToSign += scope + "\n"
+	stringToSign += getSHA256Hash([]byte(canonicalRequest))
 	date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
 	region := sumHMAC(date, []byte(globalMinioDefaultRegion))
@@ -749,7 +749,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in
 		stringToSign = stringToSign + scope + "\n"
 		stringToSign = stringToSign + signature + "\n"
 		stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256(""))
-		stringToSign = stringToSign + getSHA256Hash(buffer[:n])
+		stringToSign += getSHA256Hash(buffer[:n])
 		date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
 		region := sumHMAC(date, []byte(regionStr))
@@ -851,7 +851,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i
 	extractedSignedHeaders := make(http.Header)
 	extractedSignedHeaders.Set("host", req.Host)
-	queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
+	queryStr := strings.ReplaceAll(query.Encode(), "+", "%20")
 	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
 	stringToSign := getStringToSign(canonicalRequest, date, scope)
 	signingKey := getSigningKey(secretAccessKey, date, region, serviceS3)
@@ -988,7 +988,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
 	signedHeaders := strings.Join(headers, ";")
 	// Get canonical query string.
-	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+	req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
 	// Get canonical URI.
 	canonicalURI := s3utils.EncodePath(req.URL.Path)
@@ -1021,7 +1021,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
 	stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
 	stringToSign = stringToSign + scope + "\n"
-	stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
+	stringToSign += getSHA256Hash([]byte(canonicalRequest))
 	date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
 	regionHMAC := sumHMAC(date, []byte(region))
@@ -1220,7 +1220,7 @@ func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.
 		urlStr = urlStr + bucketName + SlashSeparator
 	}
 	if objectName != "" {
-		urlStr = urlStr + s3utils.EncodePath(objectName)
+		urlStr += s3utils.EncodePath(objectName)
 	}
 	if len(queryValues) > 0 {
 		urlStr = urlStr + "?" + queryValues.Encode()


@@ -161,7 +161,7 @@ func TestUserAgent(t *testing.T) {
 		str := getUserAgent(testCase.mode)
 		expectedStr := testCase.expectedStr
 		if IsDocker() {
-			expectedStr = strings.Replace(expectedStr, "; source", "; docker; source", -1)
+			expectedStr = strings.ReplaceAll(expectedStr, "; source", "; docker; source")
 		}
 		if str != expectedStr {
 			t.Errorf("Test %d: expected: %s, got: %s", i+1, expectedStr, str)
@@ -216,7 +216,7 @@ func TestGetHelmVersion(t *testing.T) {
 	if err != nil {
 		t.Fatalf("Unable to create temporary file. %s", err)
 	}
-	if _, err = tmpfile.Write([]byte(content)); err != nil {
+	if _, err = tmpfile.WriteString(content); err != nil {
 		t.Fatalf("Unable to create temporary file. %s", err)
 	}
 	if err = tmpfile.Close(); err != nil {
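
Aside: a small sketch (not from this commit; the temp-file prefix and content are made up) of the WriteString rewrite — (*os.File).WriteString(s) is equivalent to Write([]byte(s)) and drops the explicit conversion:

package main

import (
	"log"
	"os"
)

func main() {
	tmpfile, err := os.CreateTemp("", "helm-info-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(tmpfile.Name())
	// WriteString(s) writes the same bytes as Write([]byte(s)).
	if _, err = tmpfile.WriteString("version: v1\n"); err != nil {
		log.Fatal(err)
	}
	if err = tmpfile.Close(); err != nil {
		log.Fatal(err)
	}
}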


@@ -23,7 +23,7 @@ import (
 )
 func BenchmarkURLQueryForm(b *testing.B) {
-	req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil)
+	req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
 	if err != nil {
 		b.Fatal(err)
 	}
@@ -49,7 +49,7 @@ func BenchmarkURLQueryForm(b *testing.B) {
 // BenchmarkURLQuery - benchmark URL memory allocations
 func BenchmarkURLQuery(b *testing.B) {
-	req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil)
+	req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
 	if err != nil {
 		b.Fatal(err)
 	}
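
Aside: why http.NoBody rather than nil — a sketch, not from this commit. http.NoBody is a non-nil io.ReadCloser whose Read always returns io.EOF, so the request is still bodiless but req.Body can be read unconditionally:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload", http.NoBody)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Body != nil)                 // true: no nil checks needed downstream
	fmt.Println(req.URL.Query().Get("uploadId")) // upload
}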


@@ -160,7 +160,7 @@ func hasContentMD5(h http.Header) bool {
 	return ok
 }
-/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
 const (
 	// Maximum object size per PUT request is 5TB.
 	// This is a divergence from S3 limit on purpose to support
@@ -409,7 +409,7 @@ func dumpRequest(r *http.Request) string {
 	header.Set("Host", r.Host)
 	// Replace all '%' to '%%' so that printer format parser
 	// to ignore URL encoded values.
-	rawURI := strings.Replace(r.RequestURI, "%", "%%", -1)
+	rawURI := strings.ReplaceAll(r.RequestURI, "%", "%%")
 	req := struct {
 		Method     string `json:"method"`
 		RequestURI string `json:"reqURI"`


@@ -238,9 +238,8 @@ func TestDumpRequest(t *testing.T) {
 		RequestURI string      `json:"reqURI"`
 		Header     http.Header `json:"header"`
 	}
-	jsonReq = strings.Replace(jsonReq, "%%", "%", -1)
 	res := jsonResult{}
-	if err = json.Unmarshal([]byte(jsonReq), &res); err != nil {
+	if err = json.Unmarshal([]byte(strings.ReplaceAll(jsonReq, "%%", "%")), &res); err != nil {
 		t.Fatal(err)
 	}
@@ -399,7 +398,6 @@ func TestCeilFrac(t *testing.T) {
 // Test if isErrIgnored works correctly.
 func TestIsErrIgnored(t *testing.T) {
 	var errIgnored = fmt.Errorf("ignored error")
-	ignoredErrs := append(baseIgnoredErrs, errIgnored)
 	var testCases = []struct {
 		err     error
 		ignored bool
@@ -418,7 +416,7 @@ func TestIsErrIgnored(t *testing.T) {
 		},
 	}
 	for i, testCase := range testCases {
-		if ok := IsErrIgnored(testCase.err, ignoredErrs...); ok != testCase.ignored {
+		if ok := IsErrIgnored(testCase.err, append(baseIgnoredErrs, errIgnored)...); ok != testCase.ignored {
 			t.Errorf("Test: %d, Expected %t, got %t", i+1, testCase.ignored, ok)
 		}
 	}


@@ -276,7 +276,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
 	if err != nil {
 		return p, err
 	}
-	if _, err = w.Write(alignedBuf[:]); err != nil {
+	if _, err = w.Write(alignedBuf); err != nil {
 		w.Close()
 		return p, err
 	}
@@ -2394,10 +2394,13 @@ func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string, glob
 		if err != nil {
 			name = filePath
 		}
-		if os.PathSeparator != '/' {
-			name = strings.Replace(name, string(os.PathSeparator), "/", -1)
-		}
-		stat = append(stat, StatInfo{ModTime: st.ModTime(), Size: st.Size(), Name: name, Dir: st.IsDir(), Mode: uint32(st.Mode())})
+		stat = append(stat, StatInfo{
+			Name:    filepath.ToSlash(name),
+			Size:    st.Size(),
+			Dir:     st.IsDir(),
+			Mode:    uint32(st.Mode()),
+			ModTime: st.ModTime(),
+		})
 	}
 	return stat, nil
 }
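
Aside (not from this commit): filepath.ToSlash replaces every os.PathSeparator with '/' and is a no-op on platforms where the separator already is '/', so it subsumes both the old "if os.PathSeparator != '/'" guard and the strings.Replace call:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	name := "dir" + string(os.PathSeparator) + "file.txt"
	old := name
	if os.PathSeparator != '/' {
		old = strings.ReplaceAll(old, string(os.PathSeparator), "/")
	}
	fmt.Println(filepath.ToSlash(name) == old) // true on every platform
	fmt.Println(filepath.ToSlash(name))        // dir/file.txt
}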


@@ -447,7 +447,7 @@ func TestXLStorageReadAll(t *testing.T) {
 			continue
 		}
 		if err == nil {
-			if string(dataRead) != string([]byte("Hello, World")) {
+			if !bytes.Equal(dataRead, []byte("Hello, World")) {
 				t.Errorf("TestXLStorage %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead))
 			}
 		}
@@ -1227,7 +1227,10 @@ func TestXLStorageReadFile(t *testing.T) {
 			t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
 		}
 		// Expected error retured, proceed further to validate the returned results.
-		if err == nil && err == testCase.expectedErr {
+		if err != nil && testCase.expectedErr == nil {
+			t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
+		}
+		if err == nil {
 			if !bytes.Equal(testCase.expectedBuf, buf) {
 				t.Errorf("Case: %d %#v, expected: \"%s\", got: \"%s\"", i+1, testCase, string(testCase.expectedBuf), string(buf[:testCase.bufSize]))
 			}
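
Aside (not from this commit): bytes.Equal compares the slices directly, while the old string(a) != string(b) form allocated two throwaway strings just to compare them:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	dataRead := []byte("Hello, World")
	fmt.Println(string(dataRead) == "Hello, World")            // works, but via string conversion
	fmt.Println(bytes.Equal(dataRead, []byte("Hello, World"))) // same answer, byte-wise
}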


@@ -56,12 +56,13 @@ func main() {
 		fatalErr(json.Unmarshal(got, &input))
 		r, err := os.Open(input.File)
 		fatalErr(err)
+		defer r.Close()
 		dstName := strings.TrimSuffix(input.File, ".enc") + ".zip"
 		w, err := os.Create(dstName)
 		fatalErr(err)
+		defer w.Close()
 		decrypt(input.Key, r, w)
-		r.Close()
-		w.Close()
 		fmt.Println("Output decrypted to", dstName)
 		return
 	}
@@ -78,14 +79,13 @@ func main() {
 	case 1:
 		r, err := os.Open(args[0])
 		fatalErr(err)
+		defer r.Close()
 		if len(*key) == 0 {
 			reader := bufio.NewReader(os.Stdin)
 			fmt.Print("Enter Decryption Key: ")
 			text, _ := reader.ReadString('\n')
 			// convert CRLF to LF
-			*key = strings.Replace(text, "\n", "", -1)
+			*key = strings.ReplaceAll(text, "\n", "")
 		}
 		*key = strings.TrimSpace(*key)
 		fatalIf(len(*key) != 72, "Unexpected key length: %d, want 72", len(*key))
@@ -93,9 +93,11 @@ func main() {
 		dstName := strings.TrimSuffix(args[0], ".enc") + ".zip"
 		w, err := os.Create(dstName)
 		fatalErr(err)
+		defer w.Close()
 		decrypt(*key, r, w)
-		r.Close()
-		w.Close()
 		fmt.Println("Output decrypted to", dstName)
 		return
 	default:
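
Aside (not from this commit; file names are illustrative): moving Close into a defer right after a successful Open/Create ties cleanup to every normal return path instead of relying on control flow reaching the final statements. One trade-off worth noting: a deferred Close on a writable file discards its error, which is acceptable in a debugging helper but not in durability-sensitive code:

package main

import (
	"fmt"
	"os"
)

func main() {
	r, err := os.Open("input.enc") // illustrative name
	if err != nil {
		fmt.Println(err)
		return
	}
	defer r.Close() // runs on every return below, not just the last line
	w, err := os.Create("output.zip") // illustrative name
	if err != nil {
		fmt.Println(err)
		return
	}
	defer w.Close() // note: the Close error is discarded here
	// ... decrypt r into w ...
}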


@@ -222,8 +222,8 @@ func GenerateCredentials() (accessKey, secretKey string, err error) {
 		return "", "", err
 	}
-	secretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
-		"/", "+", -1)
+	secretKey = strings.ReplaceAll(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
+		"/", "+")
 	return accessKey, secretKey, nil
 }


@@ -193,6 +193,7 @@ func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
 		return fmt.Errorf("either Days or Years must be specified, not both")
 	}
+	//nolint:gocritic
 	if retention.Days != nil {
 		if *retention.Days == 0 {
 			return fmt.Errorf("Default retention period must be a positive integer value for 'Days'")


@@ -137,6 +137,7 @@ func TestUnmarshalDefaultRetention(t *testing.T) {
 		}
 		var dr DefaultRetention
 		err = xml.Unmarshal(d, &dr)
+		//nolint:gocritic
 		if tt.expectedErr == nil {
 			if err != nil {
 				t.Fatalf("error: expected = <nil>, got = %v", err)
@@ -173,6 +174,7 @@ func TestParseObjectLockConfig(t *testing.T) {
 	}
 	for _, tt := range tests {
 		_, err := ParseObjectLockConfig(strings.NewReader(tt.value))
+		//nolint:gocritic
 		if tt.expectedErr == nil {
 			if err != nil {
 				t.Fatalf("error: expected = <nil>, got = %v", err)
@@ -209,6 +211,7 @@ func TestParseObjectRetention(t *testing.T) {
 	}
 	for _, tt := range tests {
 		_, err := ParseObjectRetention(strings.NewReader(tt.value))
+		//nolint:gocritic
 		if tt.expectedErr == nil {
 			if err != nil {
 				t.Fatalf("error: expected = <nil>, got = %v", err)
@@ -367,6 +370,7 @@ func TestParseObjectLockRetentionHeaders(t *testing.T) {
 	for i, tt := range tests {
 		_, _, err := ParseObjectLockRetentionHeaders(tt.header)
+		//nolint:gocritic
 		if tt.expectedErr == nil {
 			if err != nil {
 				t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)
@@ -494,6 +498,7 @@ func TestParseObjectLegalHold(t *testing.T) {
 	}
 	for i, tt := range tests {
 		_, err := ParseObjectLegalHold(strings.NewReader(tt.value))
+		//nolint:gocritic
 		if tt.expectedErr == nil {
 			if err != nil {
 				t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)


@@ -57,14 +57,14 @@ func (d Destination) String() string {
 }
-//LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was used
-// prior to multi-destination
+// LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was
+// used prior to multi-destination
 func (d Destination) LegacyArn() bool {
 	return strings.HasPrefix(d.ARN, DestinationARNPrefix)
 }
-//TargetArn returns true if arn format has prefix "arn:minio:replication:::" used
-// for multi-destination targets
+// TargetArn returns true if arn format has prefix "arn:minio:replication:::"
+// used for multi-destination targets
 func (d Destination) TargetArn() bool {
 	return strings.HasPrefix(d.ARN, DestinationARNMinIOPrefix)
 }


@@ -175,7 +175,7 @@ func (c Config) FilterActionableRules(obj ObjectOpts) []Rule {
 			rules = append(rules, rule)
 		}
 	}
-	sort.Slice(rules[:], func(i, j int) bool {
+	sort.Slice(rules, func(i, j int) bool {
 		return rules[i].Priority > rules[j].Priority && rules[i].Destination.String() == rules[j].Destination.String()
 	})
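
Aside (not from this commit): for a slice s, the expression s[:] produces an identical slice header (same pointer, length, and capacity), so sort.Slice(rules[:], ...) and sort.Slice(rules, ...) do exactly the same thing; gocritic flags the former as a redundant slice expression. The same reasoning backs the extKey[:] and locks[:] cleanups in later hunks, where the operand is already a slice rather than an array:

package main

import (
	"fmt"
	"sort"
)

func main() {
	rules := []int{3, 1, 2}
	sort.Slice(rules, func(i, j int) bool { return rules[i] < rules[j] })
	fmt.Println(rules) // [1 2 3]

	// s[:] on a slice is the same header; on an array it is how you obtain a slice.
	s := rules
	fmt.Println(&s[0] == &s[:][0], len(s) == len(s[:])) // true true
}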


@@ -117,9 +117,9 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
 }
 // EnsureCertAndKey checks if both client certificate and key paths are provided
-func EnsureCertAndKey(ClientCert, ClientKey string) error {
-	if (ClientCert != "" && ClientKey == "") ||
-		(ClientCert == "" && ClientKey != "") {
+func EnsureCertAndKey(clientCert, clientKey string) error {
+	if (clientCert != "" && clientKey == "") ||
+		(clientCert == "" && clientKey != "") {
 		return errors.New("cert and key must be specified as a pair")
 	}
 	return nil


@@ -38,6 +38,7 @@ func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []strin
 	values := []string{}
 	for _, name := range names {
 		oid := name.Type
+		//nolint:gocritic
 		if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
 			switch oid[3] {
 			case 3:


@@ -201,9 +201,9 @@ func Authentication(username, password string) OperatorOption {
 }
 // RootCAs - add custom trust certs pool
-func RootCAs(CAs *x509.CertPool) OperatorOption {
+func RootCAs(certPool *x509.CertPool) OperatorOption {
 	return func(args *OperatorDNS) {
-		args.rootCAs = CAs
+		args.rootCAs = certPool
 	}
 }


@@ -86,7 +86,7 @@ func (opts Config) Wait(currentIO func() int, systemIO func() int) {
 		} else {
 			time.Sleep(waitTick)
 		}
-		tmpMaxWait = tmpMaxWait - waitTick
+		tmpMaxWait -= waitTick
 	}
 	if tmpMaxWait <= 0 {
 		return


@@ -186,7 +186,7 @@ func (l *Config) lookupBind(conn *ldap.Conn) error {
 // assumed to be using the lookup bind service account. It is required that the
 // search result in at most one result.
 func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
-	filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
+	filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username))
 	searchRequest := ldap.NewSearchRequest(
 		l.UserDNSearchBaseDN,
 		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
@@ -213,8 +213,8 @@ func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) (
 	var groups []string
 	if l.GroupSearchFilter != "" {
 		for _, groupSearchBase := range l.GroupSearchBaseDistNames {
-			filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
-			filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
+			filter := strings.ReplaceAll(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username))
+			filter = strings.ReplaceAll(filter, "%d", ldap.EscapeFilter(bindDN))
 			searchRequest := ldap.NewSearchRequest(
 				groupSearchBase,
 				ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
@@ -393,7 +393,7 @@ func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string,
 	}
 	// Evaluate the filter again with generic wildcard instead of specific values
-	filter := strings.Replace(l.UserDNSearchFilter, "%s", "*", -1)
+	filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", "*")
 	nonExistentUsers := []string{}
 	for _, dn := range userDistNames {


@@ -85,6 +85,7 @@ func TestPublicKey(t *testing.T) {
 		}
 	}
+	//nolint:gocritic
 	if key0, ok := keys[0].(*ecdsa.PublicKey); !ok {
 		t.Fatalf("Expected ECDSA key[0], got %T", keys[0])
 	} else if key1, ok := keys[1].(*rsa.PublicKey); !ok {


@@ -19,7 +19,7 @@ package config
 import "github.com/minio/minio/internal/auth"
-//// One time migration code section
+// One time migration code section
 // SetCredentials - One time migration code needed, for migrating from older config to new for server credentials.
 func SetCredentials(c Config, cred auth.Credentials) {


@@ -90,7 +90,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
 		sealingKey   [32]byte
 		encryptedKey bytes.Buffer
 	)
-	mac := hmac.New(sha256.New, extKey[:])
+	mac := hmac.New(sha256.New, extKey)
 	mac.Write(iv[:])
 	mac.Write([]byte(domain))
 	mac.Write([]byte(SealAlgorithm))
@@ -118,7 +118,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
 	default:
 		return Errorf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm)
 	case SealAlgorithm:
-		mac := hmac.New(sha256.New, extKey[:])
+		mac := hmac.New(sha256.New, extKey)
 		mac.Write(sealedKey.IV[:])
 		mac.Write([]byte(domain))
 		mac.Write([]byte(SealAlgorithm))
@@ -126,7 +126,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
 		unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
 	case InsecureSealAlgorithm:
 		sha := sha256.New()
-		sha.Write(extKey[:])
+		sha.Write(extKey)
 		sha.Write(sealedKey.IV[:])
 		unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
 	}


@@ -164,7 +164,7 @@ func TestDerivePartKey(t *testing.T) {
 			t.Fatalf("Test %d failed to decode expected part-key: %v", i, err)
 		}
 		partKey := key.DerivePartKey(test.PartID)
-		if !bytes.Equal(partKey[:], expectedPartKey[:]) {
+		if !bytes.Equal(partKey[:], expectedPartKey) {
 			t.Errorf("Test %d derives wrong part-key: got '%s' want: '%s'", i, hex.EncodeToString(partKey[:]), test.PartKey)
 		}
 	}


@@ -109,7 +109,7 @@ func (s3 ssekms) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket
 	if err != nil {
 		return key, err
 	}
-	err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
+	err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
 	return key, err
 }


@@ -80,7 +80,7 @@ func (s3 sses3) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket,
 	if err != nil {
 		return key, err
 	}
-	err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
+	err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
 	return key, err
 }


@@ -40,12 +40,12 @@ func DisableDirectIO(f *os.File) error {
 	if err != nil {
 		return err
 	}
-	flag = flag & ^(syscall.O_DIRECT)
+	flag &= ^(syscall.O_DIRECT)
 	_, err = unix.FcntlInt(fd, unix.F_SETFL, flag)
 	return err
 }
 // AlignedBlock - pass through to directio implementation.
-func AlignedBlock(BlockSize int) []byte {
-	return directio.AlignedBlock(BlockSize)
+func AlignedBlock(blockSize int) []byte {
+	return directio.AlignedBlock(blockSize)
 }
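
Aside (not from this commit; the constant value is an assumption for linux/amd64, where syscall.O_DIRECT is 0x4000): flag &= ^mask clears the mask bits, and Go spells the same operation flag &^= mask with the AND NOT operator:

package main

import "fmt"

func main() {
	const oDirect = 0x4000 // assumed syscall.O_DIRECT value on linux/amd64
	flag := 0x8000 | oDirect
	flag &= ^oDirect          // what the commit uses
	fmt.Printf("%#x\n", flag) // 0x8000

	flag |= oDirect
	flag &^= oDirect          // equivalent AND NOT spelling
	fmt.Printf("%#x\n", flag) // 0x8000
}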


@@ -199,9 +199,9 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), i
 			// If success, copy array to object
 			if isReadLock {
-				copy(dm.readLocks, locks[:])
+				copy(dm.readLocks, locks)
 			} else {
-				copy(dm.writeLocks, locks[:])
+				copy(dm.writeLocks, locks)
 			}
 			dm.m.Unlock()
@@ -579,7 +579,7 @@ func (dm *DRWMutex) Unlock() {
 		}
 		// Copy write locks to stack array
-		copy(locks, dm.writeLocks[:])
+		copy(locks, dm.writeLocks)
 	}
 	// Tolerance is not set, defaults to half of the locker clients.
@@ -620,7 +620,7 @@ func (dm *DRWMutex) RUnlock() {
 		}
 		// Copy write locks to stack array
-		copy(locks, dm.readLocks[:])
+		copy(locks, dm.readLocks)
 	}
 	// Tolerance is not set, defaults to half of the locker clients.


@@ -94,11 +94,9 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
 	if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply {
 		l.lockMap[args.Resources[0]] = ReadLock // No locks held on the given name, so claim (first) read lock
 		*reply = true
-	} else {
-		if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
-			l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
-		}
+	} else if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
+		l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
 	}
 	return nil
 }
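
Aside (not from this commit): the "elseif" fix here and in several hunks below folds an else block whose only statement is an if into else-if. Control flow is identical; one nesting level disappears:

package main

import "fmt"

func classify(n int) string {
	if n == 0 {
		return "zero"
	} else if n > 0 { // was: } else { if n > 0 { ... } }
		return "positive"
	}
	return "negative"
}

func main() {
	fmt.Println(classify(0))  // zero
	fmt.Println(classify(2))  // positive
	fmt.Println(classify(-1)) // negative
}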


@@ -41,7 +41,7 @@ func NewPattern(prefix, suffix string) (pattern string) {
 		pattern += suffix
 	}
-	pattern = strings.Replace(pattern, "**", "*", -1)
+	pattern = strings.ReplaceAll(pattern, "**", "*")
 	return pattern
 }


@@ -25,13 +25,14 @@ import (
 )
 func initScramClient(args KafkaArgs, config *sarama.Config) {
-	if args.SASL.Mechanism == "sha512" {
+	switch args.SASL.Mechanism {
+	case "sha512":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA512} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
-	} else if args.SASL.Mechanism == "sha256" {
+	case "sha256":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA256} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
-	} else {
+	default:
 		// default to PLAIN
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypePlaintext)
 	}
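
Aside (not from this commit): the "ifElseChain" fix — a chain of equality tests against one value reads better as a switch, and the default arm replaces the final else. A reduced sketch of the shape used above:

package main

import "fmt"

func mechanism(m string) string {
	switch m {
	case "sha512":
		return "SCRAM-SHA-512"
	case "sha256":
		return "SCRAM-SHA-256"
	default:
		return "PLAIN" // default to PLAIN, as in the target code
	}
}

func main() {
	fmt.Println(mechanism("sha512")) // SCRAM-SHA-512
	fmt.Println(mechanism("plain"))  // PLAIN
}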


@@ -272,10 +272,8 @@ func NewMQTTTarget(id string, args MQTTArgs, doneCh <-chan struct{}, loggerOnce
 			// Start replaying events from the store.
 			go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce)
 		}
-	} else {
-		if token.Wait() && token.Error() != nil {
-			return target, token.Error()
-		}
+	} else if token.Wait() && token.Error() != nil {
+		return target, token.Error()
 	}
 	return target, nil
 }


@@ -172,6 +172,7 @@ func (n NATSArgs) connectStan() (stan.Conn, error) {
 	}
 	var addressURL string
+	//nolint:gocritic
 	if n.Username != "" && n.Password != "" {
 		addressURL = scheme + "://" + n.Username + ":" + n.Password + "@" + n.Address.String()
 	} else if n.Token != "" {
@@ -219,20 +220,16 @@ func (target *NATSTarget) IsActive() (bool, error) {
 	if target.args.Streaming.Enable {
 		if target.stanConn == nil || target.stanConn.NatsConn() == nil {
 			target.stanConn, connErr = target.args.connectStan()
-		} else {
-			if !target.stanConn.NatsConn().IsConnected() {
-				return false, errNotConnected
-			}
+		} else if !target.stanConn.NatsConn().IsConnected() {
+			return false, errNotConnected
 		}
 	} else {
 		if target.natsConn == nil {
 			target.natsConn, connErr = target.args.connectNats()
-		} else {
-			if !target.natsConn.IsConnected() {
-				return false, errNotConnected
-			}
+		} else if !target.natsConn.IsConnected() {
+			return false, errNotConnected
 		}
 	}
 	if connErr != nil {
 		if connErr.Error() == nats.ErrNoServers.Error() {


@@ -29,7 +29,7 @@ import (
 func AppendFile(dst string, src string, osync bool) error {
 	flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE
 	if osync {
-		flags = flags | os.O_SYNC
+		flags |= os.O_SYNC
 	}
 	appendFile, err := os.OpenFile(dst, flags, 0666)
 	if err != nil {


@@ -136,7 +136,7 @@ func (w *LimitWriter) Write(p []byte) (n int, err error) {
 	var n1 int
 	if w.skipBytes > 0 {
 		if w.skipBytes >= int64(len(p)) {
-			w.skipBytes = w.skipBytes - int64(len(p))
+			w.skipBytes -= int64(len(p))
 			return n, nil
 		}
 		p = p[w.skipBytes:]
@@ -147,11 +147,11 @@ func (w *LimitWriter) Write(p []byte) (n int, err error) {
 	}
 	if w.wLimit < int64(len(p)) {
 		n1, err = w.Writer.Write(p[:w.wLimit])
-		w.wLimit = w.wLimit - int64(n1)
+		w.wLimit -= int64(n1)
 		return n, err
 	}
 	n1, err = w.Writer.Write(p)
-	w.wLimit = w.wLimit - int64(n1)
+	w.wLimit -= int64(n1)
 	return n, err
 }


@@ -191,7 +191,7 @@ func (kms secretKey) DecryptKey(keyID string, ciphertext []byte, context Context
 	mac.Write(encryptedKey.IV)
 	sealingKey := mac.Sum(nil)
-	block, err := aes.NewCipher(sealingKey[:])
+	block, err := aes.NewCipher(sealingKey)
 	if err != nil {
 		return nil, err
 	}


@@ -42,12 +42,10 @@ func (l *logOnceType) logOnceIf(ctx context.Context, err error, id interface{},
 	if prevErr == nil {
 		l.IDMap[id] = err
 		shouldLog = true
-	} else {
-		if prevErr.Error() != err.Error() {
-			l.IDMap[id] = err
-			shouldLog = true
-		}
+	} else if prevErr.Error() != err.Error() {
+		l.IDMap[id] = err
+		shouldLog = true
 	}
 	l.Unlock()
 	if shouldLog {


@@ -26,13 +26,14 @@ import (
 )
 func initScramClient(cfg Config, config *sarama.Config) {
-	if cfg.SASL.Mechanism == "sha512" {
+	switch cfg.SASL.Mechanism {
+	case "sha512":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA512} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
-	} else if cfg.SASL.Mechanism == "sha256" {
+	case "sha256":
 		config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA256} }
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
-	} else {
+	default:
 		// default to PLAIN
 		config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypePlaintext)
 	}


@@ -54,6 +54,7 @@ func TestNetworkError_Unwrap(t *testing.T) {
 			n := &NetworkError{
 				Err: tt.err,
 			}
+			//nolint:gocritic
 			if tt.target == nil {
 				var netErrInterface net.Error
 				if errors.As(n, &netErrInterface) != tt.want {


@@ -22,7 +22,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"path/filepath"
 	"reflect"
 	"strings"
 	"testing"
@@ -89,7 +88,7 @@ type tester interface {
 }
 func openTestFile(t tester, file string) []byte {
-	f, err := ioutil.ReadFile(filepath.Join("testdata/testdata.zip"))
+	f, err := ioutil.ReadFile("testdata/testdata.zip")
 	if err != nil {
 		t.Fatal(err)
 	}


@@ -103,7 +103,7 @@ func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) {
 		return nil, fmt.Errorf("unsupported sql value %v and type %v", value, value.GetTypeString())
 	}
-	name = strings.Replace(name, "*", "__ALL__", -1)
+	name = strings.ReplaceAll(name, "*", "__ALL__")
 	r.KVS = append(r.KVS, jstream.KV{Key: name, Value: v})
 	return r, nil
 }


@@ -44,7 +44,7 @@ func (ls *LiteralString) Capture(values []string) error {
 	n := len(values[0])
 	r := values[0][1 : n-1]
 	// Translate doubled quotes
-	*ls = LiteralString(strings.Replace(r, "''", "'", -1))
+	*ls = LiteralString(strings.ReplaceAll(r, "''", "'"))
 	return nil
 }
@@ -78,7 +78,7 @@ func (qi *QuotedIdentifier) Capture(values []string) error {
 	r := values[0][1 : n-1]
 	// Translate doubled quotes
-	*qi = QuotedIdentifier(strings.Replace(r, `""`, `"`, -1))
+	*qi = QuotedIdentifier(strings.ReplaceAll(r, `""`, `"`))
 	return nil
 }


@@ -231,6 +231,7 @@ func (v Value) ToArray() (val []Value, ok bool) {
 // IsNull - checks if value is missing.
 func (v Value) IsNull() bool {
+	//nolint:gocritic
 	switch v.value.(type) {
 	case nil:
 		return true
@@ -245,6 +246,7 @@ func (v Value) IsArray() (ok bool) {
 }
 func (v Value) isNumeric() bool {
+	//nolint:gocritic
 	switch v.value.(type) {
 	case int64, float64:
 		return true


@@ -44,7 +44,7 @@ func GetInfo(device string) (madmin.SmartInfo, error) {
 	}
 	var db drivedb.DriveDb
-	dec := yaml.NewDecoder(bytes.NewBuffer(MustAsset("drivedb.yaml")))
+	dec := yaml.NewDecoder(bytes.NewReader(MustAsset("drivedb.yaml")))
 	err := dec.Decode(&db)
 	if err != nil {
@@ -108,7 +108,7 @@ func getNvmeInfo(d *NVMeDevice) (*madmin.SmartNvmeInfo, error) {
 	}
 	var controller nvmeIdentController
-	binary.Read(bytes.NewBuffer(buf[:]), utils.NativeEndian, &controller)
+	binary.Read(bytes.NewReader(buf), utils.NativeEndian, &controller)
 	nvmeInfo.VendorID = strings.TrimSpace(fmt.Sprintf("%#04x", controller.VendorID))
 	nvmeInfo.ModelNum = strings.TrimSpace(fmt.Sprintf("%s", controller.ModelNumber))
@@ -124,7 +124,7 @@ func getNvmeInfo(d *NVMeDevice) (*madmin.SmartNvmeInfo, error) {
 	}
 	var sl nvmeSMARTLog
-	binary.Read(bytes.NewBuffer(buf2[:]), utils.NativeEndian, &sl)
+	binary.Read(bytes.NewReader(buf2), utils.NativeEndian, &sl)
 	unitsRead := le128ToBigInt(sl.DataUnitsRead)
 	unitsWritten := le128ToBigInt(sl.DataUnitsWritten)
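
Aside (not from this commit): bytes.NewReader is the lighter choice when data is only consumed. bytes.NewBuffer(buf) works too, but a Buffer is a read-write type with growth machinery that binary.Read and YAML decoding never use, while a Reader states read-only intent and additionally supports seeking:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	buf := []byte{0x01, 0x00, 0x02, 0x00}
	var vals [2]uint16
	// Decode two little-endian uint16 values from the byte slice.
	if err := binary.Read(bytes.NewReader(buf), binary.LittleEndian, &vals); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(vals) // [1 2]
}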