add gocritic/ruleguard checks back again, cleanup code. (#13665)

- remove some duplicated code
- reported a bug, separately fixed in #13664
- use strings.ReplaceAll() where needed (see the sketch after this list)
- use filepath.ToSlash() where needed
- remove all non-Go style comments from the codebase
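
As an illustration of the last two cleanup items, a minimal before/after sketch (hypothetical code, not taken from this commit):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// strings.Replace with n == -1 is what gocritic flags;
	// strings.ReplaceAll states the intent directly.
	cmdLine := "http://server1:9000 server1"
	anon := strings.ReplaceAll(cmdLine, "server1", "serverN") // was: strings.Replace(cmdLine, "server1", "serverN", -1)

	// filepath.ToSlash normalizes OS-specific separators to '/',
	// so keys derived from paths stay portable on Windows.
	key := filepath.ToSlash(filepath.Join("bucket", "prefix", "object.txt"))

	fmt.Println(anon, key)
}
```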

Co-authored-by: Aditya Manthramurthy <donatello@users.noreply.github.com>
Harshavardhana 2021-11-16 09:28:29 -08:00 committed by GitHub
parent 07c5e72cdb
commit 661b263e77
111 changed files with 409 additions and 450 deletions


@@ -23,12 +23,19 @@ linters:
     - structcheck
     - unconvert
     - varcheck
+    - gocritic

 issues:
   exclude-use-default: false
   exclude:
     - should have a package comment
     - error strings should not be capitalized or end with punctuation or a newline
+    # todo fix these when we get enough time.
+    - "singleCaseSwitch: should rewrite switch statement to if statement"
+    - "unlambda: replace"
+    - "captLocal:"
+    - "ifElseChain:"
+    - "elseif:"

 service:
-  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
+  golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly
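
The patterns above are excluded project-wide; elsewhere the diff instead adds per-site `//nolint:gocritic` directives, which silence a finding only for the statement that follows. A minimal sketch on a hypothetical function (not code from this commit):

```go
package main

import "fmt"

func classify(n int) string {
	// The directive below silences gocritic (here, its ifElseChain
	// check) for the statement that follows, instead of excluding
	// the check project-wide.
	//nolint:gocritic
	if n < 0 {
		return "negative"
	} else if n == 0 {
		return "zero"
	} else {
		return "positive"
	}
}

func main() {
	fmt.Println(classify(42))
}
```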


@@ -19,7 +19,7 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
 	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@latest
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest


@@ -215,11 +215,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
 				return
 			}
 		}
-	} else {
-		if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
-			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-			return
-		}
+	} else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
 	}
 }


@@ -323,11 +323,12 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
 	status := vars["status"]

 	var err error
-	if status == statusEnabled {
+	switch status {
+	case statusEnabled:
 		err = globalIAMSys.SetGroupStatus(ctx, group, true)
-	} else if status == statusDisabled {
+	case statusDisabled:
 		err = globalIAMSys.SetGroupStatus(ctx, group, false)
-	} else {
+	default:
 		err = errInvalidArgument
 	}
 	if err != nil {


@@ -1356,6 +1356,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
 	ldap := madmin.LDAP{}
 	if globalLDAPConfig.Enabled {
 		ldapConn, err := globalLDAPConfig.Connect()
+		//nolint:gocritic
 		if err != nil {
 			ldap.Status = string(madmin.ItemOffline)
 		} else if ldapConn == nil {
@@ -1636,8 +1637,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 	anonymizeCmdLine := func(cmdLine string) string {
 		if !globalIsDistErasure {
 			// FS mode - single server - hard code to `server1`
-			anonCmdLine := strings.Replace(cmdLine, globalLocalNodeName, "server1", -1)
-			return strings.Replace(anonCmdLine, globalMinioConsoleHost, "server1", -1)
+			anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
+			return strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
 		}
 		// Server start command regex groups:


@@ -491,7 +491,7 @@ func (h *healSequence) getScannedItemsCount() int64 {
 	defer h.mutex.RUnlock()

 	for _, v := range h.scannedItemsMap {
-		count = count + v
+		count += v
 	}
 	return count
 }


@@ -43,8 +43,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 	// Admin router
 	adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()

-	/// Service operations
-
 	adminVersions := []string{
 		adminAPIVersionPrefix,
 	}
@@ -71,7 +69,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))

 	if globalIsDistErasure || globalIsErasure {
-		/// Heal operations
+		// Heal operations
 		// Heal processing endpoint.
 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
@@ -79,9 +77,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))
 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler)))
-
-		/// Health operations
-
 	}

 	// Profiling operations
@@ -106,7 +101,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
 	}

-	/// Config import/export bulk operations
+	// Config import/export bulk operations
 	if enableConfigOps {
 		// Get config
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler)))


@@ -973,7 +973,7 @@ var errorCodes = errorCodeMap{
 		HTTPStatusCode: http.StatusNotFound,
 	},

-	/// Bucket notification related errors.
+	// Bucket notification related errors.
 	ErrEventNotification: {
 		Code:           "InvalidArgument",
 		Description:    "A specified event is not supported for notifications.",
@@ -1120,14 +1120,14 @@ var errorCodes = errorCodeMap{
 		HTTPStatusCode: http.StatusForbidden,
 	},

-	/// S3 extensions.
+	// S3 extensions.
 	ErrContentSHA256Mismatch: {
 		Code:           "XAmzContentSHA256Mismatch",
 		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},

-	/// MinIO extensions.
+	// MinIO extensions.
 	ErrStorageFull: {
 		Code:           "XMinioStorageFull",
 		Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
@@ -1370,7 +1370,7 @@ var errorCodes = errorCodeMap{
 		Description:    "The continuation token provided is incorrect",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	//S3 Select API Errors
+	// S3 Select API Errors
 	ErrEmptyRequestBody: {
 		Code:           "EmptyRequestBody",
 		Description:    "Request body cannot be empty.",
@@ -2074,6 +2074,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	default:
 		var ie, iw int
 		// This work-around is to handle the issue golang/go#30648
+		//nolint:gocritic
 		if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()),
 			"request declared a Content-Length of %d but only wrote %d bytes",
 			&ie, &iw); ferr != nil {
@@ -2229,6 +2230,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 		}
 		// Add more Gateway SDKs here if any in future.
 	default:
+		//nolint:gocritic
 		if errors.Is(err, errMalformedEncoding) {
 			apiErr = APIError{
 				Code: "BadRequest",


@@ -301,7 +301,8 @@ func registerAPIRouter(router *mux.Router) {
 			router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 				collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")

-			/// Bucket operations
+			// Bucket operations
+
 			// GetBucketLocation
 			router.Methods(http.MethodGet).HandlerFunc(
 				collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")
@@ -355,7 +356,7 @@ func registerAPIRouter(router *mux.Router) {
 			// GetBucketTaggingHandler
 			router.Methods(http.MethodGet).HandlerFunc(
 				collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "")
-			//DeleteBucketWebsiteHandler
+			// DeleteBucketWebsiteHandler
 			router.Methods(http.MethodDelete).HandlerFunc(
 				collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "")
 			// DeleteBucketTaggingHandler
@@ -452,7 +453,7 @@ func registerAPIRouter(router *mux.Router) {
 				collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
 		}

-	/// Root operation
+	// Root operation

 	// ListenNotification
 	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(


@@ -903,7 +903,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
 		// S3 feature to replace ${filename} found in Key form field
 		// by the filename attribute passed in multipart
-		formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
+		formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName))
 	}

 	object := trimLeadingSlash(formValues.Get("Key"))


@@ -59,8 +59,8 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
 	}

 	if encodingType != "" {
-		// Only url encoding type is supported
-		if strings.ToLower(encodingType) != "url" {
+		// AWS S3 spec only supports 'url' encoding type
+		if !strings.EqualFold(encodingType, "url") {
 			return ErrInvalidEncodingMethod
 		}
 	}


@@ -172,11 +172,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
 			vStr, ok := v.(string)
 			if ok {
 				// Special case for AD/LDAP STS users
-				if k == ldapUser {
+				switch k {
+				case ldapUser:
 					args["user"] = []string{vStr}
-				} else if k == ldapUserN {
+				case ldapUserN:
 					args["username"] = []string{vStr}
-				} else {
+				default:
 					args[k] = []string{vStr}
 				}
 			}


@@ -172,7 +172,7 @@ func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.Sta
 type replicateTargetDecision struct {
 	Replicate   bool   // Replicate to this target
 	Synchronous bool   // Synchronous replication configured.
-	Arn         string //ARN of replication target
+	Arn         string // ARN of replication target
 	ID          string
 }


@@ -32,7 +32,7 @@ var replicatedInfosTests = []struct {
 	expectedOpType        replication.Type
 	expectedAction        replicationAction
 }{
-	{ //1. empty tgtInfos slice
+	{ // 1. empty tgtInfos slice
 		name:                  "no replicated targets",
 		tgtInfos:              []replicatedTargetInfo{},
 		expectedCompletedSize: 0,
@@ -41,7 +41,7 @@ var replicatedInfosTests = []struct {
 		expectedOpType:        replication.UnsetReplicationType,
 		expectedAction:        replicateNone,
 	},
-	{ //2. replication completed to single target
+	{ // 2. replication completed to single target
 		name: "replication completed to single target",
 		tgtInfos: []replicatedTargetInfo{
 			{
@@ -59,7 +59,7 @@ var replicatedInfosTests = []struct {
 		expectedOpType:        replication.ObjectReplicationType,
 		expectedAction:        replicateAll,
 	},
-	{ //3. replication completed to single target; failed to another
+	{ // 3. replication completed to single target; failed to another
 		name: "replication completed to single target",
 		tgtInfos: []replicatedTargetInfo{
 			{
@@ -84,7 +84,7 @@ var replicatedInfosTests = []struct {
 		expectedOpType:        replication.ObjectReplicationType,
 		expectedAction:        replicateAll,
 	},
-	{ //4. replication pending on one target; failed to another
+	{ // 4. replication pending on one target; failed to another
 		name: "replication completed to single target",
 		tgtInfos: []replicatedTargetInfo{
 			{
@@ -137,7 +137,7 @@ var parseReplicationDecisionTest = []struct {
 	expDsc ReplicateDecision
 	expErr error
 }{
-	{ //1.
+	{ // 1.
 		name: "empty string",
 		dsc:  "",
 		expDsc: ReplicateDecision{
@@ -146,7 +146,7 @@ var parseReplicationDecisionTest = []struct {
 		expErr: nil,
 	},
-	{ //2.
+	{ // 2.
 		name:   "replicate decision for one target",
 		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
 		expErr: nil,
@@ -156,7 +156,7 @@ var parseReplicationDecisionTest = []struct {
 			},
 		},
 	},
-	{ //3.
+	{ // 3.
 		name:   "replicate decision for multiple targets",
 		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
 		expErr: nil,
@@ -167,7 +167,7 @@ var parseReplicationDecisionTest = []struct {
 			},
 		},
 	},
-	{ //4.
+	{ // 4.
 		name:   "invalid format replicate decision for one target",
 		dsc:    "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
 		expErr: errInvalidReplicateDecisionFormat,
@@ -181,7 +181,6 @@ var parseReplicationDecisionTest = []struct {
 func TestParseReplicateDecision(t *testing.T) {
 	for i, test := range parseReplicationDecisionTest {
-		//dsc, err := parseReplicateDecision(test.dsc)
 		dsc, err := parseReplicateDecision(test.expDsc.String())

 		if err != nil {
@@ -208,22 +207,22 @@ var replicationStateTest = []struct {
 	arn       string
 	expStatus replication.StatusType
 }{
-	{ //1. no replication status header
+	{ // 1. no replication status header
 		name:      "no replicated targets",
 		rs:        ReplicationState{},
 		expStatus: replication.StatusType(""),
 	},
-	{ //2. replication status for one target
+	{ // 2. replication status for one target
 		name:      "replication status for one target",
 		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
 		expStatus: replication.Pending,
 	},
-	{ //3. replication status for one target - incorrect format
+	{ // 3. replication status for one target - incorrect format
 		name:      "replication status for one target",
 		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
 		expStatus: replication.StatusType(""),
 	},
-	{ //4. replication status for 3 targets, one of them failed
+	{ // 4. replication status for 3 targets, one of them failed
 		name: "replication status for 3 targets - one failed",
 		rs: ReplicationState{
 			ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",
@@ -231,7 +230,7 @@ var replicationStateTest = []struct {
 		},
 		expStatus: replication.Failed,
 	},
-	{ //5. replication status for replica version
+	{ // 5. replication status for replica version
 		name:      "replication status for replica version",
 		rs:        ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
 		expStatus: replication.Replica,


@@ -1740,7 +1740,7 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
 	}
 	rs, ok := oi.UserDefined[targetResetHeader(arn)]
 	if !ok {
-		rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] //for backward compatibility
+		rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] // for backward compatibility
 	}
 	if !ok { // existing object replication is enabled and object version is unreplicated so far.
 		if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested


@@ -55,25 +55,25 @@ var replicationConfigTests = []struct {
 	tgtStatuses  map[string]replication.StatusType
 	expectedSync bool
 }{
-	{ //1. no replication config
+	{ // 1. no replication config
 		name:         "no replication config",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: nil},
 		expectedSync: false,
 	},
-	{ //2. existing object replication config enabled, no versioning
+	{ // 2. existing object replication config enabled, no versioning
 		name:         "existing object replication config enabled, no versioning",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //3. existing object replication config enabled, versioning suspended
+	{ // 3. existing object replication config enabled, versioning suspended
 		name:         "existing object replication config enabled, versioning suspended",
 		info:         ObjectInfo{Size: 100, VersionID: nullVersionID},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //4. existing object replication enabled, versioning enabled; no reset in progress
+	{ // 4. existing object replication enabled, versioning enabled; no reset in progress
 		name: "existing object replication enabled, versioning enabled; no reset in progress",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatus: replication.Completed,
@@ -130,7 +130,7 @@ var replicationConfigTests2 = []struct {
 			}}}},
 		expectedSync: true,
 	},
-	{ //3. replication status unset
+	{ // 3. replication status unset
 		name: "existing object replication on pre-existing unreplicated object",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatus: replication.StatusType(""),
@@ -142,7 +142,7 @@ var replicationConfigTests2 = []struct {
 		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
 		expectedSync: true,
 	},
-	{ //4. replication status Complete
+	{ // 4. replication status Complete
 		name: "existing object replication on object in Completed replication status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:COMPLETED",
@@ -155,7 +155,7 @@ var replicationConfigTests2 = []struct {
 			}}}},
 		expectedSync: false,
 	},
-	{ //5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
+	{ // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
 		name: "existing object replication with reset in progress and object in Pending status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:PENDING;",
@@ -172,7 +172,7 @@ var replicationConfigTests2 = []struct {
 			}}},
 		},
 	},
-	{ //6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
+	{ // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
 		name: "existing object replication with reset in progress and object in Failed status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:FAILED;",
@@ -189,7 +189,7 @@ var replicationConfigTests2 = []struct {
 		},
 		expectedSync: true,
 	},
-	{ //7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
+	{ // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
 		name: "existing object replication with reset in progress and object never replicated before",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatus: replication.StatusType(""),
@@ -207,7 +207,7 @@ var replicationConfigTests2 = []struct {
 		expectedSync: true,
 	},

-	{ //8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
+	{ // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
 		name: "existing object replication enabled - reset in progress for an object in Completed status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:COMPLETED;",
@@ -224,7 +224,7 @@ var replicationConfigTests2 = []struct {
 			}}},
 		},
 	},
-	{ //9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
+	{ // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
 		name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:PENDING;",
@@ -243,7 +243,7 @@ var replicationConfigTests2 = []struct {
 			}}},
 		},
 	},
-	{ //10. existing object replication enabled, versioning enabled, replication status Complete & reset done
+	{ // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
 		name: "reset done on object in Completed Status - ineligbile for re-replication",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:COMPLETED;",


@@ -56,7 +56,7 @@ func (brs BucketReplicationStats) Clone() BucketReplicationStats {
 	c := BucketReplicationStats{
 		Stats: make(map[string]*BucketReplicationStat, len(brs.Stats)),
 	}
-	//this is called only by replicationStats cache and already holds a read lock before calling Clone()
+	// This is called only by replicationStats cache and already holds a read lock before calling Clone()
 	for arn, st := range brs.Stats {
 		c.Stats[arn] = &BucketReplicationStat{
 			FailedSize: atomic.LoadInt64(&st.FailedSize),


@@ -444,6 +444,6 @@ type TargetClient struct {
 	StorageClass   string             // storage class on remote
 	disableProxy   bool
 	healthCancelFn context.CancelFunc // cancellation function for client healthcheck
-	ARN            string             //ARN to uniquely identify remote target
+	ARN            string             // ARN to uniquely identify remote target
 	ResetID        string
 }


@@ -526,6 +526,7 @@ func handleCommonEnvVars() {
 	// Warn user if deprecated environment variables,
 	// "MINIO_ACCESS_KEY" and "MINIO_SECRET_KEY", are defined
 	// Check all error conditions first
+	//nolint:gocritic
 	if !env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
 		logger.Fatal(config.ErrMissingEnvCredentialRootUser(nil), "Unable to start MinIO")
 	} else if env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) {
@@ -544,6 +545,7 @@ func handleCommonEnvVars() {
 	var user, password string
 	haveRootCredentials := false
 	haveAccessCredentials := false
+	//nolint:gocritic
 	if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
 		user = env.Get(config.EnvRootUser, "")
 		password = env.Get(config.EnvRootPassword, "")


@@ -696,9 +696,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
 	// to list the ENV, for regular k/v EnableKey is
 	// implicit, for ENVs we cannot make it implicit.
 	if subSysHelp.MultipleTargets {
-		envK := config.EnvPrefix + strings.Join([]string{
-			strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
-		}, config.EnvWordDelimiter)
+		envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(madmin.EnableKey)
 		envHelp = append(envHelp, config.HelpKV{
 			Key:         envK,
 			Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
@@ -707,9 +705,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
 		})
 	}
 	for _, hkv := range h {
-		envK := config.EnvPrefix + strings.Join([]string{
-			strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
-		}, config.EnvWordDelimiter)
+		envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(hkv.Key)
 		envHelp = append(envHelp, config.HelpKV{
 			Key:         envK,
 			Description: hkv.Description,


@@ -34,14 +34,12 @@ import (
 	"github.com/minio/pkg/quick"
 )

-/////////////////// Config V1 ///////////////////
 type configV1 struct {
 	Version   string `json:"version"`
 	AccessKey string `json:"accessKeyId"`
 	SecretKey string `json:"secretAccessKey"`
 }

-/////////////////// Config V2 ///////////////////
 type configV2 struct {
 	Version     string `json:"version"`
 	Credentials struct {
@@ -63,7 +61,6 @@ type configV2 struct {
 	} `json:"fileLogger"`
 }

-/////////////////// Config V3 ///////////////////
 // backendV3 type.
 type backendV3 struct {
 	Type string `json:"type"`


@@ -32,7 +32,7 @@ import (
 // number of log messages to buffer
 const defaultLogBufferCount = 10000

-//HTTPConsoleLoggerSys holds global console logger state
+// HTTPConsoleLoggerSys holds global console logger state
 type HTTPConsoleLoggerSys struct {
 	sync.RWMutex
 	pubsub *pubsub.PubSub


@@ -775,7 +775,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 			if flat.Objects < dataScannerCompactLeastObject {
 				if f.dataUsageScannerDebug && flat.Objects > 1 {
 					// Disabled, rather chatty:
-					//console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+					// console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat)
 				}
 				compact = true
 			} else {
@@ -791,7 +791,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 			}
 			if f.dataUsageScannerDebug && compact {
 				// Disabled, rather chatty:
-				//console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+				// console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat)
 			}
 		}
 		if compact {


@@ -203,7 +203,7 @@ func (d *dataUpdateTracker) latestWithDir(dir string) uint64 {
 // start a saver goroutine.
 // All of these will exit when the context is canceled.
 func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
-	if len(drives) <= 0 {
+	if len(drives) == 0 {
 		logger.LogIf(ctx, errors.New("dataUpdateTracker.start: No drives specified"))
 		return
 	}
@@ -220,7 +220,7 @@ func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
 // If no valid data usage tracker can be found d will remain unchanged.
 // If object is shared the caller should lock it.
 func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
-	if len(drives) <= 0 {
+	if len(drives) == 0 {
 		logger.LogIf(ctx, errors.New("dataUpdateTracker.load: No drives specified"))
 		return
 	}


@@ -773,7 +773,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma
 		return nil, err
 	}

-	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
+	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey, MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
 	if err != nil {
 		return nil, crypto.ErrInvalidCustomerKey
 	}


@@ -61,7 +61,7 @@ func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
 		panic("Negative rotations are not allowed")
 	}

-	skipOffset = skipOffset % int64(len(alphabets))
+	skipOffset %= int64(len(alphabets))
 	as := make([]byte, 2*len(alphabets))
 	copy(as, alphabets)
 	copy(as[len(alphabets):], alphabets)


@@ -242,7 +242,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
 	for _, argPattern := range argPatterns {
 		var totalSize uint64 = 1
 		for _, p := range argPattern {
-			totalSize = totalSize * uint64(len(p.Seq))
+			totalSize *= uint64(len(p.Seq))
 		}
 		totalSizes = append(totalSizes, totalSize)
 	}


@@ -497,6 +497,7 @@ func NewEndpoints(args ...string) (endpoints Endpoints, err error) {
 		}

 		// All endpoints have to be same type and scheme if applicable.
+		//nolint:gocritic
 		if i == 0 {
 			endpointType = endpoint.Type()
 			scheme = endpoint.Scheme


@@ -32,7 +32,7 @@ var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnform
 // list all errors that can be ignored in a bucket metadata operation.
 var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

-/// Bucket operations
+// Bucket operations

 // MakeBucket - make a bucket.
 func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {


@@ -95,7 +95,7 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
 // It returns an error if the decoding failed.
 func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
 	var isZero = 0
-	for _, b := range data[:] {
+	for _, b := range data {
 		if len(b) == 0 {
 			isZero++
 			break


@@ -110,7 +110,7 @@ func TestErasureDecode(t *testing.T) {
 		for i, disk := range disks {
 			writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
 		}
-		n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
+		n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
 		closeBitrotWriters(writers)
 		if err != nil {
 			setup.Remove()


@@ -235,7 +235,7 @@ func TestListOnlineDisks(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 			break
 		}
@@ -414,7 +414,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 			break
 		}
@@ -563,7 +563,7 @@ func TestDisksWithAllParts(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 		}
 	}


@@ -163,7 +163,7 @@ func TestHealingDanglingObject(t *testing.T) {
 		t.Fatal(err)
 	}
-	//defer removeRoots(fsDirs)
+	defer removeRoots(fsDirs)

 	// Everything is fine, should return nil
 	objLayer, disks, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))


@@ -292,25 +292,25 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
 	for i, meta := range metaArr {
 		if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
 			for _, part := range meta.Parts {
-				h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
+				fmt.Fprintf(h, "part.%d", part.Number)
 			}
-			h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
+			fmt.Fprintf(h, "%v", meta.Erasure.Distribution)

 			// make sure that length of Data is same
-			h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
+			fmt.Fprintf(h, "%v", len(meta.Data))

 			// ILM transition fields
-			h.Write([]byte(meta.TransitionStatus))
-			h.Write([]byte(meta.TransitionTier))
-			h.Write([]byte(meta.TransitionedObjName))
-			h.Write([]byte(meta.TransitionVersionID))
+			fmt.Fprint(h, meta.TransitionStatus)
+			fmt.Fprint(h, meta.TransitionTier)
+			fmt.Fprint(h, meta.TransitionedObjName)
+			fmt.Fprint(h, meta.TransitionVersionID)

 			// Server-side replication fields
-			h.Write([]byte(fmt.Sprintf("%v", meta.MarkDeleted)))
-			h.Write([]byte(meta.Metadata[string(meta.ReplicationState.ReplicaStatus)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationStatusInternal]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal]))
+			fmt.Fprintf(h, "%v", meta.MarkDeleted)
+			fmt.Fprint(h, meta.Metadata[string(meta.ReplicationState.ReplicaStatus)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationStatusInternal])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal])
 			metaHashes[i] = hex.EncodeToString(h.Sum(nil))
 			h.Reset()
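
The `fmt.Fprint`/`fmt.Fprintf` rewrites above work because `hash.Hash` embeds `io.Writer`, so formatted output can be written straight into the hash without building an intermediate `[]byte`. A standalone sketch of the pattern (hypothetical values, not MinIO code):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	h := sha256.New() // hash.Hash satisfies io.Writer

	// Equivalent to h.Write([]byte(fmt.Sprintf("part.%d", 1))),
	// minus the throwaway string-to-slice conversion.
	fmt.Fprintf(h, "part.%d", 1)
	fmt.Fprint(h, "some-status")

	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
```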


@@ -46,7 +46,7 @@ import (
 // list all errors which can be ignored in object operations.
 var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

-/// Object Operations
+// Object Operations

 func countOnlineDisks(onlineDisks []StorageAPI) (online int) {
 	for _, onlineDisk := range onlineDisks {


@@ -327,7 +327,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc
 	flags := os.O_CREATE | os.O_WRONLY

 	if globalFSOSync {
-		flags = flags | os.O_SYNC
+		flags |= os.O_SYNC
 	}
 	writer, err := lock.Open(filePath, flags, 0666)
 	if err != nil {


@@ -109,7 +109,7 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {
 		}
 	}

-	/// Save new reader on the map.
+	// Save new reader on the map.

 	// It is possible by this time due to concurrent
 	// i/o we might have another lock present. Lookup


@@ -398,7 +398,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
 	return cache, err
 }

-/// Bucket operations
+// Bucket operations

 // getBucketDir - will convert incoming bucket names to
 // corresponding valid bucket names on the backend in a platform
@@ -601,7 +601,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, opts Delet
 	return nil
 }

-/// Object Operations
+// Object Operations

 // CopyObject - copy object source object to destination object.
 // if source object and destination object are same we only


@@ -274,7 +274,7 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string)
 	encodeKey := func(key string) string {
 		tokens := strings.Split(key, "_")
 		for i := range tokens {
-			tokens[i] = strings.Replace(tokens[i], "-", "_", -1)
+			tokens[i] = strings.ReplaceAll(tokens[i], "-", "_")
 		}
 		return strings.Join(tokens, "__")
 	}
@@ -367,7 +367,7 @@ func azurePropertiesToS3Meta(meta azblob.Metadata, props azblob.BlobHTTPHeaders,
 	decodeKey := func(key string) string {
 		tokens := strings.Split(key, "__")
 		for i := range tokens {
-			tokens[i] = strings.Replace(tokens[i], "_", "-", -1)
+			tokens[i] = strings.ReplaceAll(tokens[i], "_", "-")
 		}
 		return strings.Join(tokens, "_")
 	}


@@ -531,7 +531,7 @@ func toGCSPageToken(name string) string {
 		byte(length & 0xFF),
 	}

-	length = length >> 7
+	length >>= 7
 	if length > 0 {
 		b = append(b, byte(length&0xFF))
 	}


@@ -668,7 +668,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 		return oi, e
 	}

-	//delete any unencrypted version of object that might be on the backend
+	// delete any unencrypted version of object that might be on the backend
 	defer l.s3Objects.DeleteObject(ctx, bucket, object, opts)

 	// Save the final object size and modtime.


@@ -289,7 +289,7 @@ func validateFormFieldSize(ctx context.Context, formValues http.Header) error {
 // Extract form fields and file data from a HTTP POST Policy
 func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
-	/// HTML Form values
+	// HTML Form values
 	fileName = ""

 	// Canonicalize the form values into http.Header.


@@ -171,7 +171,7 @@ func newMappedPolicy(policy string) MappedPolicy {

 // key options
 type options struct {
-	ttl int64 //expiry in seconds
+	ttl int64 // expiry in seconds
 }

 type iamWatchEvent struct {
@@ -558,9 +558,7 @@ func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, mem
 		// exist.
 		gi = newGroupInfo(members)
 	} else {
-		mergedMembers := append(gi.Members, members...)
-		uniqMembers := set.CreateStringSet(mergedMembers...).ToSlice()
-		gi.Members = uniqMembers
+		gi.Members = set.CreateStringSet(append(gi.Members, members...)...).ToSlice()
 	}

 	if err := store.saveGroupInfo(ctx, group, gi); err != nil {


@@ -351,67 +351,34 @@ func (sys *IAMSys) loadWatchedEvent(ctx context.Context, event iamWatchEvent) (e
 	ctx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
 	defer cancel()

-	if event.isCreated {
-		switch {
-		case usersPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
-		case stsPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
-		case svcPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
-		case groupsPrefix:
-			group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
-			err = sys.store.GroupNotificationHandler(ctx, group)
-		case policyPrefix:
-			policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
-			err = sys.store.PolicyNotificationHandler(ctx, policyName)
-		case policyDBUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
-		case policyDBSTSUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
-		case policyDBGroupsPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
-		}
-	} else {
-		// delete event
-		switch {
-		case usersPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
-		case stsPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
-		case svcPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
-		case groupsPrefix:
-			group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
-			err = sys.store.GroupNotificationHandler(ctx, group)
-		case policyPrefix:
-			policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
-			err = sys.store.PolicyNotificationHandler(ctx, policyName)
-		case policyDBUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
-		case policyDBSTSUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
-		case policyDBGroupsPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
-		}
+	switch {
+	case usersPrefix:
+		accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
+		err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
+	case stsPrefix:
+		accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
+		err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
+	case svcPrefix:
+		accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
+		err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
+	case groupsPrefix:
+		group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
+		err = sys.store.GroupNotificationHandler(ctx, group)
+	case policyPrefix:
+		policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
+		err = sys.store.PolicyNotificationHandler(ctx, policyName)
+	case policyDBUsersPrefix:
+		policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
+		user := strings.TrimSuffix(policyMapFile, ".json")
+		err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
+	case policyDBSTSUsersPrefix:
+		policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
+		user := strings.TrimSuffix(policyMapFile, ".json")
+		err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
+	case policyDBGroupsPrefix:
+		policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
+		user := strings.TrimSuffix(policyMapFile, ".json")
+		err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
 	}
 	return err
 }


@@ -620,11 +620,9 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan<
 				}
 				best = other
 				bestIdx = otherIdx
-			} else {
-				// Keep best, replace "other"
-				if err := selectFrom(otherIdx); err != nil {
-					return err
-				}
+			} else if err := selectFrom(otherIdx); err != nil {
+				// Keep best, replace "other"
+				return err
 			}
 			continue
 		}
@@ -636,10 +634,8 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan<
 		if best.name > last {
 			out <- *best
 			last = best.name
-		} else {
-			if serverDebugLog {
-				console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last)
-			}
+		} else if serverDebugLog {
+			console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last)
 		}
 		// Replace entry we just sent.
 		if err := selectFrom(bestIdx); err != nil {


@@ -81,6 +81,7 @@ func Test_metaCacheEntries_merge(t *testing.T) {
 	}
 	// Merge b into a
 	a.merge(b, -1)
+	//nolint:gocritic
 	want := append(loadMetacacheSampleNames, loadMetacacheSampleNames...)
 	sort.Strings(want)
 	got := a.entries().names()


@ -1623,20 +1623,18 @@ func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) {
continue continue
} }
for k, v := range metric.Histogram { for k, v := range metric.Histogram {
l := append(labels, metric.HistogramBucketLabel)
lv := append(values, k)
out <- prometheus.MustNewConstMetric( out <- prometheus.MustNewConstMetric(
prometheus.NewDesc( prometheus.NewDesc(
prometheus.BuildFQName(string(metric.Description.Namespace), prometheus.BuildFQName(string(metric.Description.Namespace),
string(metric.Description.Subsystem), string(metric.Description.Subsystem),
string(metric.Description.Name)), string(metric.Description.Name)),
metric.Description.Help, metric.Description.Help,
l, append(labels, metric.HistogramBucketLabel),
metric.StaticLabels, metric.StaticLabels,
), ),
prometheus.GaugeValue, prometheus.GaugeValue,
float64(v), float64(v),
lv...) append(values, k)...)
} }
continue continue
} }
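
The histogram change above only inlines two single-use temporaries (l and lv) into the MustNewConstMetric call; the emitted metric is unchanged. A tiny standalone illustration of the same inlining:

package main

import "fmt"

// describe is a stand-in for the prometheus.MustNewConstMetric call that
// consumes the label names and values.
func describe(labels, values []string) string {
	return fmt.Sprint(labels, values)
}

func main() {
	labels, values := []string{"bucket"}, []string{"mybucket"}

	// Before: single-use temporaries.
	l := append(labels, "le")
	lv := append(values, "0.5")
	fmt.Println(describe(l, lv))

	// After: inlined, identical output.
	fmt.Println(describe(append(labels, "le"), append(values, "0.5")))
}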

View File

@ -341,21 +341,17 @@ func sameLocalAddrs(addr1, addr2 string) (bool, error) {
if host1 == "" { if host1 == "" {
// If empty host means it is localhost // If empty host means it is localhost
addr1Local = true addr1Local = true
} else { } else if addr1Local, err = isLocalHost(host1, port1, port1); err != nil {
// Host not empty, check if it is local // Host not empty, check if it is local
if addr1Local, err = isLocalHost(host1, port1, port1); err != nil { return false, err
return false, err
}
} }
if host2 == "" { if host2 == "" {
// If empty host means it is localhost // If empty host means it is localhost
addr2Local = true addr2Local = true
} else { } else if addr2Local, err = isLocalHost(host2, port2, port2); err != nil {
// Host not empty, check if it is local // Host not empty, check if it is local
if addr2Local, err = isLocalHost(host2, port2, port2); err != nil { return false, err
return false, err
}
} }
// If both of addresses point to the same machine, check if // If both of addresses point to the same machine, check if

View File

@ -52,7 +52,7 @@ var globalObjLayerMutex sync.RWMutex
// Global object layer, only accessed by globalObjectAPI. // Global object layer, only accessed by globalObjectAPI.
var globalObjectAPI ObjectLayer var globalObjectAPI ObjectLayer
//Global cacheObjects, only accessed by newCacheObjectsFn(). // Global cacheObjects, only accessed by newCacheObjectsFn().
var globalCacheObjectAPI CacheObjectLayer var globalCacheObjectAPI CacheObjectLayer
// Checks if the object is a directory, this logic uses // Checks if the object is a directory, this logic uses

View File

@ -312,7 +312,7 @@ func (e ObjectExistsAsDirectory) Error() string {
return "Object exists on : " + e.Bucket + " as directory " + e.Object return "Object exists on : " + e.Bucket + " as directory " + e.Object
} }
//PrefixAccessDenied object access is denied. // PrefixAccessDenied object access is denied.
type PrefixAccessDenied GenericError type PrefixAccessDenied GenericError
func (e PrefixAccessDenied) Error() string { func (e PrefixAccessDenied) Error() string {
@ -484,7 +484,7 @@ func (e InvalidObjectState) Error() string {
return "The operation is not valid for the current state of the object " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")" return "The operation is not valid for the current state of the object " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")"
} }
/// Bucket related errors. // Bucket related errors.
// BucketNameInvalid - bucketname provided is invalid. // BucketNameInvalid - bucketname provided is invalid.
type BucketNameInvalid GenericError type BucketNameInvalid GenericError
@ -494,7 +494,7 @@ func (e BucketNameInvalid) Error() string {
return "Bucket name invalid: " + e.Bucket return "Bucket name invalid: " + e.Bucket
} }
/// Object related errors. // Object related errors.
// ObjectNameInvalid - object name provided is invalid. // ObjectNameInvalid - object name provided is invalid.
type ObjectNameInvalid GenericError type ObjectNameInvalid GenericError
@ -569,7 +569,7 @@ func (e OperationTimedOut) Error() string {
return "Operation timed out" return "Operation timed out"
} }
/// Multipart related errors. // Multipart related errors.
// MalformedUploadID malformed upload id. // MalformedUploadID malformed upload id.
type MalformedUploadID struct { type MalformedUploadID struct {
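
The comment edits in this file, and throughout the commit, normalize to standard Go style: a space after the slashes, and no /// banners, which keeps go doc output and the linters happy. A generic sketch of the flagged and idiomatic forms (the type below is illustrative only):

package objerrs

// Forms removed by this commit:
//
//	//PrefixAccessDenied ...    (no space after the slashes)
//	/// Bucket related errors.  (three-slash banner)

// PrefixAccessDenied object access is denied. This is the idiomatic
// doc comment: two slashes, one space, identifier first.
type PrefixAccessDenied struct{ Bucket, Object string }

func (e PrefixAccessDenied) Error() string {
	return "Prefix access is denied: " + e.Bucket + "/" + e.Object
}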

View File

@ -205,7 +205,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
}, },
// ListObjectsResult-9. // ListObjectsResult-9.
// Used for asserting the case with marker, but without prefix. // Used for asserting the case with marker, but without prefix.
//marker is set to "newPrefix0" in the testCase, (testCase 33). // marker is set to "newPrefix0" in the testCase, (testCase 33).
{ {
IsTruncated: false, IsTruncated: false,
Objects: []ObjectInfo{ Objects: []ObjectInfo{
@ -217,7 +217,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
}, },
}, },
// ListObjectsResult-10. // ListObjectsResult-10.
//marker is set to "newPrefix1" in the testCase, (testCase 34). // marker is set to "newPrefix1" in the testCase, (testCase 34).
{ {
IsTruncated: false, IsTruncated: false,
Objects: []ObjectInfo{ Objects: []ObjectInfo{
@ -228,7 +228,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
}, },
}, },
// ListObjectsResult-11. // ListObjectsResult-11.
//marker is set to "obj0" in the testCase, (testCase 35). // marker is set to "obj0" in the testCase, (testCase 35).
{ {
IsTruncated: false, IsTruncated: false,
Objects: []ObjectInfo{ Objects: []ObjectInfo{
@ -548,7 +548,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
{"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true}, {"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true},
// Testing for all 10 objects in the bucket (18). // Testing for all 10 objects in the bucket (18).
{"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true}, {"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true},
//Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (19). // Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (19).
{"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true}, {"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true},
// Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (20). // Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (20).
{"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true}, {"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true},
@ -905,7 +905,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
}, },
// ListObjectsResult-9. // ListObjectsResult-9.
// Used for asserting the case with marker, but without prefix. // Used for asserting the case with marker, but without prefix.
//marker is set to "newPrefix0" in the testCase, (testCase 33). // marker is set to "newPrefix0" in the testCase, (testCase 33).
{ {
IsTruncated: false, IsTruncated: false,
Objects: []ObjectInfo{ Objects: []ObjectInfo{
@ -917,7 +917,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
}, },
}, },
// ListObjectsResult-10. // ListObjectsResult-10.
//marker is set to "newPrefix1" in the testCase, (testCase 34). // marker is set to "newPrefix1" in the testCase, (testCase 34).
{ {
IsTruncated: false, IsTruncated: false,
Objects: []ObjectInfo{ Objects: []ObjectInfo{
@ -928,7 +928,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
}, },
}, },
// ListObjectsResult-11. // ListObjectsResult-11.
//marker is set to "obj0" in the testCase, (testCase 35). // marker is set to "obj0" in the testCase, (testCase 35).
{ {
IsTruncated: false, IsTruncated: false,
Objects: []ObjectInfo{ Objects: []ObjectInfo{
@ -1223,7 +1223,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
{"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true}, {"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true},
// Testing for all 10 objects in the bucket (16). // Testing for all 10 objects in the bucket (16).
{"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true}, {"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true},
//Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (17). // Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (17).
{"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true}, {"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true},
// Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (18). // Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (18).
{"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true}, {"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true},

View File

@ -1065,7 +1065,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
{bucketNames[0], "Asia", "", "", "", 2, listMultipartResults[13], nil, true}, {bucketNames[0], "Asia", "", "", "", 2, listMultipartResults[13], nil, true},
// setting delimiter (Test number 27). // setting delimiter (Test number 27).
{bucketNames[0], "", "", "", SlashSeparator, 2, listMultipartResults[14], nil, true}, {bucketNames[0], "", "", "", SlashSeparator, 2, listMultipartResults[14], nil, true},
//Test case with multiple uploadID listing for given object (Test number 28). // Test case with multiple uploadID listing for given object (Test number 28).
{bucketNames[1], "", "", "", "", 100, listMultipartResults[15], nil, true}, {bucketNames[1], "", "", "", "", 100, listMultipartResults[15], nil, true},
// Test case with multiple uploadID listing for given object, but uploadID marker set. // Test case with multiple uploadID listing for given object, but uploadID marker set.
// Testing whether the marker entry is skipped (Test number 29-30). // Testing whether the marker entry is skipped (Test number 29-30).
@ -1088,29 +1088,29 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
// Test case with `Prefix` and `UploadIDMarker` (Test number 37). // Test case with `Prefix` and `UploadIDMarker` (Test number 37).
{bucketNames[1], "min", "minio-object-1.txt", uploadIDs[1], "", 10, listMultipartResults[24], nil, true}, {bucketNames[1], "min", "minio-object-1.txt", uploadIDs[1], "", 10, listMultipartResults[24], nil, true},
// Test case for bucket with multiple objects in it. // Test case for bucket with multiple objects in it.
// Bucket used : `bucketNames[2]`. // Bucket used : `bucketNames[2]`.
// Objects used: `objectNames[1-5]`. // Objects used: `objectNames[1-5]`.
// UploadId's used: uploadIds[4-8]. // UploadId's used: uploadIds[4-8].
// (Test number 39). // (Test number 39).
{bucketNames[2], "", "", "", "", 100, listMultipartResults[25], nil, true}, {bucketNames[2], "", "", "", "", 100, listMultipartResults[25], nil, true},
//Test cases with prefixes. // Test cases with prefixes.
//Testing listing with prefix set to "min" (Test number 40) . // Testing listing with prefix set to "min" (Test number 40) .
{bucketNames[2], "min", "", "", "", 100, listMultipartResults[26], nil, true}, {bucketNames[2], "min", "", "", "", 100, listMultipartResults[26], nil, true},
//Testing listing with prefix set to "ney" (Test number 41). // Testing listing with prefix set to "ney" (Test number 41).
{bucketNames[2], "ney", "", "", "", 100, listMultipartResults[27], nil, true}, {bucketNames[2], "ney", "", "", "", 100, listMultipartResults[27], nil, true},
//Testing listing with prefix set to "par" (Test number 42). // Testing listing with prefix set to "par" (Test number 42).
{bucketNames[2], "parrot", "", "", "", 100, listMultipartResults[28], nil, true}, {bucketNames[2], "parrot", "", "", "", 100, listMultipartResults[28], nil, true},
//Testing listing with prefix set to object name "neymar.jpeg" (Test number 43). // Testing listing with prefix set to object name "neymar.jpeg" (Test number 43).
{bucketNames[2], "neymar.jpeg", "", "", "", 100, listMultipartResults[29], nil, true}, {bucketNames[2], "neymar.jpeg", "", "", "", 100, listMultipartResults[29], nil, true},
// Testing listing with `MaxUploads` set to 3 (Test number 44). // Testing listing with `MaxUploads` set to 3 (Test number 44).
{bucketNames[2], "", "", "", "", 3, listMultipartResults[30], nil, true}, {bucketNames[2], "", "", "", "", 3, listMultipartResults[30], nil, true},
// In case of bucketNames[2], there are 6 entries (Test number 45). // In case of bucketNames[2], there are 6 entries (Test number 45).
// Since all available entries are listed, IsTruncated is expected to be false // Since all available entries are listed, IsTruncated is expected to be false
// and NextMarkers are expected to be empty. // and NextMarkers are expected to be empty.
{bucketNames[2], "", "", "", "", 6, listMultipartResults[31], nil, true}, {bucketNames[2], "", "", "", "", 6, listMultipartResults[31], nil, true},
// Test case with `KeyMarker` (Test number 47). // Test case with `KeyMarker` (Test number 47).
{bucketNames[2], "", objectNames[3], "", "", 10, listMultipartResults[33], nil, true}, {bucketNames[2], "", objectNames[3], "", "", 10, listMultipartResults[33], nil, true},
// Test case with `prefix` and `KeyMarker` (Test number 48). // Test case with `prefix` and `KeyMarker` (Test number 48).
{bucketNames[2], "minio-object", objectNames[1], "", "", 10, listMultipartResults[34], nil, true}, {bucketNames[2], "minio-object", objectNames[1], "", "", 10, listMultipartResults[34], nil, true},
} }
@ -1694,9 +1694,9 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))}, {bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))}, {bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
// Part with size larger than 5Mb. // Part with size larger than 5Mb.
{bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
{bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},
{bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(validPart))},
} }
sha256sum := "" sha256sum := ""
var opts ObjectOptions var opts ObjectOptions
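
The int64(len(string(validPart))) to int64(len(validPart)) edits are safe because validPart is a []byte, and len of a byte slice always equals the length of its string conversion, so the conversion was just an extra copy. A quick check:

package main

import "fmt"

func main() {
	validPart := []byte("0123456789abcdef") // stand-in for the test fixture
	// len on the byte slice and on its string conversion always agree, but
	// the conversion allocates and copies; the direct form does neither.
	fmt.Println(int64(len(validPart)), int64(len(string(validPart)))) // 16 16
}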

View File

@ -790,7 +790,7 @@ func (g *GetObjectReader) Close() error {
return nil return nil
} }
//SealMD5CurrFn seals md5sum with object encryption key and returns sealed // SealMD5CurrFn seals md5sum with object encryption key and returns sealed
// md5sum // md5sum
type SealMD5CurrFn func([]byte) []byte type SealMD5CurrFn func([]byte) []byte

View File

@ -1574,7 +1574,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return return
} }
/// if Content-Length is unknown/missing, deny the request // if Content-Length is unknown/missing, deny the request
size := r.ContentLength size := r.ContentLength
rAuthType := getRequestAuthType(r) rAuthType := getRequestAuthType(r)
if rAuthType == authTypeStreamingSigned { if rAuthType == authTypeStreamingSigned {
@ -1595,7 +1595,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return return
} }
/// maximum Upload size for objects in a single operation // maximum Upload size for objects in a single operation
if isMaxObjectSize(size) { if isMaxObjectSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return return
@ -1924,7 +1924,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return return
} }
/// if Content-Length is unknown/missing, deny the request // if Content-Length is unknown/missing, deny the request
size := r.ContentLength size := r.ContentLength
rAuthType := getRequestAuthType(r) rAuthType := getRequestAuthType(r)
if rAuthType == authTypeStreamingSigned { if rAuthType == authTypeStreamingSigned {
@ -1946,7 +1946,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return return
} }
/// maximum Upload size for objects in a single operation // maximum Upload size for objects in a single operation
if isMaxObjectSize(size) { if isMaxObjectSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return return
@ -2155,7 +2155,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
writeSuccessResponseHeadersOnly(w) writeSuccessResponseHeadersOnly(w)
} }
/// Multipart objectAPIHandlers // Multipart objectAPIHandlers
// NewMultipartUploadHandler - New multipart upload. // NewMultipartUploadHandler - New multipart upload.
// Notice: The S3 client can send secret keys in headers for encryption related jobs, // Notice: The S3 client can send secret keys in headers for encryption related jobs,
@ -2478,7 +2478,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return return
} }
/// maximum copy size for multipart objects in a single operation // maximum copy size for multipart objects in a single operation
if isMaxAllowedPartSize(length) { if isMaxAllowedPartSize(length) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return return
@ -2670,7 +2670,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return return
} }
/// if Content-Length is unknown/missing, throw away // if Content-Length is unknown/missing, throw away
size := r.ContentLength size := r.ContentLength
rAuthType := getRequestAuthType(r) rAuthType := getRequestAuthType(r)
@ -2693,7 +2693,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return return
} }
/// maximum Upload size for multipart objects in a single operation // maximum Upload size for multipart objects in a single operation
if isMaxAllowedPartSize(size) { if isMaxAllowedPartSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return return
@ -3319,7 +3319,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
} }
} }
/// Delete objectAPIHandlers // Delete objectAPIHandlers
// DeleteObjectHandler - delete an object // DeleteObjectHandler - delete an object
func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {

View File

@ -2706,13 +2706,13 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))}, {bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))}, {bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
// Part with size larger than 5 MiB. // Part with size larger than 5 MiB.
{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},
// Part with size larger than 5 MiB. // Part with size larger than 5 MiB.
// Parts uploaded for anonymous/unsigned API handler test. // Parts uploaded for anonymous/unsigned API handler test.
{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))},
} }
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, part := range parts { for _, part := range parts {
@ -3077,13 +3077,13 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))}, {bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))}, {bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
// Part with size larger than 5 MiB. // Part with size larger than 5 MiB.
{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},
// Part with size larger than 5 MiB. // Part with size larger than 5 MiB.
// Parts uploaded for anonymous/unsigned API handler test. // Parts uploaded for anonymous/unsigned API handler test.
{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))}, {bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))},
} }
// Iterating over createPartCases to generate multipart chunks. // Iterating over createPartCases to generate multipart chunks.
for _, part := range parts { for _, part := range parts {

View File

@ -150,7 +150,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
randomPerm := rand.Perm(10) randomPerm := rand.Perm(10)
randomString := "" randomString := ""
for _, num := range randomPerm { for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num) randomString += strconv.Itoa(num)
} }
expectedETaghex := getMD5Hash([]byte(randomString)) expectedETaghex := getMD5Hash([]byte(randomString))
@ -189,7 +189,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
randomPerm := rand.Perm(100) randomPerm := rand.Perm(100)
randomString := "" randomString := ""
for _, num := range randomPerm { for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num) randomString += strconv.Itoa(num)
} }
expectedETaghex := getMD5Hash([]byte(randomString)) expectedETaghex := getMD5Hash([]byte(randomString))
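
randomString = randomString + ... becoming randomString += ... is gocritic's assignOp rewrite; the compound form is equivalent and reads better in accumulation loops:

package main

import (
	"fmt"
	"math/rand"
	"strconv"
)

func main() {
	randomString := ""
	for _, num := range rand.Perm(10) {
		randomString += strconv.Itoa(num) // was: randomString = randomString + ...
	}
	fmt.Println(len(randomString)) // 10
}

For long accumulations a strings.Builder would avoid the repeated copying; for ten digits the compound assignment is the simpler fix.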

View File

@ -61,8 +61,8 @@ func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey stri
keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
retStr := "{" retStr := "{"
retStr = retStr + expirationStr + "," retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr retStr += conditionStr
retStr = retStr + "}" retStr += "}"
return []byte(retStr) return []byte(retStr)
} }
@ -89,8 +89,8 @@ func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration t
conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
retStr := "{" retStr := "{"
retStr = retStr + expirationStr + "," retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr retStr += conditionStr
retStr = retStr + "}" retStr += "}"
return []byte(retStr) return []byte(retStr)
} }
@ -108,8 +108,8 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []
conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr) conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr)
retStr := "{" retStr := "{"
retStr = retStr + expirationStr + "," retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr retStr += conditionStr
retStr = retStr + "}" retStr += "}"
return []byte(retStr) return []byte(retStr)
} }

View File

@ -303,14 +303,12 @@ func checkPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) erro
if !condPassed { if !condPassed {
return fmt.Errorf("Invalid according to Policy: Policy Condition failed") return fmt.Errorf("Invalid according to Policy: Policy Condition failed")
} }
} else { } else if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") {
// This covers all conditions X-Amz-Meta-* and X-Amz-* // This covers all conditions X-Amz-Meta-* and X-Amz-*
if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") { // Check if policy condition is satisfied
// Check if policy condition is satisfied condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) if !condPassed {
if !condPassed { return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value)
return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value)
}
} }
} }
} }

View File

@ -365,7 +365,7 @@ func (s *TestSuiteCommon) TestBucketPolicy(c *check) {
// assert the http response status code. // assert the http response status code.
c.Assert(response.StatusCode, http.StatusOK) c.Assert(response.StatusCode, http.StatusOK)
/// Put a new bucket policy. // Put a new bucket policy.
request, err = newTestSignedRequest(http.MethodPut, getPutPolicyURL(s.endPoint, bucketName), request, err = newTestSignedRequest(http.MethodPut, getPutPolicyURL(s.endPoint, bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), s.accessKey, s.secretKey, s.signer) int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), s.accessKey, s.secretKey, s.signer)
c.Assert(err, nil) c.Assert(err, nil)
@ -980,7 +980,7 @@ func (s *TestSuiteCommon) TestPutBucket(c *check) {
wg.Wait() wg.Wait()
bucketName = getRandomBucketName() bucketName = getRandomBucketName()
//Block 2: testing for correctness of the functionality // Block 2: testing for correctness of the functionality
// HTTP request to create the bucket. // HTTP request to create the bucket.
request, err := newTestSignedRequest(http.MethodPut, getMakeBucketURL(s.endPoint, bucketName), request, err := newTestSignedRequest(http.MethodPut, getMakeBucketURL(s.endPoint, bucketName),
0, nil, s.accessKey, s.secretKey, s.signer) 0, nil, s.accessKey, s.secretKey, s.signer)
@ -1273,7 +1273,7 @@ func (s *TestSuiteCommon) TestPutObjectLongName(c *check) {
c.Assert(err, nil) c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK) c.Assert(response.StatusCode, http.StatusOK)
//make long object name. // make long object name.
longObjName = fmt.Sprintf("%0255d/%0255d/%0255d/%0255d/%0255d", 1, 1, 1, 1, 1) longObjName = fmt.Sprintf("%0255d/%0255d/%0255d/%0255d/%0255d", 1, 1, 1, 1, 1)
if IsDocker() || IsKubernetes() { if IsDocker() || IsKubernetes() {
longObjName = fmt.Sprintf("%0242d/%0242d/%0242d/%0242d/%0242d", 1, 1, 1, 1, 1) longObjName = fmt.Sprintf("%0242d/%0242d/%0242d/%0242d/%0242d", 1, 1, 1, 1, 1)

View File

@ -261,7 +261,7 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues
// Replace all spaced strings, some clients can send spaced // Replace all spaced strings, some clients can send spaced
// parameters and some won't. So we pro-actively remove any spaces // parameters and some won't. So we pro-actively remove any spaces
// to make parsing easier. // to make parsing easier.
v4Auth = strings.Replace(v4Auth, " ", "", -1) v4Auth = strings.ReplaceAll(v4Auth, " ", "")
if v4Auth == "" { if v4Auth == "" {
return sv, ErrAuthHeaderEmpty return sv, ErrAuthHeaderEmpty
} }
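
strings.Replace(s, old, new, -1) and strings.ReplaceAll(s, old, new) are identical by definition (ReplaceAll, added in Go 1.12, calls Replace with n = -1); the commit standardizes on the clearer name. For example:

package main

import (
	"fmt"
	"strings"
)

func main() {
	v4Auth := "AWS4-HMAC-SHA256 Credential = minio / 20211116" // illustrative header value
	fmt.Println(strings.Replace(v4Auth, " ", "", -1) ==
		strings.ReplaceAll(v4Auth, " ", "")) // true
}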

View File

@ -103,7 +103,7 @@ func getSignedHeaders(signedHeaders http.Header) string {
// <HashedPayload> // <HashedPayload>
// //
func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
rawQuery := strings.Replace(queryStr, "+", "%20", -1) rawQuery := strings.ReplaceAll(queryStr, "+", "%20")
encodedPath := s3utils.EncodePath(urlPath) encodedPath := s3utils.EncodePath(urlPath)
canonicalRequest := strings.Join([]string{ canonicalRequest := strings.Join([]string{
method, method,
@ -130,9 +130,9 @@ func getScope(t time.Time, region string) string {
// getStringToSign a string based on selected query values. // getStringToSign a string based on selected query values.
func getStringToSign(canonicalRequest string, t time.Time, scope string) string { func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n" stringToSign += scope + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) stringToSign += hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign return stringToSign
} }
@ -306,7 +306,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s
return ErrInvalidToken return ErrInvalidToken
} }
/// Verify finally if signature is same. // Verify finally if signature is same.
// Get canonical request. // Get canonical request.
presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)

View File

@ -29,13 +29,8 @@ import (
xnet "github.com/minio/pkg/net" xnet "github.com/minio/pkg/net"
) )
///////////////////////////////////////////////////////////////////////////////
//
// Storage REST server, storageRESTReceiver and StorageRESTClient are // Storage REST server, storageRESTReceiver and StorageRESTClient are
// inter-dependent, below test functions are sufficient to test all of them. // inter-dependent, below test functions are sufficient to test all of them.
//
///////////////////////////////////////////////////////////////////////////////
func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) { func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) {
testCases := []struct { testCases := []struct {
expectErr bool expectErr bool

View File

@ -436,7 +436,7 @@ func parseHexUint(v []byte) (n uint64, err error) {
for i, b := range v { for i, b := range v {
switch { switch {
case '0' <= b && b <= '9': case '0' <= b && b <= '9':
b = b - '0' b -= '0'
case 'a' <= b && b <= 'f': case 'a' <= b && b <= 'f':
b = b - 'a' + 10 b = b - 'a' + 10
case 'A' <= b && b <= 'F': case 'A' <= b && b <= 'F':
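
b -= '0' above is another assignOp rewrite, this time inside the chunked-encoding hex parser. A reduced, runnable sketch of the surrounding function, with the error path trimmed to the essentials:

package main

import (
	"errors"
	"fmt"
)

// parseHexUint is a reduced sketch of the parser patched above: each hex
// digit is folded into n, and the '0'-'9' case now uses b -= '0'.
func parseHexUint(v []byte) (n uint64, err error) {
	for _, b := range v {
		switch {
		case '0' <= b && b <= '9':
			b -= '0'
		case 'a' <= b && b <= 'f':
			b = b - 'a' + 10
		case 'A' <= b && b <= 'F':
			b = b - 'A' + 10
		default:
			return 0, errors.New("invalid byte in chunk length")
		}
		n <<= 4
		n |= uint64(b)
	}
	return n, nil
}

func main() {
	fmt.Println(parseHexUint([]byte("1a"))) // 26 <nil>
}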

View File

@ -119,19 +119,19 @@ func TestMain(m *testing.M) {
// concurrency level for certain parallel tests. // concurrency level for certain parallel tests.
const testConcurrencyLevel = 10 const testConcurrencyLevel = 10
/// //
/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258 // Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
/// //
/// User-Agent: // User-Agent:
/// //
/// This is ignored from signing because signing this causes problems with generating pre-signed URLs // This is ignored from signing because signing this causes problems with generating pre-signed URLs
/// (that are executed by other agents) or when customers pass requests through proxies, which may // (that are executed by other agents) or when customers pass requests through proxies, which may
/// modify the user-agent. // modify the user-agent.
/// //
/// Authorization: // Authorization:
/// //
/// Is skipped for obvious reasons // Is skipped for obvious reasons
/// //
var ignoredHeaders = map[string]bool{ var ignoredHeaders = map[string]bool{
"Authorization": true, "Authorization": true,
"User-Agent": true, "User-Agent": true,
@ -633,7 +633,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
signedHeaders := strings.Join(headers, ";") signedHeaders := strings.Join(headers, ";")
// Get canonical query string. // Get canonical query string.
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
// Get canonical URI. // Get canonical URI.
canonicalURI := s3utils.EncodePath(req.URL.Path) canonicalURI := s3utils.EncodePath(req.URL.Path)
@ -665,8 +665,8 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
}, SlashSeparator) }, SlashSeparator)
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n" stringToSign += scope + "\n"
stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) stringToSign += getSHA256Hash([]byte(canonicalRequest))
date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
region := sumHMAC(date, []byte(globalMinioDefaultRegion)) region := sumHMAC(date, []byte(globalMinioDefaultRegion))
@ -749,7 +749,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in
stringToSign = stringToSign + scope + "\n" stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + signature + "\n" stringToSign = stringToSign + signature + "\n"
stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256("")) stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256(""))
stringToSign = stringToSign + getSHA256Hash(buffer[:n]) stringToSign += getSHA256Hash(buffer[:n])
date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
region := sumHMAC(date, []byte(regionStr)) region := sumHMAC(date, []byte(regionStr))
@ -851,7 +851,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i
extractedSignedHeaders := make(http.Header) extractedSignedHeaders := make(http.Header)
extractedSignedHeaders.Set("host", req.Host) extractedSignedHeaders.Set("host", req.Host)
queryStr := strings.Replace(query.Encode(), "+", "%20", -1) queryStr := strings.ReplaceAll(query.Encode(), "+", "%20")
canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method) canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
stringToSign := getStringToSign(canonicalRequest, date, scope) stringToSign := getStringToSign(canonicalRequest, date, scope)
signingKey := getSigningKey(secretAccessKey, date, region, serviceS3) signingKey := getSigningKey(secretAccessKey, date, region, serviceS3)
@ -988,7 +988,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
signedHeaders := strings.Join(headers, ";") signedHeaders := strings.Join(headers, ";")
// Get canonical query string. // Get canonical query string.
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")
// Get canonical URI. // Get canonical URI.
canonicalURI := s3utils.EncodePath(req.URL.Path) canonicalURI := s3utils.EncodePath(req.URL.Path)
@ -1021,7 +1021,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n" stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest)) stringToSign += getSHA256Hash([]byte(canonicalRequest))
date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd))) date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
regionHMAC := sumHMAC(date, []byte(region)) regionHMAC := sumHMAC(date, []byte(region))
@ -1220,7 +1220,7 @@ func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.
urlStr = urlStr + bucketName + SlashSeparator urlStr = urlStr + bucketName + SlashSeparator
} }
if objectName != "" { if objectName != "" {
urlStr = urlStr + s3utils.EncodePath(objectName) urlStr += s3utils.EncodePath(objectName)
} }
if len(queryValues) > 0 { if len(queryValues) > 0 {
urlStr = urlStr + "?" + queryValues.Encode() urlStr = urlStr + "?" + queryValues.Encode()
@ -1504,7 +1504,7 @@ func removeRoots(roots []string) {
} }
} }
//removeDiskN - removes N disks from supplied disk slice. // removeDiskN - removes N disks from supplied disk slice.
func removeDiskN(disks []string, n int) { func removeDiskN(disks []string, n int) {
if n > len(disks) { if n > len(disks) {
n = len(disks) n = len(disks)

View File

@ -161,7 +161,7 @@ func TestUserAgent(t *testing.T) {
str := getUserAgent(testCase.mode) str := getUserAgent(testCase.mode)
expectedStr := testCase.expectedStr expectedStr := testCase.expectedStr
if IsDocker() { if IsDocker() {
expectedStr = strings.Replace(expectedStr, "; source", "; docker; source", -1) expectedStr = strings.ReplaceAll(expectedStr, "; source", "; docker; source")
} }
if str != expectedStr { if str != expectedStr {
t.Errorf("Test %d: expected: %s, got: %s", i+1, expectedStr, str) t.Errorf("Test %d: expected: %s, got: %s", i+1, expectedStr, str)
@ -216,7 +216,7 @@ func TestGetHelmVersion(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Unable to create temporary file. %s", err) t.Fatalf("Unable to create temporary file. %s", err)
} }
if _, err = tmpfile.Write([]byte(content)); err != nil { if _, err = tmpfile.WriteString(content); err != nil {
t.Fatalf("Unable to create temporary file. %s", err) t.Fatalf("Unable to create temporary file. %s", err)
} }
if err = tmpfile.Close(); err != nil { if err = tmpfile.Close(); err != nil {
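
tmpfile.Write([]byte(content)) to tmpfile.WriteString(content) drops the explicit conversion: os.File implements io.StringWriter, so WriteString is the direct spelling and lets the implementation skip the intermediate slice where it can. A minimal sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	tmpfile, err := os.CreateTemp("", "helm-*") // hypothetical temp file
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmpfile.Name())
	defer tmpfile.Close()

	// Equivalent to tmpfile.Write([]byte("content")), minus the conversion.
	if _, err := tmpfile.WriteString("content"); err != nil {
		panic(err)
	}
	fmt.Println("wrote", tmpfile.Name())
}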

View File

@ -23,7 +23,7 @@ import (
) )
func BenchmarkURLQueryForm(b *testing.B) { func BenchmarkURLQueryForm(b *testing.B) {
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil) req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -49,7 +49,7 @@ func BenchmarkURLQueryForm(b *testing.B) {
// BenchmarkURLQuery - benchmark URL memory allocations // BenchmarkURLQuery - benchmark URL memory allocations
func BenchmarkURLQuery(b *testing.B) { func BenchmarkURLQuery(b *testing.B) {
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil) req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
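
Passing http.NoBody instead of nil to http.NewRequest is the form gocritic's httpNoBody check prefers: both mean "no request body", but NoBody is a non-nil io.ReadCloser that reads as immediately exhausted, so downstream code needs no nil checks. For example:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:9000/bucket/name?uploadId=upload", http.NoBody)
	if err != nil {
		panic(err)
	}
	// NoBody behaves as an immediately exhausted body, not a nil pointer.
	buf := make([]byte, 1)
	n, readErr := req.Body.Read(buf)
	fmt.Println(n, readErr) // 0 EOF
}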

View File

@ -160,7 +160,7 @@ func hasContentMD5(h http.Header) bool {
return ok return ok
} }
/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const ( const (
// Maximum object size per PUT request is 5TB. // Maximum object size per PUT request is 5TB.
// This is a divergence from S3 limit on purpose to support // This is a divergence from S3 limit on purpose to support
@ -409,7 +409,7 @@ func dumpRequest(r *http.Request) string {
header.Set("Host", r.Host) header.Set("Host", r.Host)
// Replace all '%' to '%%' so that printer format parser // Replace all '%' to '%%' so that printer format parser
// to ignore URL encoded values. // to ignore URL encoded values.
rawURI := strings.Replace(r.RequestURI, "%", "%%", -1) rawURI := strings.ReplaceAll(r.RequestURI, "%", "%%")
req := struct { req := struct {
Method string `json:"method"` Method string `json:"method"`
RequestURI string `json:"reqURI"` RequestURI string `json:"reqURI"`

View File

@ -238,9 +238,8 @@ func TestDumpRequest(t *testing.T) {
RequestURI string `json:"reqURI"` RequestURI string `json:"reqURI"`
Header http.Header `json:"header"` Header http.Header `json:"header"`
} }
jsonReq = strings.Replace(jsonReq, "%%", "%", -1)
res := jsonResult{} res := jsonResult{}
if err = json.Unmarshal([]byte(jsonReq), &res); err != nil { if err = json.Unmarshal([]byte(strings.ReplaceAll(jsonReq, "%%", "%")), &res); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -399,7 +398,6 @@ func TestCeilFrac(t *testing.T) {
// Test if isErrIgnored works correctly. // Test if isErrIgnored works correctly.
func TestIsErrIgnored(t *testing.T) { func TestIsErrIgnored(t *testing.T) {
var errIgnored = fmt.Errorf("ignored error") var errIgnored = fmt.Errorf("ignored error")
ignoredErrs := append(baseIgnoredErrs, errIgnored)
var testCases = []struct { var testCases = []struct {
err error err error
ignored bool ignored bool
@ -418,7 +416,7 @@ func TestIsErrIgnored(t *testing.T) {
}, },
} }
for i, testCase := range testCases { for i, testCase := range testCases {
if ok := IsErrIgnored(testCase.err, ignoredErrs...); ok != testCase.ignored { if ok := IsErrIgnored(testCase.err, append(baseIgnoredErrs, errIgnored)...); ok != testCase.ignored {
t.Errorf("Test: %d, Expected %t, got %t", i+1, testCase.ignored, ok) t.Errorf("Test: %d, Expected %t, got %t", i+1, testCase.ignored, ok)
} }
} }

View File

@ -276,7 +276,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
if err != nil { if err != nil {
return p, err return p, err
} }
if _, err = w.Write(alignedBuf[:]); err != nil { if _, err = w.Write(alignedBuf); err != nil {
w.Close() w.Close()
return p, err return p, err
} }
@ -2394,10 +2394,13 @@ func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string, glob
if err != nil { if err != nil {
name = filePath name = filePath
} }
if os.PathSeparator != '/' { stat = append(stat, StatInfo{
name = strings.Replace(name, string(os.PathSeparator), "/", -1) Name: filepath.ToSlash(name),
} Size: st.Size(),
stat = append(stat, StatInfo{ModTime: st.ModTime(), Size: st.Size(), Name: name, Dir: st.IsDir(), Mode: uint32(st.Mode())}) Dir: st.IsDir(),
Mode: uint32(st.Mode()),
ModTime: st.ModTime(),
})
} }
return stat, nil return stat, nil
} }
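
filepath.ToSlash is the portable spelling of the deleted branch: on Windows it rewrites every os.PathSeparator to '/', and where the separator is already '/' it returns the string unchanged, exactly what the old if guard emulated:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	name := filepath.Join("bucket", "prefix", "object")
	// Same result on every platform; ToSlash also skips the work when the
	// separator is already '/'.
	manual := strings.ReplaceAll(name, string(os.PathSeparator), "/")
	fmt.Println(filepath.ToSlash(name) == manual) // true
}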

View File

@ -447,7 +447,7 @@ func TestXLStorageReadAll(t *testing.T) {
continue continue
} }
if err == nil { if err == nil {
if string(dataRead) != string([]byte("Hello, World")) { if !bytes.Equal(dataRead, []byte("Hello, World")) {
t.Errorf("TestXLStorage %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead)) t.Errorf("TestXLStorage %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead))
} }
} }
@ -1227,7 +1227,10 @@ func TestXLStorageReadFile(t *testing.T) {
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err) t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
} }
// Expected error returned, proceed further to validate the returned results. // Expected error returned, proceed further to validate the returned results.
if err == nil && err == testCase.expectedErr { if err != nil && testCase.expectedErr == nil {
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
}
if err == nil {
if !bytes.Equal(testCase.expectedBuf, buf) { if !bytes.Equal(testCase.expectedBuf, buf) {
t.Errorf("Case: %d %#v, expected: \"%s\", got: \"%s\"", i+1, testCase, string(testCase.expectedBuf), string(buf[:testCase.bufSize])) t.Errorf("Case: %d %#v, expected: \"%s\", got: \"%s\"", i+1, testCase, string(testCase.expectedBuf), string(buf[:testCase.bufSize]))
} }
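
string(dataRead) != string([]byte("Hello, World")) forced two conversions just to compare; bytes.Equal compares the byte slices directly:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	dataRead := []byte("Hello, World")
	// Direct comparison of the raw bytes, no string conversions.
	fmt.Println(bytes.Equal(dataRead, []byte("Hello, World"))) // true
}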

View File

@ -56,12 +56,13 @@ func main() {
fatalErr(json.Unmarshal(got, &input)) fatalErr(json.Unmarshal(got, &input))
r, err := os.Open(input.File) r, err := os.Open(input.File)
fatalErr(err) fatalErr(err)
defer r.Close()
dstName := strings.TrimSuffix(input.File, ".enc") + ".zip" dstName := strings.TrimSuffix(input.File, ".enc") + ".zip"
w, err := os.Create(dstName) w, err := os.Create(dstName)
fatalErr(err) fatalErr(err)
defer w.Close()
decrypt(input.Key, r, w) decrypt(input.Key, r, w)
r.Close()
w.Close()
fmt.Println("Output decrypted to", dstName) fmt.Println("Output decrypted to", dstName)
return return
} }
@ -78,14 +79,13 @@ func main() {
case 1: case 1:
r, err := os.Open(args[0]) r, err := os.Open(args[0])
fatalErr(err) fatalErr(err)
defer r.Close()
if len(*key) == 0 { if len(*key) == 0 {
reader := bufio.NewReader(os.Stdin) reader := bufio.NewReader(os.Stdin)
fmt.Print("Enter Decryption Key: ") fmt.Print("Enter Decryption Key: ")
text, _ := reader.ReadString('\n') text, _ := reader.ReadString('\n')
// convert CRLF to LF // convert CRLF to LF
*key = strings.Replace(text, "\n", "", -1) *key = strings.ReplaceAll(text, "\n", "")
} }
*key = strings.TrimSpace(*key) *key = strings.TrimSpace(*key)
fatalIf(len(*key) != 72, "Unexpected key length: %d, want 72", len(*key)) fatalIf(len(*key) != 72, "Unexpected key length: %d, want 72", len(*key))
@ -93,9 +93,11 @@ func main() {
dstName := strings.TrimSuffix(args[0], ".enc") + ".zip" dstName := strings.TrimSuffix(args[0], ".enc") + ".zip"
w, err := os.Create(dstName) w, err := os.Create(dstName)
fatalErr(err) fatalErr(err)
defer w.Close()
decrypt(*key, r, w) decrypt(*key, r, w)
r.Close()
w.Close()
fmt.Println("Output decrypted to", dstName) fmt.Println("Output decrypted to", dstName)
return return
default: default:

View File

@ -222,8 +222,8 @@ func GenerateCredentials() (accessKey, secretKey string, err error) {
return "", "", err return "", "", err
} }
secretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]), secretKey = strings.ReplaceAll(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
"/", "+", -1) "/", "+")
return accessKey, secretKey, nil return accessKey, secretKey, nil
} }

View File

@ -42,7 +42,7 @@ type Monitor struct {
NodeCount uint64 NodeCount uint64
} }
//NewMonitor returns a monitor with defaults. // NewMonitor returns a monitor with defaults.
func NewMonitor(ctx context.Context, numNodes uint64) *Monitor { func NewMonitor(ctx context.Context, numNodes uint64) *Monitor {
m := &Monitor{ m := &Monitor{
activeBuckets: make(map[string]*bucketMeasurement), activeBuckets: make(map[string]*bucketMeasurement),
@ -63,7 +63,7 @@ func (m *Monitor) updateMeasurement(bucket string, bytes uint64) {
} }
} }
//SelectionFunction for buckets // SelectionFunction for buckets
type SelectionFunction func(bucket string) bool type SelectionFunction func(bucket string) bool
// SelectBuckets will select all the buckets passed in. // SelectBuckets will select all the buckets passed in.

View File

@ -57,7 +57,7 @@ const (
DeleteVersionAction DeleteVersionAction
// TransitionAction transitions a particular object after evaluating lifecycle transition rules // TransitionAction transitions a particular object after evaluating lifecycle transition rules
TransitionAction TransitionAction
//TransitionVersionAction transitions a particular object version after evaluating lifecycle transition rules // TransitionVersionAction transitions a particular object version after evaluating lifecycle transition rules
TransitionVersionAction TransitionVersionAction
// DeleteRestoredAction means the temporarily restored object needs to be removed after evaluating lifecycle rules // DeleteRestoredAction means the temporarily restored object needs to be removed after evaluating lifecycle rules
DeleteRestoredAction DeleteRestoredAction

View File

@ -193,6 +193,7 @@ func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
return fmt.Errorf("either Days or Years must be specified, not both") return fmt.Errorf("either Days or Years must be specified, not both")
} }
//nolint:gocritic
if retention.Days != nil { if retention.Days != nil {
if *retention.Days == 0 { if *retention.Days == 0 {
return fmt.Errorf("Default retention period must be a positive integer value for 'Days'") return fmt.Errorf("Default retention period must be a positive integer value for 'Days'")

View File

@ -137,6 +137,7 @@ func TestUnmarshalDefaultRetention(t *testing.T) {
} }
var dr DefaultRetention var dr DefaultRetention
err = xml.Unmarshal(d, &dr) err = xml.Unmarshal(d, &dr)
//nolint:gocritic
if tt.expectedErr == nil { if tt.expectedErr == nil {
if err != nil { if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err) t.Fatalf("error: expected = <nil>, got = %v", err)
@ -173,6 +174,7 @@ func TestParseObjectLockConfig(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
_, err := ParseObjectLockConfig(strings.NewReader(tt.value)) _, err := ParseObjectLockConfig(strings.NewReader(tt.value))
//nolint:gocritic
if tt.expectedErr == nil { if tt.expectedErr == nil {
if err != nil { if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err) t.Fatalf("error: expected = <nil>, got = %v", err)
@ -209,6 +211,7 @@ func TestParseObjectRetention(t *testing.T) {
} }
for _, tt := range tests { for _, tt := range tests {
_, err := ParseObjectRetention(strings.NewReader(tt.value)) _, err := ParseObjectRetention(strings.NewReader(tt.value))
//nolint:gocritic
if tt.expectedErr == nil { if tt.expectedErr == nil {
if err != nil { if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err) t.Fatalf("error: expected = <nil>, got = %v", err)
@ -367,6 +370,7 @@ func TestParseObjectLockRetentionHeaders(t *testing.T) {
for i, tt := range tests { for i, tt := range tests {
_, _, err := ParseObjectLockRetentionHeaders(tt.header) _, _, err := ParseObjectLockRetentionHeaders(tt.header)
//nolint:gocritic
if tt.expectedErr == nil { if tt.expectedErr == nil {
if err != nil { if err != nil {
t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err) t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)
@ -494,6 +498,7 @@ func TestParseObjectLegalHold(t *testing.T) {
} }
for i, tt := range tests { for i, tt := range tests {
_, err := ParseObjectLegalHold(strings.NewReader(tt.value)) _, err := ParseObjectLegalHold(strings.NewReader(tt.value))
//nolint:gocritic
if tt.expectedErr == nil { if tt.expectedErr == nil {
if err != nil { if err != nil {
t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err) t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)

View File

@ -37,7 +37,7 @@ type Destination struct {
Bucket string `xml:"Bucket" json:"Bucket"` Bucket string `xml:"Bucket" json:"Bucket"`
StorageClass string `xml:"StorageClass" json:"StorageClass"` StorageClass string `xml:"StorageClass" json:"StorageClass"`
ARN string ARN string
//EncryptionConfiguration TODO: not needed for MinIO // EncryptionConfiguration TODO: not needed for MinIO
} }
func (d Destination) isValidStorageClass() bool { func (d Destination) isValidStorageClass() bool {
@ -57,14 +57,14 @@ func (d Destination) String() string {
} }
//LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was used // LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was
// prior to multi-destination // used prior to multi-destination
func (d Destination) LegacyArn() bool { func (d Destination) LegacyArn() bool {
return strings.HasPrefix(d.ARN, DestinationARNPrefix) return strings.HasPrefix(d.ARN, DestinationARNPrefix)
} }
//TargetArn returns true if arn format has prefix "arn:minio:replication:::" used // TargetArn returns true if arn format has prefix "arn:minio:replication:::"
// for multi-destination targets // used for multi-destination targets
func (d Destination) TargetArn() bool { func (d Destination) TargetArn() bool {
return strings.HasPrefix(d.ARN, DestinationARNMinIOPrefix) return strings.HasPrefix(d.ARN, DestinationARNMinIOPrefix)
} }

View File

@ -175,7 +175,7 @@ func (c Config) FilterActionableRules(obj ObjectOpts) []Rule {
rules = append(rules, rule) rules = append(rules, rule)
} }
} }
sort.Slice(rules[:], func(i, j int) bool { sort.Slice(rules, func(i, j int) bool {
return rules[i].Priority > rules[j].Priority && rules[i].Destination.String() == rules[j].Destination.String() return rules[i].Priority > rules[j].Priority && rules[i].Destination.String() == rules[j].Destination.String()
}) })
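
sort.Slice(rules[:], ...) to sort.Slice(rules, ...) is gocritic's unslice fix: for a value that is already a slice, s[:] yields the same slice header, so the full-slice expression is noise:

package main

import (
	"fmt"
	"sort"
)

func main() {
	rules := []int{3, 1, 2}
	sort.Slice(rules, func(i, j int) bool { return rules[i] < rules[j] })
	fmt.Println(rules) // [1 2 3]

	// s[:] on a slice yields the same header (pointer, len, cap); it is
	// only on arrays that [:] does real work by creating a slice.
	fmt.Println(len(rules[:]) == len(rules), cap(rules[:]) == cap(rules)) // true true
}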

View File

@@ -31,28 +31,28 @@ func TestParseAndValidateReplicationConfig(t *testing.T) {
        destBucket            string
        sameTarget            bool
    }{
-       { //1 Invalid delete marker status in replication config
+       { // 1 Invalid delete marker status in replication config
            inputConfig:           `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>string</Status></DeleteMarkerReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: errInvalidDeleteMarkerReplicationStatus,
        },
-       //2 Invalid delete replication status in replication config
+       // 2 Invalid delete replication status in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: errDeleteReplicationMissing,
        },
-       //3 valid replication config
+       // 3 valid replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: nil,
        },
-       //4 missing role in config and destination ARN is in legacy format
+       // 4 missing role in config and destination ARN is in legacy format
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            // destination bucket in config different from bucket specified
            destBucket: "destinationbucket",
@@ -60,63 +60,63 @@ func TestParseAndValidateReplicationConfig(t *testing.T) {
            expectedParsingErr:    nil,
            expectedValidationErr: errDestinationArnMissing,
        },
-       //5 replication destination in different rules not identical
+       // 5 replication destination in different rules not identical
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role></Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:minio:replication:::destinationbucket</Bucket></Destination></Rule><Rule><Status>Enabled</Status><Priority>3</Priority><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:minio:replication:::destinationbucket2</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: nil,
        },
-       //6 missing rule status in replication config
+       // 6 missing rule status in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: errEmptyRuleStatus,
        },
-       //7 invalid rule status in replication config
+       // 7 invalid rule status in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enssabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: errInvalidRuleStatus,
        },
-       //8 invalid rule id exceeds length allowed in replication config
+       // 8 invalid rule id exceeds length allowed in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><ID>vsUVERgOc8zZYagLSzSa5lE8qeI6nh1lyLNS4R9W052yfecrhhepGboswSWMMNO8CPcXM4GM3nKyQ72EadlMzzZBFoYWKn7ju5GoE5w9c57a0piHR1vexpdd9FrMquiruvAJ0MTGVupm0EegMVxoIOdjx7VgZhGrmi2XDvpVEFT7WmYMA9fSK297XkTHWyECaNHBySJ1Qp4vwX8tPNauKpfHx4kzUpnKe1PZbptGMWbY5qTcwlNuMhVSmgFffShq</ID><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: errInvalidRuleID,
        },
-       //9 invalid priority status in replication config
+       // 9 invalid priority status in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: errReplicationUniquePriority,
        },
-       //10 no rule in replication config
+       // 10 no rule in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    nil,
            expectedValidationErr: errReplicationNoRule,
        },
-       //11 no destination in replication config
+       // 11 no destination in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    Errorf("invalid destination '%v'", ""),
            expectedValidationErr: nil,
        },
-       //12 destination not matching ARN in replication config
+       // 12 destination not matching ARN in replication config
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>destinationbucket2</Bucket></Destination></Rule></ReplicationConfiguration>`,
            destBucket:            "destinationbucket",
            sameTarget:            false,
            expectedParsingErr:    fmt.Errorf("invalid destination '%v'", "destinationbucket2"),
            expectedValidationErr: nil,
        },
-       //13 missing role in config and destination ARN has target ARN
+       // 13 missing role in config and destination ARN has target ARN
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:minio:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            // destination bucket in config different from bucket specified
            destBucket: "destinationbucket",
@@ -124,7 +124,7 @@ func TestParseAndValidateReplicationConfig(t *testing.T) {
            expectedParsingErr:    nil,
            expectedValidationErr: nil,
        },
-       //14 role absent in config and destination ARN has target ARN in invalid format
+       // 14 role absent in config and destination ARN has target ARN in invalid format
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:xx:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            // destination bucket in config different from bucket specified
            destBucket: "destinationbucket",
@@ -232,53 +232,53 @@ func TestReplicate(t *testing.T) {
        expectedResult bool
    }{
        // using config 1 - no filters, all replication enabled
-       {ObjectOpts{}, cfgs[0], false}, //1. invalid ObjectOpts missing object name
+       {ObjectOpts{}, cfgs[0], false}, // 1. invalid ObjectOpts missing object name
-       {ObjectOpts{Name: "c1test"}, cfgs[0], true}, //2. valid ObjectOpts passing empty Filter
+       {ObjectOpts{Name: "c1test"}, cfgs[0], true}, // 2. valid ObjectOpts passing empty Filter
-       {ObjectOpts{Name: "c1test", VersionID: "vid"}, cfgs[0], true}, //3. valid ObjectOpts passing empty Filter
+       {ObjectOpts{Name: "c1test", VersionID: "vid"}, cfgs[0], true}, // 3. valid ObjectOpts passing empty Filter
-       {ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, //4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status
+       {ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status
-       {ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, //5. permanent delete of version, matches DeleteReplication status - valid case
+       {ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, // 5. permanent delete of version, matches DeleteReplication status - valid case
-       {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, //6. permanent delete of version, matches DeleteReplication status
+       {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 6. permanent delete of version, matches DeleteReplication status
-       {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, //7. permanent delete of version, disqualified by SSE-C
+       {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 7. permanent delete of version, disqualified by SSE-C
-       {ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, //8. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C
+       {ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 8. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C
-       {ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], false}, //9. replication of SSE-C encrypted object, disqualified
+       {ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], false}, // 9. replication of SSE-C encrypted object, disqualified
        // using config 2 - no filters, only replication of object, metadata enabled
-       {ObjectOpts{Name: "c2test"}, cfgs[1], true}, //10. valid ObjectOpts passing empty Filter
+       {ObjectOpts{Name: "c2test"}, cfgs[1], true}, // 10. valid ObjectOpts passing empty Filter
-       {ObjectOpts{Name: "c2test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, //11. DeleteMarker version replication not allowed due to DeleteMarkerReplication status
+       {ObjectOpts{Name: "c2test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 11. DeleteMarker version replication not allowed due to DeleteMarkerReplication status
-       {ObjectOpts{Name: "c2test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[1], false}, //12. permanent delete of version, disallowed by DeleteReplication status
+       {ObjectOpts{Name: "c2test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[1], false}, // 12. permanent delete of version, disallowed by DeleteReplication status
-       {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, //13. permanent delete of DeleteMarker version, disallowed by DeleteReplication status
+       {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 13. permanent delete of DeleteMarker version, disallowed by DeleteReplication status
-       {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, //14. permanent delete of version, disqualified by SSE-C & DeleteReplication status
+       {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 14. permanent delete of version, disqualified by SSE-C & DeleteReplication status
-       {ObjectOpts{Name: "c2test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, //15. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C & DeleteMarkerReplication status
+       {ObjectOpts{Name: "c2test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 15. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C & DeleteMarkerReplication status
-       {ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], false}, //16. replication of SSE-C encrypted object, disqualified by default
+       {ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], false}, // 16. replication of SSE-C encrypted object, disqualified by default
        // using config 2 - has more than one rule with overlapping prefixes
-       {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v1"}, cfgs[2], true}, //17. matches rule 1 for replication of content/metadata
+       {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 17. matches rule 1 for replication of content/metadata
-       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1"}, cfgs[2], true}, //18. matches rule 1 for replication of content/metadata
+       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 18. matches rule 1 for replication of content/metadata
-       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], false}, //19. matches rule 1 - DeleteMarker replication disallowed by rule
+       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], false}, // 19. matches rule 1 - DeleteMarker replication disallowed by rule
-       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, //20. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
+       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, // 20. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
-       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, //21. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
+       {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, // 21. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
-       {ObjectOpts{Name: "xyz/c3test"}, cfgs[2], true}, //22. matches rule 2 for replication of content/metadata
+       {ObjectOpts{Name: "xyz/c3test"}, cfgs[2], true}, // 22. matches rule 2 for replication of content/metadata
-       {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v2"}, cfgs[2], false}, //23. does not match rule1 because tag value does not pass filter
+       {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v2"}, cfgs[2], false}, // 23. does not match rule1 because tag value does not pass filter
-       {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], true}, //24. matches rule 2 - DeleteMarker replication allowed by rule
+       {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], true}, // 24. matches rule 2 - DeleteMarker replication allowed by rule
-       {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, //25. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
+       {ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, // 25. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
-       {ObjectOpts{Name: "xyz/c3test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, //26. matches rule 1 - DeleteReplication disallowed by rule for permanent delete of version
+       {ObjectOpts{Name: "xyz/c3test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, // 26. matches rule 1 - DeleteReplication disallowed by rule for permanent delete of version
-       {ObjectOpts{Name: "abc/c3test"}, cfgs[2], false}, //27. matches no rule because object prefix does not match
+       {ObjectOpts{Name: "abc/c3test"}, cfgs[2], false}, // 27. matches no rule because object prefix does not match
        // using config 3 - has no overlapping rules
-       {ObjectOpts{Name: "xy/c4test", UserTags: "k1=v1"}, cfgs[3], true}, //28. matches rule 1 for replication of content/metadata
+       {ObjectOpts{Name: "xy/c4test", UserTags: "k1=v1"}, cfgs[3], true}, // 28. matches rule 1 for replication of content/metadata
-       {ObjectOpts{Name: "xa/c4test", UserTags: "k1=v1"}, cfgs[3], false}, //29. no rule match object prefix not in rules
+       {ObjectOpts{Name: "xa/c4test", UserTags: "k1=v1"}, cfgs[3], false}, // 29. no rule match object prefix not in rules
-       {ObjectOpts{Name: "xyz/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, //30. rule 1 not matched because of tags filter
+       {ObjectOpts{Name: "xyz/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, // 30. rule 1 not matched because of tags filter
-       {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, //31. matches rule 1 - DeleteMarker replication disallowed by rule
+       {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, // 31. matches rule 1 - DeleteMarker replication disallowed by rule
-       {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, //32. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
+       {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, // 32. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
-       {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, //33. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
+       {ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, // 33. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
-       {ObjectOpts{Name: "abc/c4test"}, cfgs[3], true}, //34. matches rule 2 for replication of content/metadata
+       {ObjectOpts{Name: "abc/c4test"}, cfgs[3], true}, // 34. matches rule 2 for replication of content/metadata
-       {ObjectOpts{Name: "abc/c4test", UserTags: "k1=v2"}, cfgs[3], true}, //35. matches rule 2 for replication of content/metadata
+       {ObjectOpts{Name: "abc/c4test", UserTags: "k1=v2"}, cfgs[3], true}, // 35. matches rule 2 for replication of content/metadata
-       {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], true}, //36. matches rule 2 - DeleteMarker replication allowed by rule
+       {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], true}, // 36. matches rule 2 - DeleteMarker replication allowed by rule
-       {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, //37. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
+       {ObjectOpts{Name: "abc/c4test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, // 37. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
-       {ObjectOpts{Name: "abc/c4test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, //38. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of version
+       {ObjectOpts{Name: "abc/c4test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, // 38. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of version
        // using config 4 - with replica modification sync disabled.
-       {ObjectOpts{Name: "xy/c5test", UserTags: "k1=v1", Replica: true}, cfgs[4], false}, //39. replica syncing disabled, this object is a replica
+       {ObjectOpts{Name: "xy/c5test", UserTags: "k1=v1", Replica: true}, cfgs[4], false}, // 39. replica syncing disabled, this object is a replica
-       {ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, //40. replica syncing disabled, this object is NOT a replica
+       {ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, // 40. replica syncing disabled, this object is NOT a replica
    }
    for i, testCase := range testCases {
@@ -322,7 +322,7 @@ func TestHasActiveRules(t *testing.T) {
            expectedNonRec: false,
            expectedRec:    true,
        },
-       //case 5 - has filter with prefix and tags, here we are not matching on tags
+       // case 5 - has filter with prefix and tags, here we are not matching on tags
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Filter>
        <And><Prefix>key-prefix</Prefix><Tag><Key>key1</Key><Value>value1</Value></Tag><Tag><Key>key2</Key><Value>value2</Value></Tag></And></Filter><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
            prefix: "testdir/",
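Nearly all of the churn in these test tables comes from gocritic's commentFormatting check, which requires a space between // and the comment text, so //1 becomes // 1. Behavior is unchanged. A minimal sketch of the rule (hypothetical file, not from this repository):

package example

// ok: idiomatic comment, one space after the slashes
var a = 1

//bad: commentFormatting flags the missing space after the slashes
var b = 2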


@@ -31,23 +31,23 @@ func TestMetadataReplicate(t *testing.T) {
    }{
        // case 1 - rule with replica modification enabled; not a replica
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Enabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
-           opts:           ObjectOpts{Name: "c1test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, //1. Replica mod sync enabled; not a replica
+           opts:           ObjectOpts{Name: "c1test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync enabled; not a replica
            expectedResult: true,
        },
        // case 2 - rule with replica modification disabled; a replica
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Disabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
-           opts:           ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: true}, //1. Replica mod sync enabled; a replica
+           opts:           ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: true}, // 1. Replica mod sync enabled; a replica
            expectedResult: false,
        },
        // case 3 - rule with replica modification disabled; not a replica
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Disabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
-           opts:           ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, //1. Replica mod sync disabled; not a replica
+           opts:           ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync disabled; not a replica
            expectedResult: true,
        },
        // case 4 - rule with replica modification enabled; a replica
        {inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Enabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
-           opts:           ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: MetadataReplicationType, Replica: true}, //1. Replica mod sync enabled; a replica
+           opts:           ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: MetadataReplicationType, Replica: true}, // 1. Replica mod sync enabled; a replica
            expectedResult: true,
        },
    }


@@ -117,9 +117,9 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
}
// EnsureCertAndKey checks if both client certificate and key paths are provided
-func EnsureCertAndKey(ClientCert, ClientKey string) error {
-   if (ClientCert != "" && ClientKey == "") ||
-       (ClientCert == "" && ClientKey != "") {
+func EnsureCertAndKey(clientCert, clientKey string) error {
+   if (clientCert != "" && clientKey == "") ||
+       (clientCert == "" && clientKey != "") {
        return errors.New("cert and key must be specified as a pair")
    }
    return nil
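The parameter renames above (ClientCert to clientCert) satisfy gocritic's captLocal check: parameters and other locals should start with a lowercase letter, because a leading capital reads like an exported package-level identifier. A tiny sketch of the same idea with a hypothetical function:

package example

import "errors"

// was: func pair(First, Second string) error — captLocal flags First/Second
func pair(first, second string) error {
    if (first == "") != (second == "") {
        return errors.New("values must be provided together")
    }
    return nil
}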


@@ -38,6 +38,7 @@ func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []strin
    values := []string{}
    for _, name := range names {
        oid := name.Type
+       //nolint:gocritic
        if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
            switch oid[3] {
            case 3:
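Here the commit suppresses the linter instead of restructuring: the OID comparison reads best in its current shape, so a //nolint directive opts it out. Under golangci-lint's usual scoping, a //nolint:<linter> comment on its own line applies to the statement that follows; a sketch assuming that behavior:

package example

func sign(n int) string {
    //nolint:gocritic // singleCaseSwitch: kept as a switch deliberately
    switch {
    case n < 0:
        return "negative"
    }
    return "non-negative"
}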


@@ -201,9 +201,9 @@ func Authentication(username, password string) OperatorOption {
}
// RootCAs - add custom trust certs pool
-func RootCAs(CAs *x509.CertPool) OperatorOption {
+func RootCAs(certPool *x509.CertPool) OperatorOption {
    return func(args *OperatorDNS) {
-       args.rootCAs = CAs
+       args.rootCAs = certPool
    }
}


@@ -86,7 +86,7 @@ func (opts Config) Wait(currentIO func() int, systemIO func() int) {
        } else {
            time.Sleep(waitTick)
        }
-       tmpMaxWait = tmpMaxWait - waitTick
+       tmpMaxWait -= waitTick
    }
    if tmpMaxWait <= 0 {
        return
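The rewrite above is gocritic's assignOp check: when the assigned variable repeats as the left operand, the compound form is shorter and leaves less room for typos. The same rewrite appears later in this commit for |= and &=. A tiny sketch:

package example

import "time"

func shrink() (time.Duration, int) {
    remaining := 10 * time.Second
    remaining -= time.Second // was: remaining = remaining - time.Second

    flags := 0x1
    flags |= 0x4  // was: flags = flags | 0x4
    flags &= ^0x1 // was: flags = flags & ^0x1 (clears the low bit)
    return remaining, flags
}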


@@ -186,7 +186,7 @@ func (l *Config) lookupBind(conn *ldap.Conn) error {
// assumed to be using the lookup bind service account. It is required that the
// search result in at most one result.
func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
-   filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
+   filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username))
    searchRequest := ldap.NewSearchRequest(
        l.UserDNSearchBaseDN,
        ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
@@ -213,8 +213,8 @@ func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) (
    var groups []string
    if l.GroupSearchFilter != "" {
        for _, groupSearchBase := range l.GroupSearchBaseDistNames {
-           filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
-           filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
+           filter := strings.ReplaceAll(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username))
+           filter = strings.ReplaceAll(filter, "%d", ldap.EscapeFilter(bindDN))
            searchRequest := ldap.NewSearchRequest(
                groupSearchBase,
                ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
@@ -393,7 +393,7 @@ func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string,
    }
    // Evaluate the filter again with generic wildcard instead of specific values
-   filter := strings.Replace(l.UserDNSearchFilter, "%s", "*", -1)
+   filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", "*")
    nonExistentUsers := []string{}
    for _, dn := range userDistNames {
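strings.ReplaceAll(s, old, new), added in Go 1.12, is defined as strings.Replace(s, old, new, -1); these rewrites only swap in the name that states the intent. A quick equivalence check, using a hypothetical filter template in the spirit of the LDAP ones above:

package main

import (
    "fmt"
    "strings"
)

func main() {
    tmpl := "(&(objectClass=user)(uid=%s)(cn=%s))"
    a := strings.Replace(tmpl, "%s", "jdoe", -1) // -1 means "replace all occurrences"
    b := strings.ReplaceAll(tmpl, "%s", "jdoe")  // identical result, clearer intent
    fmt.Println(a == b)                          // true
}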


@@ -85,6 +85,7 @@ func TestPublicKey(t *testing.T) {
        }
    }
+   //nolint:gocritic
    if key0, ok := keys[0].(*ecdsa.PublicKey); !ok {
        t.Fatalf("Expected ECDSA key[0], got %T", keys[0])
    } else if key1, ok := keys[1].(*rsa.PublicKey); !ok {


@@ -19,7 +19,7 @@ package config
import "github.com/minio/minio/internal/auth"
-//// One time migration code section
+// One time migration code section
// SetCredentials - One time migration code needed, for migrating from older config to new for server credentials.
func SetCredentials(c Config, cred auth.Credentials) {


@@ -90,7 +90,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
        sealingKey   [32]byte
        encryptedKey bytes.Buffer
    )
-   mac := hmac.New(sha256.New, extKey[:])
+   mac := hmac.New(sha256.New, extKey)
    mac.Write(iv[:])
    mac.Write([]byte(domain))
    mac.Write([]byte(SealAlgorithm))
@@ -118,7 +118,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
    default:
        return Errorf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm)
    case SealAlgorithm:
-       mac := hmac.New(sha256.New, extKey[:])
+       mac := hmac.New(sha256.New, extKey)
        mac.Write(sealedKey.IV[:])
        mac.Write([]byte(domain))
        mac.Write([]byte(SealAlgorithm))
@@ -126,7 +126,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
        unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
    case InsecureSealAlgorithm:
        sha := sha256.New()
-       sha.Write(extKey[:])
+       sha.Write(extKey)
        sha.Write(sealedKey.IV[:])
        unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
    }
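Dropping the [:] on extKey is gocritic's unslice check: extKey is already a []byte, and s[:] over a slice's full range yields an identical slice header, so it is pure noise. By contrast, iv and sealedKey.IV are [32]byte arrays, where [:] is required to obtain a slice, which is why mac.Write(iv[:]) keeps it. A sketch of the distinction:

package example

import "crypto/sha256"

func digest(key []byte, iv [16]byte) [32]byte {
    h := sha256.New()
    h.Write(key)   // slice: key[:] would be a redundant no-op
    h.Write(iv[:]) // array: [:] is required to get a []byte view
    var sum [32]byte
    copy(sum[:], h.Sum(nil))
    return sum
}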


@@ -164,7 +164,7 @@ func TestDerivePartKey(t *testing.T) {
            t.Fatalf("Test %d failed to decode expected part-key: %v", i, err)
        }
        partKey := key.DerivePartKey(test.PartID)
-       if !bytes.Equal(partKey[:], expectedPartKey[:]) {
+       if !bytes.Equal(partKey[:], expectedPartKey) {
            t.Errorf("Test %d derives wrong part-key: got '%s' want: '%s'", i, hex.EncodeToString(partKey[:]), test.PartKey)
        }
    }


@@ -109,7 +109,7 @@ func (s3 ssekms) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket
    if err != nil {
        return key, err
    }
-   err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
+   err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
    return key, err
}


@@ -80,7 +80,7 @@ func (s3 sses3) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket,
    if err != nil {
        return key, err
    }
-   err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
+   err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
    return key, err
}


@@ -40,12 +40,12 @@ func DisableDirectIO(f *os.File) error {
    if err != nil {
        return err
    }
-   flag = flag & ^(syscall.O_DIRECT)
+   flag &= ^(syscall.O_DIRECT)
    _, err = unix.FcntlInt(fd, unix.F_SETFL, flag)
    return err
}
// AlignedBlock - pass through to directio implementation.
-func AlignedBlock(BlockSize int) []byte {
-   return directio.AlignedBlock(BlockSize)
+func AlignedBlock(blockSize int) []byte {
+   return directio.AlignedBlock(blockSize)
}


@@ -199,9 +199,9 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), i
        // If success, copy array to object
        if isReadLock {
-           copy(dm.readLocks, locks[:])
+           copy(dm.readLocks, locks)
        } else {
-           copy(dm.writeLocks, locks[:])
+           copy(dm.writeLocks, locks)
        }
        dm.m.Unlock()
@@ -579,7 +579,7 @@ func (dm *DRWMutex) Unlock() {
    }
    // Copy write locks to stack array
-   copy(locks, dm.writeLocks[:])
+   copy(locks, dm.writeLocks)
}
// Tolerance is not set, defaults to half of the locker clients.
@@ -620,7 +620,7 @@ func (dm *DRWMutex) RUnlock() {
    }
    // Copy write locks to stack array
-   copy(locks, dm.readLocks[:])
+   copy(locks, dm.readLocks)
}
// Tolerance is not set, defaults to half of the locker clients.


@@ -94,10 +94,8 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
    if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply {
        l.lockMap[args.Resources[0]] = ReadLock // No locks held on the given name, so claim (first) read lock
        *reply = true
-   } else {
-       if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
-           l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
-       }
+   } else if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
+       l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
    }
    return nil
}
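This is gocritic's elseif rewrite: an else block containing nothing but an if collapses into else if, dropping one nesting level with identical semantics; note the inner if's init statement (the *reply assignment) moves up with it. The same collapse is applied to the MQTT and NATS targets further down. Shape of the transformation, with a hypothetical lookup:

package example

func classify(m map[string]int, k string) string {
    if v, ok := m[k]; !ok {
        return "absent"
    } else if v > 0 { // was: } else { if v > 0 { ... } }
        return "positive"
    }
    return "non-positive"
}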


@@ -318,7 +318,7 @@ func ParseConfig(reader io.Reader, region string, targetList *TargetList) (*Conf
    }
    config.SetRegion(region)
-   //If xml namespace is empty, set a default value before returning.
+   // If xml namespace is empty, set a default value before returning.
    if config.XMLNS == "" {
        config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
    }


@@ -41,7 +41,7 @@ func NewPattern(prefix, suffix string) (pattern string) {
        pattern += suffix
    }
-   pattern = strings.Replace(pattern, "**", "*", -1)
+   pattern = strings.ReplaceAll(pattern, "**", "*")
    return pattern
}


@@ -25,13 +25,14 @@ import (
)
func initScramClient(args KafkaArgs, config *sarama.Config) {
-   if args.SASL.Mechanism == "sha512" {
+   switch args.SASL.Mechanism {
+   case "sha512":
        config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA512} }
        config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
-   } else if args.SASL.Mechanism == "sha256" {
+   case "sha256":
        config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA256} }
        config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
-   } else {
+   default:
        // default to PLAIN
        config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypePlaintext)
    }
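Rewriting the mechanism dispatch as a switch follows gocritic's ifElseChain suggestion: successive equality tests against one value read more clearly as a switch, and the trailing else becomes an explicit default. The shape, with hypothetical names:

package example

func saslMechanism(name string) string {
    switch name { // was: if name == "sha512" { ... } else if name == "sha256" { ... } else { ... }
    case "sha512":
        return "SCRAM-SHA-512"
    case "sha256":
        return "SCRAM-SHA-256"
    default:
        return "PLAIN" // default to PLAIN, mirroring the target above
    }
}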


@@ -272,10 +272,8 @@ func NewMQTTTarget(id string, args MQTTArgs, doneCh <-chan struct{}, loggerOnce
            // Start replaying events from the store.
            go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce)
        }
-   } else {
-       if token.Wait() && token.Error() != nil {
-           return target, token.Error()
-       }
+   } else if token.Wait() && token.Error() != nil {
+       return target, token.Error()
    }
    return target, nil
}


@@ -172,6 +172,7 @@ func (n NATSArgs) connectStan() (stan.Conn, error) {
    }
    var addressURL string
+   //nolint:gocritic
    if n.Username != "" && n.Password != "" {
        addressURL = scheme + "://" + n.Username + ":" + n.Password + "@" + n.Address.String()
    } else if n.Token != "" {
@@ -219,18 +220,14 @@ func (target *NATSTarget) IsActive() (bool, error) {
    if target.args.Streaming.Enable {
        if target.stanConn == nil || target.stanConn.NatsConn() == nil {
            target.stanConn, connErr = target.args.connectStan()
-       } else {
-           if !target.stanConn.NatsConn().IsConnected() {
-               return false, errNotConnected
-           }
+       } else if !target.stanConn.NatsConn().IsConnected() {
+           return false, errNotConnected
        }
    } else {
        if target.natsConn == nil {
            target.natsConn, connErr = target.args.connectNats()
-       } else {
-           if !target.natsConn.IsConnected() {
-               return false, errNotConnected
-           }
+       } else if !target.natsConn.IsConnected() {
+           return false, errNotConnected
        }
    }


@@ -29,7 +29,7 @@ import (
func AppendFile(dst string, src string, osync bool) error {
    flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE
    if osync {
-       flags = flags | os.O_SYNC
+       flags |= os.O_SYNC
    }
    appendFile, err := os.OpenFile(dst, flags, 0666)
    if err != nil {

Some files were not shown because too many files have changed in this diff.