mirror of https://github.com/minio/minio.git
add gocritic/ruleguard checks back again, cleanup code. (#13665)

- remove some duplicated code
- reported a bug, separately fixed in #13664
- using strings.ReplaceAll() when needed
- using filepath.ToSlash() when needed
- remove all non-Go style comments from the codebase

Co-authored-by: Aditya Manthramurthy <donatello@users.noreply.github.com>
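The hunks below apply a handful of mechanical gocritic rewrites over and over. The following sketch is illustrative only — the describe function and its status/cmdLine inputs are hypothetical stand-ins, not code from this diff — but it shows the before/after shape of the three most common fixes in this commit: strings.ReplaceAll instead of strings.Replace(..., -1) (gocritic's wrapperFunc check), a switch instead of an if/else chain (ifElseChain), and compound assignment (assignOp).

package main

import (
	"fmt"
	"strings"
)

// Hypothetical example mirroring the rewrite patterns applied
// throughout this commit, rather than any one hunk.
func describe(status, cmdLine string) string {
	// wrapperFunc: strings.Replace with n == -1 is clearer as ReplaceAll.
	anon := strings.ReplaceAll(cmdLine, "node-1", "server1")

	// ifElseChain: a chain of equality checks reads better as a switch.
	var state string
	switch status {
	case "enabled":
		state = "on"
	case "disabled":
		state = "off"
	default:
		state = "unknown"
	}

	// assignOp: count = count + x becomes count += x,
	// flags = flags | x becomes flags |= x, and so on.
	count := 0
	count += len(anon)

	return fmt.Sprintf("%s (%s), %d bytes", anon, state, count)
}

func main() {
	fmt.Println(describe("enabled", "minio server node-1"))
}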
parent 07c5e72cdb
commit 661b263e77
@@ -23,12 +23,19 @@ linters:
   - structcheck
   - unconvert
   - varcheck
+  - gocritic

 issues:
   exclude-use-default: false
   exclude:
     - should have a package comment
     - error strings should not be capitalized or end with punctuation or a newline
+    # todo fix these when we get enough time.
+    - "singleCaseSwitch: should rewrite switch statement to if statement"
+    - "unlambda: replace"
+    - "captLocal:"
+    - "ifElseChain:"
+    - "elseif:"

 service:
-  golangci-lint-version: 1.20.0 # use the fixed version to not introduce new linters unexpectedly
+  golangci-lint-version: 1.43.0 # use the fixed version to not introduce new linters unexpectedly
Makefile

@@ -19,7 +19,7 @@ help: ## print this help

 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.40.1
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.43.0
 	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@latest
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

@@ -215,11 +215,9 @@ func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *
 				return
 			}
 		}
-	} else {
-		if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
-			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
-			return
-		}
+	} else if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
 	}
 }

@@ -323,11 +323,12 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
 	status := vars["status"]

 	var err error
-	if status == statusEnabled {
+	switch status {
+	case statusEnabled:
 		err = globalIAMSys.SetGroupStatus(ctx, group, true)
-	} else if status == statusDisabled {
+	case statusDisabled:
 		err = globalIAMSys.SetGroupStatus(ctx, group, false)
-	} else {
+	default:
 		err = errInvalidArgument
 	}
 	if err != nil {
@@ -1356,6 +1356,7 @@ func getServerInfo(ctx context.Context, r *http.Request) madmin.InfoMessage {
 	ldap := madmin.LDAP{}
 	if globalLDAPConfig.Enabled {
 		ldapConn, err := globalLDAPConfig.Connect()
+		//nolint:gocritic
 		if err != nil {
 			ldap.Status = string(madmin.ItemOffline)
 		} else if ldapConn == nil {
@@ -1636,8 +1637,8 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
 	anonymizeCmdLine := func(cmdLine string) string {
 		if !globalIsDistErasure {
 			// FS mode - single server - hard code to `server1`
-			anonCmdLine := strings.Replace(cmdLine, globalLocalNodeName, "server1", -1)
-			return strings.Replace(anonCmdLine, globalMinioConsoleHost, "server1", -1)
+			anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
+			return strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
 		}

 		// Server start command regex groups:
@@ -491,7 +491,7 @@ func (h *healSequence) getScannedItemsCount() int64 {
 	defer h.mutex.RUnlock()

 	for _, v := range h.scannedItemsMap {
-		count = count + v
+		count += v
 	}
 	return count
 }
@@ -43,8 +43,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 	// Admin router
 	adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()

-	/// Service operations
-
 	adminVersions := []string{
 		adminAPIVersionPrefix,
 	}

@@ -71,7 +69,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 	adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(gz(httpTraceAll(adminAPI.DataUsageInfoHandler)))

 	if globalIsDistErasure || globalIsErasure {
-		/// Heal operations
+		// Heal operations

 		// Heal processing endpoint.
 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))

@@ -79,9 +77,6 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(gz(httpTraceAll(adminAPI.HealHandler)))

 		adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(gz(httpTraceAll(adminAPI.BackgroundHealStatusHandler)))
-
-		/// Health operations
-
 	}

 	// Profiling operations

@@ -106,7 +101,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(gz(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler))).Queries("restoreId", "{restoreId:.*}")
 	}

-	/// Config import/export bulk operations
+	// Config import/export bulk operations
 	if enableConfigOps {
 		// Get config
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(gz(httpTraceHdrs(adminAPI.GetConfigHandler)))
@@ -973,7 +973,7 @@ var errorCodes = errorCodeMap{
 		HTTPStatusCode: http.StatusNotFound,
 	},

-	/// Bucket notification related errors.
+	// Bucket notification related errors.
 	ErrEventNotification: {
 		Code:           "InvalidArgument",
 		Description:    "A specified event is not supported for notifications.",

@@ -1120,14 +1120,14 @@ var errorCodes = errorCodeMap{
 		HTTPStatusCode: http.StatusForbidden,
 	},

-	/// S3 extensions.
+	// S3 extensions.
 	ErrContentSHA256Mismatch: {
 		Code:           "XAmzContentSHA256Mismatch",
 		Description:    "The provided 'x-amz-content-sha256' header does not match what was computed.",
 		HTTPStatusCode: http.StatusBadRequest,
 	},

-	/// MinIO extensions.
+	// MinIO extensions.
 	ErrStorageFull: {
 		Code:           "XMinioStorageFull",
 		Description:    "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",

@@ -1370,7 +1370,7 @@ var errorCodes = errorCodeMap{
 		Description:    "The continuation token provided is incorrect",
 		HTTPStatusCode: http.StatusBadRequest,
 	},
-	//S3 Select API Errors
+	// S3 Select API Errors
 	ErrEmptyRequestBody: {
 		Code:           "EmptyRequestBody",
 		Description:    "Request body cannot be empty.",

@@ -2074,6 +2074,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	default:
 		var ie, iw int
 		// This work-around is to handle the issue golang/go#30648
+		//nolint:gocritic
 		if _, ferr := fmt.Fscanf(strings.NewReader(err.Error()),
 			"request declared a Content-Length of %d but only wrote %d bytes",
 			&ie, &iw); ferr != nil {

@@ -2229,6 +2230,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 		}
 	// Add more Gateway SDKs here if any in future.
 	default:
+		//nolint:gocritic
 		if errors.Is(err, errMalformedEncoding) {
 			apiErr = APIError{
 				Code: "BadRequest",
@@ -301,7 +301,8 @@ func registerAPIRouter(router *mux.Router) {
 		router.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
 			collectAPIStats("restoreobject", maxClients(gz(httpTraceAll(api.PostRestoreObjectHandler))))).Queries("restore", "")

-		/// Bucket operations
+		// Bucket operations
+
 		// GetBucketLocation
 		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbucketlocation", maxClients(gz(httpTraceAll(api.GetBucketLocationHandler))))).Queries("location", "")

@@ -355,7 +356,7 @@ func registerAPIRouter(router *mux.Router) {
 		// GetBucketTaggingHandler
 		router.Methods(http.MethodGet).HandlerFunc(
 			collectAPIStats("getbuckettagging", maxClients(gz(httpTraceAll(api.GetBucketTaggingHandler))))).Queries("tagging", "")
-		//DeleteBucketWebsiteHandler
+		// DeleteBucketWebsiteHandler
 		router.Methods(http.MethodDelete).HandlerFunc(
 			collectAPIStats("deletebucketwebsite", maxClients(gz(httpTraceAll(api.DeleteBucketWebsiteHandler))))).Queries("website", "")
 		// DeleteBucketTaggingHandler

@@ -452,7 +453,7 @@ func registerAPIRouter(router *mux.Router) {
 			collectAPIStats("listobjectsv1", maxClients(gz(httpTraceAll(api.ListObjectsV1Handler)))))
 	}

-	/// Root operation
+	// Root operation

 	// ListenNotification
 	apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
@@ -903,7 +903,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
 	if fileName != "" && strings.Contains(formValues.Get("Key"), "${filename}") {
 		// S3 feature to replace ${filename} found in Key form field
 		// by the filename attribute passed in multipart
-		formValues.Set("Key", strings.Replace(formValues.Get("Key"), "${filename}", fileName, -1))
+		formValues.Set("Key", strings.ReplaceAll(formValues.Get("Key"), "${filename}", fileName))
 	}
 	object := trimLeadingSlash(formValues.Get("Key"))

@@ -59,8 +59,8 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
 	}

 	if encodingType != "" {
-		// Only url encoding type is supported
-		if strings.ToLower(encodingType) != "url" {
+		// AWS S3 spec only supports 'url' encoding type
+		if !strings.EqualFold(encodingType, "url") {
 			return ErrInvalidEncodingMethod
 		}
 	}
@@ -172,11 +172,12 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
 		vStr, ok := v.(string)
 		if ok {
 			// Special case for AD/LDAP STS users
-			if k == ldapUser {
+			switch k {
+			case ldapUser:
 				args["user"] = []string{vStr}
-			} else if k == ldapUserN {
+			case ldapUserN:
 				args["username"] = []string{vStr}
-			} else {
+			default:
 				args[k] = []string{vStr}
 			}
 		}
@@ -172,7 +172,7 @@ func (o *ObjectInfo) TargetReplicationStatus(arn string) (status replication.Sta
 type replicateTargetDecision struct {
 	Replicate   bool   // Replicate to this target
 	Synchronous bool   // Synchronous replication configured.
-	Arn         string //ARN of replication target
+	Arn         string // ARN of replication target
 	ID          string
 }

@@ -32,7 +32,7 @@ var replicatedInfosTests = []struct {
 	expectedOpType        replication.Type
 	expectedAction        replicationAction
 }{
-	{ //1. empty tgtInfos slice
+	{ // 1. empty tgtInfos slice
 		name:                  "no replicated targets",
 		tgtInfos:              []replicatedTargetInfo{},
 		expectedCompletedSize: 0,

@@ -41,7 +41,7 @@ var replicatedInfosTests = []struct {
 		expectedOpType: replication.UnsetReplicationType,
 		expectedAction: replicateNone,
 	},
-	{ //2. replication completed to single target
+	{ // 2. replication completed to single target
 		name: "replication completed to single target",
 		tgtInfos: []replicatedTargetInfo{
 			{

@@ -59,7 +59,7 @@ var replicatedInfosTests = []struct {
 		expectedOpType: replication.ObjectReplicationType,
 		expectedAction: replicateAll,
 	},
-	{ //3. replication completed to single target; failed to another
+	{ // 3. replication completed to single target; failed to another
 		name: "replication completed to single target",
 		tgtInfos: []replicatedTargetInfo{
 			{

@@ -84,7 +84,7 @@ var replicatedInfosTests = []struct {
 		expectedOpType: replication.ObjectReplicationType,
 		expectedAction: replicateAll,
 	},
-	{ //4. replication pending on one target; failed to another
+	{ // 4. replication pending on one target; failed to another
 		name: "replication completed to single target",
 		tgtInfos: []replicatedTargetInfo{
 			{

@@ -137,7 +137,7 @@ var parseReplicationDecisionTest = []struct {
 	expDsc ReplicateDecision
 	expErr error
 }{
-	{ //1.
+	{ // 1.
 		name: "empty string",
 		dsc:  "",
 		expDsc: ReplicateDecision{

@@ -146,7 +146,7 @@ var parseReplicationDecisionTest = []struct {
 		expErr: nil,
 	},

-	{ //2.
+	{ // 2.
 		name:   "replicate decision for one target",
 		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id",
 		expErr: nil,

@@ -156,7 +156,7 @@ var parseReplicationDecisionTest = []struct {
 			},
 		},
 	},
-	{ //3.
+	{ // 3.
 		name:   "replicate decision for multiple targets",
 		dsc:    "arn:minio:replication::id:bucket=true;false;arn:minio:replication::id:bucket;id,arn:minio:replication::id2:bucket=false;true;arn:minio:replication::id2:bucket;id2",
 		expErr: nil,

@@ -167,7 +167,7 @@ var parseReplicationDecisionTest = []struct {
 			},
 		},
 	},
-	{ //4.
+	{ // 4.
 		name:   "invalid format replicate decision for one target",
 		dsc:    "arn:minio:replication::id:bucket:true;false;arn:minio:replication::id:bucket;id",
 		expErr: errInvalidReplicateDecisionFormat,
@@ -181,7 +181,6 @@ var parseReplicationDecisionTest = []struct {

 func TestParseReplicateDecision(t *testing.T) {
 	for i, test := range parseReplicationDecisionTest {
-		//dsc, err := parseReplicateDecision(test.dsc)
 		dsc, err := parseReplicateDecision(test.expDsc.String())

 		if err != nil {
@@ -208,22 +207,22 @@ var replicationStateTest = []struct {
 	arn       string
 	expStatus replication.StatusType
 }{
-	{ //1. no replication status header
+	{ // 1. no replication status header
 		name:      "no replicated targets",
 		rs:        ReplicationState{},
 		expStatus: replication.StatusType(""),
 	},
-	{ //2. replication status for one target
+	{ // 2. replication status for one target
 		name:      "replication status for one target",
 		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING;", Targets: map[string]replication.StatusType{"arn1": "PENDING"}},
 		expStatus: replication.Pending,
 	},
-	{ //3. replication status for one target - incorrect format
+	{ // 3. replication status for one target - incorrect format
 		name:      "replication status for one target",
 		rs:        ReplicationState{ReplicationStatusInternal: "arn1=PENDING"},
 		expStatus: replication.StatusType(""),
 	},
-	{ //4. replication status for 3 targets, one of them failed
+	{ // 4. replication status for 3 targets, one of them failed
 		name: "replication status for 3 targets - one failed",
 		rs: ReplicationState{
 			ReplicationStatusInternal: "arn1=COMPLETED;arn2=COMPLETED;arn3=FAILED;",

@@ -231,7 +230,7 @@ var replicationStateTest = []struct {
 		},
 		expStatus: replication.Failed,
 	},
-	{ //5. replication status for replica version
+	{ // 5. replication status for replica version
 		name:      "replication status for replica version",
 		rs:        ReplicationState{ReplicationStatusInternal: string(replication.Replica)},
 		expStatus: replication.Replica,
@@ -1740,7 +1740,7 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim
 	}
 	rs, ok := oi.UserDefined[targetResetHeader(arn)]
 	if !ok {
-		rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] //for backward compatibility
+		rs, ok = oi.UserDefined[xhttp.MinIOReplicationResetStatus] // for backward compatibility
 	}
 	if !ok { // existing object replication is enabled and object version is unreplicated so far.
 		if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested
@@ -55,25 +55,25 @@ var replicationConfigTests = []struct {
 	tgtStatuses  map[string]replication.StatusType
 	expectedSync bool
 }{
-	{ //1. no replication config
+	{ // 1. no replication config
 		name:         "no replication config",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: nil},
 		expectedSync: false,
 	},
-	{ //2. existing object replication config enabled, no versioning
+	{ // 2. existing object replication config enabled, no versioning
 		name:         "existing object replication config enabled, no versioning",
 		info:         ObjectInfo{Size: 100},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //3. existing object replication config enabled, versioning suspended
+	{ // 3. existing object replication config enabled, versioning suspended
 		name:         "existing object replication config enabled, versioning suspended",
 		info:         ObjectInfo{Size: 100, VersionID: nullVersionID},
 		rcfg:         replicationConfig{Config: &configs[0]},
 		expectedSync: false,
 	},
-	{ //4. existing object replication enabled, versioning enabled; no reset in progress
+	{ // 4. existing object replication enabled, versioning enabled; no reset in progress
 		name: "existing object replication enabled, versioning enabled; no reset in progress",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatus: replication.Completed,

@@ -130,7 +130,7 @@ var replicationConfigTests2 = []struct {
 		}}}},
 		expectedSync: true,
 	},
-	{ //3. replication status unset
+	{ // 3. replication status unset
 		name: "existing object replication on pre-existing unreplicated object",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatus: replication.StatusType(""),

@@ -142,7 +142,7 @@ var replicationConfigTests2 = []struct {
 		dsc:          ReplicateDecision{targetsMap: map[string]replicateTargetDecision{"arn1": newReplicateTargetDecision("arn1", true, false)}},
 		expectedSync: true,
 	},
-	{ //4. replication status Complete
+	{ // 4. replication status Complete
 		name: "existing object replication on object in Completed replication status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:COMPLETED",

@@ -155,7 +155,7 @@ var replicationConfigTests2 = []struct {
 		}}}},
 		expectedSync: false,
 	},
-	{ //5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
+	{ // 5. existing object replication enabled, versioning enabled, replication status Pending & reset ID present
 		name: "existing object replication with reset in progress and object in Pending status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:PENDING;",

@@ -172,7 +172,7 @@ var replicationConfigTests2 = []struct {
 		}}},
 		},
 	},
-	{ //6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
+	{ // 6. existing object replication enabled, versioning enabled, replication status Failed & reset ID present
 		name: "existing object replication with reset in progress and object in Failed status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:FAILED;",

@@ -189,7 +189,7 @@ var replicationConfigTests2 = []struct {
 		},
 		expectedSync: true,
 	},
-	{ //7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
+	{ // 7. existing object replication enabled, versioning enabled, replication status unset & reset ID present
 		name: "existing object replication with reset in progress and object never replicated before",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatus: replication.StatusType(""),

@@ -207,7 +207,7 @@ var replicationConfigTests2 = []struct {
 		expectedSync: true,
 	},

-	{ //8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
+	{ // 8. existing object replication enabled, versioning enabled, replication status Complete & reset ID present
 		name: "existing object replication enabled - reset in progress for an object in Completed status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:COMPLETED;",

@@ -224,7 +224,7 @@ var replicationConfigTests2 = []struct {
 		}}},
 		},
 	},
-	{ //9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
+	{ // 9. existing object replication enabled, versioning enabled, replication status Pending & reset ID different
 		name: "existing object replication enabled, newer reset in progress on object in Pending replication status",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:PENDING;",

@@ -243,7 +243,7 @@ var replicationConfigTests2 = []struct {
 		}}},
 		},
 	},
-	{ //10. existing object replication enabled, versioning enabled, replication status Complete & reset done
+	{ // 10. existing object replication enabled, versioning enabled, replication status Complete & reset done
 		name: "reset done on object in Completed Status - ineligbile for re-replication",
 		info: ObjectInfo{Size: 100,
 			ReplicationStatusInternal: "arn1:COMPLETED;",
@@ -56,7 +56,7 @@ func (brs BucketReplicationStats) Clone() BucketReplicationStats {
 	c := BucketReplicationStats{
 		Stats: make(map[string]*BucketReplicationStat, len(brs.Stats)),
 	}
-	//this is called only by replicationStats cache and already holds a read lock before calling Clone()
+	// This is called only by replicationStats cache and already holds a read lock before calling Clone()
 	for arn, st := range brs.Stats {
 		c.Stats[arn] = &BucketReplicationStat{
 			FailedSize: atomic.LoadInt64(&st.FailedSize),
@@ -444,6 +444,6 @@ type TargetClient struct {
 	StorageClass   string // storage class on remote
 	disableProxy   bool
 	healthCancelFn context.CancelFunc // cancellation function for client healthcheck
-	ARN            string             //ARN to uniquely identify remote target
+	ARN            string             // ARN to uniquely identify remote target
 	ResetID        string
 }
@@ -526,6 +526,7 @@ func handleCommonEnvVars() {
 	// Warn user if deprecated environment variables,
 	// "MINIO_ACCESS_KEY" and "MINIO_SECRET_KEY", are defined
 	// Check all error conditions first
+	//nolint:gocritic
 	if !env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
 		logger.Fatal(config.ErrMissingEnvCredentialRootUser(nil), "Unable to start MinIO")
 	} else if env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) {

@@ -544,6 +545,7 @@ func handleCommonEnvVars() {
 	var user, password string
 	haveRootCredentials := false
 	haveAccessCredentials := false
+	//nolint:gocritic
 	if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
 		user = env.Get(config.EnvRootUser, "")
 		password = env.Get(config.EnvRootPassword, "")
@@ -696,9 +696,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
 	// to list the ENV, for regular k/v EnableKey is
 	// implicit, for ENVs we cannot make it implicit.
 	if subSysHelp.MultipleTargets {
-		envK := config.EnvPrefix + strings.Join([]string{
-			strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
-		}, config.EnvWordDelimiter)
+		envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(madmin.EnableKey)
 		envHelp = append(envHelp, config.HelpKV{
 			Key:         envK,
 			Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),

@@ -707,9 +705,7 @@ func GetHelp(subSys, key string, envOnly bool) (Help, error) {
 		})
 	}
 	for _, hkv := range h {
-		envK := config.EnvPrefix + strings.Join([]string{
-			strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
-		}, config.EnvWordDelimiter)
+		envK := config.EnvPrefix + strings.ToTitle(subSys) + config.EnvWordDelimiter + strings.ToTitle(hkv.Key)
 		envHelp = append(envHelp, config.HelpKV{
 			Key:         envK,
 			Description: hkv.Description,
@@ -34,14 +34,12 @@ import (
 	"github.com/minio/pkg/quick"
 )

-/////////////////// Config V1 ///////////////////
 type configV1 struct {
 	Version   string `json:"version"`
 	AccessKey string `json:"accessKeyId"`
 	SecretKey string `json:"secretAccessKey"`
 }

-/////////////////// Config V2 ///////////////////
 type configV2 struct {
 	Version     string `json:"version"`
 	Credentials struct {

@@ -63,7 +61,6 @@ type configV2 struct {
 	} `json:"fileLogger"`
 }

-/////////////////// Config V3 ///////////////////
 // backendV3 type.
 type backendV3 struct {
 	Type string `json:"type"`
@@ -32,7 +32,7 @@ import (
 // number of log messages to buffer
 const defaultLogBufferCount = 10000

-//HTTPConsoleLoggerSys holds global console logger state
+// HTTPConsoleLoggerSys holds global console logger state
 type HTTPConsoleLoggerSys struct {
 	sync.RWMutex
 	pubsub *pubsub.PubSub
@@ -775,7 +775,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 	if flat.Objects < dataScannerCompactLeastObject {
 		if f.dataUsageScannerDebug && flat.Objects > 1 {
 			// Disabled, rather chatty:
-			//console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+			// console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat)
 		}
 		compact = true
 	} else {

@@ -791,7 +791,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 		}
 		if f.dataUsageScannerDebug && compact {
 			// Disabled, rather chatty:
-			//console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+			// console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat)
 		}
 	}
 	if compact {
@@ -203,7 +203,7 @@ func (d *dataUpdateTracker) latestWithDir(dir string) uint64 {
 // start a saver goroutine.
 // All of these will exit when the context is canceled.
 func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
-	if len(drives) <= 0 {
+	if len(drives) == 0 {
 		logger.LogIf(ctx, errors.New("dataUpdateTracker.start: No drives specified"))
 		return
 	}

@@ -220,7 +220,7 @@ func (d *dataUpdateTracker) start(ctx context.Context, drives ...string) {
 // If no valid data usage tracker can be found d will remain unchanged.
 // If object is shared the caller should lock it.
 func (d *dataUpdateTracker) load(ctx context.Context, drives ...string) {
-	if len(drives) <= 0 {
+	if len(drives) == 0 {
 		logger.LogIf(ctx, errors.New("dataUpdateTracker.load: No drives specified"))
 		return
 	}
@@ -773,7 +773,7 @@ func newCacheEncryptReader(content io.Reader, bucket, object string, metadata ma
 		return nil, err
 	}

-	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
+	reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey, MinVersion: sio.Version20, CipherSuites: fips.CipherSuitesDARE()})
 	if err != nil {
 		return nil, crypto.ErrInvalidCustomerKey
 	}
@ -61,7 +61,7 @@ func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
|
|||
panic("Negative rotations are not allowed")
|
||||
}
|
||||
|
||||
skipOffset = skipOffset % int64(len(alphabets))
|
||||
skipOffset %= int64(len(alphabets))
|
||||
as := make([]byte, 2*len(alphabets))
|
||||
copy(as, alphabets)
|
||||
copy(as[len(alphabets):], alphabets)
|
||||
|
|
|
@@ -242,7 +242,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
 	for _, argPattern := range argPatterns {
 		var totalSize uint64 = 1
 		for _, p := range argPattern {
-			totalSize = totalSize * uint64(len(p.Seq))
+			totalSize *= uint64(len(p.Seq))
 		}
 		totalSizes = append(totalSizes, totalSize)
 	}

@@ -497,6 +497,7 @@ func NewEndpoints(args ...string) (endpoints Endpoints, err error) {
 		}

 		// All endpoints have to be same type and scheme if applicable.
+		//nolint:gocritic
 		if i == 0 {
 			endpointType = endpoint.Type()
 			scheme = endpoint.Scheme
@@ -32,7 +32,7 @@ var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnform
 // list all errors that can be ignored in a bucket metadata operation.
 var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

-/// Bucket operations
+// Bucket operations

 // MakeBucket - make a bucket.
 func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
@@ -95,7 +95,7 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
 // It returns an error if the decoding failed.
 func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
 	var isZero = 0
-	for _, b := range data[:] {
+	for _, b := range data {
 		if len(b) == 0 {
 			isZero++
 			break
@@ -110,7 +110,7 @@ func TestErasureDecode(t *testing.T) {
 	for i, disk := range disks {
 		writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
 	}
-	n, err := erasure.Encode(context.Background(), bytes.NewReader(data[:]), writers, buffer, erasure.dataBlocks+1)
+	n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
 	closeBitrotWriters(writers)
 	if err != nil {
 		setup.Remove()
@@ -235,7 +235,7 @@ func TestListOnlineDisks(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 			break
 		}

@@ -414,7 +414,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 			break
 		}

@@ -563,7 +563,7 @@ func TestDisksWithAllParts(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Failed to open %s: %s\n", filePath, err)
 			}
-			f.Write([]byte("oops")) // Will cause bitrot error
+			f.WriteString("oops") // Will cause bitrot error
 			f.Close()
 		}
 	}
@@ -163,7 +163,7 @@ func TestHealingDanglingObject(t *testing.T) {
 		t.Fatal(err)
 	}

-	//defer removeRoots(fsDirs)
+	defer removeRoots(fsDirs)

 	// Everything is fine, should return nil
 	objLayer, disks, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
@@ -292,25 +292,25 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.
 	for i, meta := range metaArr {
 		if meta.IsValid() && meta.ModTime.Equal(modTime) && meta.DataDir == dataDir {
 			for _, part := range meta.Parts {
-				h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
+				fmt.Fprintf(h, "part.%d", part.Number)
 			}
-			h.Write([]byte(fmt.Sprintf("%v", meta.Erasure.Distribution)))
+			fmt.Fprintf(h, "%v", meta.Erasure.Distribution)
 			// make sure that length of Data is same
-			h.Write([]byte(fmt.Sprintf("%v", len(meta.Data))))
+			fmt.Fprintf(h, "%v", len(meta.Data))

 			// ILM transition fields
-			h.Write([]byte(meta.TransitionStatus))
-			h.Write([]byte(meta.TransitionTier))
-			h.Write([]byte(meta.TransitionedObjName))
-			h.Write([]byte(meta.TransitionVersionID))
+			fmt.Fprint(h, meta.TransitionStatus)
+			fmt.Fprint(h, meta.TransitionTier)
+			fmt.Fprint(h, meta.TransitionedObjName)
+			fmt.Fprint(h, meta.TransitionVersionID)

 			// Server-side replication fields
-			h.Write([]byte(fmt.Sprintf("%v", meta.MarkDeleted)))
-			h.Write([]byte(meta.Metadata[string(meta.ReplicationState.ReplicaStatus)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.ReplicationStatusInternal]))
-			h.Write([]byte(meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal]))
+			fmt.Fprintf(h, "%v", meta.MarkDeleted)
+			fmt.Fprint(h, meta.Metadata[string(meta.ReplicationState.ReplicaStatus)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationTimeStamp.Format(http.TimeFormat)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicaTimeStamp.Format(http.TimeFormat)])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.ReplicationStatusInternal])
+			fmt.Fprint(h, meta.Metadata[meta.ReplicationState.VersionPurgeStatusInternal])

 			metaHashes[i] = hex.EncodeToString(h.Sum(nil))
 			h.Reset()
@@ -46,7 +46,7 @@ import (
 // list all errors which can be ignored in object operations.
 var objectOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

-/// Object Operations
+// Object Operations

 func countOnlineDisks(onlineDisks []StorageAPI) (online int) {
 	for _, onlineDisk := range onlineDisks {
@@ -327,7 +327,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, falloc

 	flags := os.O_CREATE | os.O_WRONLY
 	if globalFSOSync {
-		flags = flags | os.O_SYNC
+		flags |= os.O_SYNC
 	}
 	writer, err := lock.Open(filePath, flags, 0666)
 	if err != nil {
@@ -109,7 +109,7 @@ func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {
 		}
 	}

-	/// Save new reader on the map.
+	// Save new reader on the map.

 	// It is possible by this time due to concurrent
 	// i/o we might have another lock present. Lookup
@@ -398,7 +398,7 @@ func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUs
 	return cache, err
 }

-/// Bucket operations
+// Bucket operations

 // getBucketDir - will convert incoming bucket names to
 // corresponding valid bucket names on the backend in a platform

@@ -601,7 +601,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, opts Delet
 	return nil
 }

-/// Object Operations
+// Object Operations

 // CopyObject - copy object source object to destination object.
 // if source object and destination object are same we only
@@ -274,7 +274,7 @@ func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string)
 	encodeKey := func(key string) string {
 		tokens := strings.Split(key, "_")
 		for i := range tokens {
-			tokens[i] = strings.Replace(tokens[i], "-", "_", -1)
+			tokens[i] = strings.ReplaceAll(tokens[i], "-", "_")
 		}
 		return strings.Join(tokens, "__")
 	}

@@ -367,7 +367,7 @@ func azurePropertiesToS3Meta(meta azblob.Metadata, props azblob.BlobHTTPHeaders,
 	decodeKey := func(key string) string {
 		tokens := strings.Split(key, "__")
 		for i := range tokens {
-			tokens[i] = strings.Replace(tokens[i], "_", "-", -1)
+			tokens[i] = strings.ReplaceAll(tokens[i], "_", "-")
 		}
 		return strings.Join(tokens, "_")
 	}
@@ -531,7 +531,7 @@ func toGCSPageToken(name string) string {
 		byte(length & 0xFF),
 	}

-	length = length >> 7
+	length >>= 7
 	if length > 0 {
 		b = append(b, byte(length&0xFF))
 	}
@@ -668,7 +668,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
 		return oi, e
 	}

-	//delete any unencrypted version of object that might be on the backend
+	// delete any unencrypted version of object that might be on the backend
 	defer l.s3Objects.DeleteObject(ctx, bucket, object, opts)

 	// Save the final object size and modtime.
@@ -289,7 +289,7 @@ func validateFormFieldSize(ctx context.Context, formValues http.Header) error {

 // Extract form fields and file data from a HTTP POST Policy
 func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (filePart io.ReadCloser, fileName string, fileSize int64, formValues http.Header, err error) {
-	/// HTML Form values
+	// HTML Form values
 	fileName = ""

 	// Canonicalize the form values into http.Header.
@@ -171,7 +171,7 @@ func newMappedPolicy(policy string) MappedPolicy {

 // key options
 type options struct {
-	ttl int64 //expiry in seconds
+	ttl int64 // expiry in seconds
 }

 type iamWatchEvent struct {

@@ -558,9 +558,7 @@ func (store *IAMStoreSys) AddUsersToGroup(ctx context.Context, group string, mem
 		// exist.
 		gi = newGroupInfo(members)
 	} else {
-		mergedMembers := append(gi.Members, members...)
-		uniqMembers := set.CreateStringSet(mergedMembers...).ToSlice()
-		gi.Members = uniqMembers
+		gi.Members = set.CreateStringSet(append(gi.Members, members...)...).ToSlice()
 	}

 	if err := store.saveGroupInfo(ctx, group, gi); err != nil {
cmd/iam.go
@@ -351,67 +351,34 @@ func (sys *IAMSys) loadWatchedEvent(ctx context.Context, event iamWatchEvent) (e
 	ctx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
 	defer cancel()

-	if event.isCreated {
-		switch {
-		case usersPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
-		case stsPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
-		case svcPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
-		case groupsPrefix:
-			group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
-			err = sys.store.GroupNotificationHandler(ctx, group)
-		case policyPrefix:
-			policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
-			err = sys.store.PolicyNotificationHandler(ctx, policyName)
-		case policyDBUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
-		case policyDBSTSUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
-		case policyDBGroupsPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
-		}
-	} else {
-		// delete event
-		switch {
-		case usersPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
-		case stsPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
-		case svcPrefix:
-			accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
-			err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
-		case groupsPrefix:
-			group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
-			err = sys.store.GroupNotificationHandler(ctx, group)
-		case policyPrefix:
-			policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
-			err = sys.store.PolicyNotificationHandler(ctx, policyName)
-		case policyDBUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
-		case policyDBSTSUsersPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
-		case policyDBGroupsPrefix:
-			policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
-			user := strings.TrimSuffix(policyMapFile, ".json")
-			err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
-		}
-	}
+	switch {
+	case usersPrefix:
+		accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigUsersPrefix))
+		err = sys.store.UserNotificationHandler(ctx, accessKey, regUser)
+	case stsPrefix:
+		accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigSTSPrefix))
+		err = sys.store.UserNotificationHandler(ctx, accessKey, stsUser)
+	case svcPrefix:
+		accessKey := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigServiceAccountsPrefix))
+		err = sys.store.UserNotificationHandler(ctx, accessKey, svcUser)
+	case groupsPrefix:
+		group := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigGroupsPrefix))
+		err = sys.store.GroupNotificationHandler(ctx, group)
+	case policyPrefix:
+		policyName := path.Dir(strings.TrimPrefix(event.keyPath, iamConfigPoliciesPrefix))
+		err = sys.store.PolicyNotificationHandler(ctx, policyName)
+	case policyDBUsersPrefix:
+		policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBUsersPrefix)
+		user := strings.TrimSuffix(policyMapFile, ".json")
+		err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, regUser)
+	case policyDBSTSUsersPrefix:
+		policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBSTSUsersPrefix)
+		user := strings.TrimSuffix(policyMapFile, ".json")
+		err = sys.store.PolicyMappingNotificationHandler(ctx, user, false, stsUser)
+	case policyDBGroupsPrefix:
+		policyMapFile := strings.TrimPrefix(event.keyPath, iamConfigPolicyDBGroupsPrefix)
+		user := strings.TrimSuffix(policyMapFile, ".json")
+		err = sys.store.PolicyMappingNotificationHandler(ctx, user, true, regUser)
+	}
 	return err
 }
@@ -620,11 +620,9 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan<
 			}
 			best = other
 			bestIdx = otherIdx
-		} else {
-			// Keep best, replace "other"
-			if err := selectFrom(otherIdx); err != nil {
-				return err
-			}
+		} else if err := selectFrom(otherIdx); err != nil {
+			// Keep best, replace "other"
+			return err
 		}
 		continue
 	}

@@ -636,10 +634,8 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan<
 		if best.name > last {
 			out <- *best
 			last = best.name
-		} else {
-			if serverDebugLog {
-				console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last)
-			}
+		} else if serverDebugLog {
+			console.Debugln("mergeEntryChannels: discarding duplicate", best.name, "<=", last)
 		}
 		// Replace entry we just sent.
 		if err := selectFrom(bestIdx); err != nil {
@@ -81,6 +81,7 @@ func Test_metaCacheEntries_merge(t *testing.T) {
 	}
 	// Merge b into a
 	a.merge(b, -1)
+	//nolint:gocritic
 	want := append(loadMetacacheSampleNames, loadMetacacheSampleNames...)
 	sort.Strings(want)
 	got := a.entries().names()
@@ -1623,20 +1623,18 @@ func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) {
 			continue
 		}
 		for k, v := range metric.Histogram {
-			l := append(labels, metric.HistogramBucketLabel)
-			lv := append(values, k)
 			out <- prometheus.MustNewConstMetric(
 				prometheus.NewDesc(
 					prometheus.BuildFQName(string(metric.Description.Namespace),
 						string(metric.Description.Subsystem),
 						string(metric.Description.Name)),
 					metric.Description.Help,
-					l,
+					append(labels, metric.HistogramBucketLabel),
 					metric.StaticLabels,
 				),
 				prometheus.GaugeValue,
 				float64(v),
-				lv...)
+				append(values, k)...)
 		}
 		continue
 	}
cmd/net.go
@@ -341,21 +341,17 @@ func sameLocalAddrs(addr1, addr2 string) (bool, error) {
 	if host1 == "" {
 		// If empty host means it is localhost
 		addr1Local = true
-	} else {
-		// Host not empty, check if it is local
-		if addr1Local, err = isLocalHost(host1, port1, port1); err != nil {
-			return false, err
-		}
+	} else if addr1Local, err = isLocalHost(host1, port1, port1); err != nil {
+		// Host not empty, check if it is local
+		return false, err
 	}

 	if host2 == "" {
 		// If empty host means it is localhost
 		addr2Local = true
-	} else {
-		// Host not empty, check if it is local
-		if addr2Local, err = isLocalHost(host2, port2, port2); err != nil {
-			return false, err
-		}
+	} else if addr2Local, err = isLocalHost(host2, port2, port2); err != nil {
+		// Host not empty, check if it is local
+		return false, err
 	}

 	// If both of addresses point to the same machine, check if
@@ -52,7 +52,7 @@ var globalObjLayerMutex sync.RWMutex
 // Global object layer, only accessed by globalObjectAPI.
 var globalObjectAPI ObjectLayer

-//Global cacheObjects, only accessed by newCacheObjectsFn().
+// Global cacheObjects, only accessed by newCacheObjectsFn().
 var globalCacheObjectAPI CacheObjectLayer

 // Checks if the object is a directory, this logic uses
@@ -312,7 +312,7 @@ func (e ObjectExistsAsDirectory) Error() string {
 	return "Object exists on : " + e.Bucket + " as directory " + e.Object
 }

-//PrefixAccessDenied object access is denied.
+// PrefixAccessDenied object access is denied.
 type PrefixAccessDenied GenericError

 func (e PrefixAccessDenied) Error() string {

@@ -484,7 +484,7 @@ func (e InvalidObjectState) Error() string {
 	return "The operation is not valid for the current state of the object " + e.Bucket + "/" + e.Object + "(" + e.VersionID + ")"
 }

-/// Bucket related errors.
+// Bucket related errors.

 // BucketNameInvalid - bucketname provided is invalid.
 type BucketNameInvalid GenericError

@@ -494,7 +494,7 @@ func (e BucketNameInvalid) Error() string {
 	return "Bucket name invalid: " + e.Bucket
 }

-/// Object related errors.
+// Object related errors.

 // ObjectNameInvalid - object name provided is invalid.
 type ObjectNameInvalid GenericError

@@ -569,7 +569,7 @@ func (e OperationTimedOut) Error() string {
 	return "Operation timed out"
 }

-/// Multipart related errors.
+// Multipart related errors.

 // MalformedUploadID malformed upload id.
 type MalformedUploadID struct {
@@ -205,7 +205,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
 		},
 		// ListObjectsResult-9.
 		// Used for asserting the case with marker, but without prefix.
-		//marker is set to "newPrefix0" in the testCase, (testCase 33).
+		// marker is set to "newPrefix0" in the testCase, (testCase 33).
 		{
 			IsTruncated: false,
 			Objects: []ObjectInfo{

@@ -217,7 +217,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
 			},
 		},
 		// ListObjectsResult-10.
-		//marker is set to "newPrefix1" in the testCase, (testCase 34).
+		// marker is set to "newPrefix1" in the testCase, (testCase 34).
 		{
 			IsTruncated: false,
 			Objects: []ObjectInfo{

@@ -228,7 +228,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
 			},
 		},
 		// ListObjectsResult-11.
-		//marker is set to "obj0" in the testCase, (testCase 35).
+		// marker is set to "obj0" in the testCase, (testCase 35).
 		{
 			IsTruncated: false,
 			Objects: []ObjectInfo{

@@ -548,7 +548,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
 		{"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true},
 		// Testing for all 10 objects in the bucket (18).
 		{"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true},
-		//Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (19).
+		// Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (19).
 		{"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true},
 		// Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (20).
 		{"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true},

@@ -905,7 +905,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
 		},
 		// ListObjectsResult-9.
 		// Used for asserting the case with marker, but without prefix.
-		//marker is set to "newPrefix0" in the testCase, (testCase 33).
+		// marker is set to "newPrefix0" in the testCase, (testCase 33).
 		{
 			IsTruncated: false,
 			Objects: []ObjectInfo{

@@ -917,7 +917,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
 			},
 		},
 		// ListObjectsResult-10.
-		//marker is set to "newPrefix1" in the testCase, (testCase 34).
+		// marker is set to "newPrefix1" in the testCase, (testCase 34).
 		{
 			IsTruncated: false,
 			Objects: []ObjectInfo{

@@ -928,7 +928,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
 			},
 		},
 		// ListObjectsResult-11.
-		//marker is set to "obj0" in the testCase, (testCase 35).
+		// marker is set to "obj0" in the testCase, (testCase 35).
 		{
 			IsTruncated: false,
 			Objects: []ObjectInfo{

@@ -1223,7 +1223,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
 		{"empty-bucket", "", "", "", 111100000, ListObjectsInfo{}, nil, true},
 		// Testing for all 10 objects in the bucket (16).
 		{"test-bucket-list-object", "", "", "", 10, resultCases[0], nil, true},
-		//Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (17).
+		// Testing for negative value of maxKey, this should set maxKeys to listObjectsLimit (17).
 		{"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true},
 		// Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (18).
 		{"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true},
@ -1065,7 +1065,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||
{bucketNames[0], "Asia", "", "", "", 2, listMultipartResults[13], nil, true},
|
||||
// setting delimiter (Test number 27).
|
||||
{bucketNames[0], "", "", "", SlashSeparator, 2, listMultipartResults[14], nil, true},
|
||||
//Test case with multiple uploadID listing for given object (Test number 28).
|
||||
// Test case with multiple uploadID listing for given object (Test number 28).
|
||||
{bucketNames[1], "", "", "", "", 100, listMultipartResults[15], nil, true},
|
||||
// Test case with multiple uploadID listing for given object, but uploadID marker set.
|
||||
// Testing whether the marker entry is skipped (Test number 29-30).
|
||||
|
@ -1088,29 +1088,29 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
|
|||
// Test case with `Prefix` and `UploadIDMarker` (Test number 37).
|
||||
{bucketNames[1], "min", "minio-object-1.txt", uploadIDs[1], "", 10, listMultipartResults[24], nil, true},
|
||||
// Test case for bucket with multiple objects in it.
|
||||
// Bucket used : `bucketNames[2]`.
|
||||
// Objects used: `objectNames[1-5]`.
|
||||
// Bucket used : `bucketNames[2]`.
|
||||
// Objects used: `objectNames[1-5]`.
|
||||
// UploadId's used: uploadIds[4-8].
|
||||
// (Test number 39).
|
||||
{bucketNames[2], "", "", "", "", 100, listMultipartResults[25], nil, true},
|
||||
//Test cases with prefixes.
|
||||
//Testing listing with prefix set to "min" (Test number 40) .
|
||||
// Test cases with prefixes.
|
||||
// Testing listing with prefix set to "min" (Test number 40) .
|
||||
{bucketNames[2], "min", "", "", "", 100, listMultipartResults[26], nil, true},
|
||||
//Testing listing with prefix set to "ney" (Test number 41).
|
||||
// Testing listing with prefix set to "ney" (Test number 41).
|
||||
{bucketNames[2], "ney", "", "", "", 100, listMultipartResults[27], nil, true},
|
||||
//Testing listing with prefix set to "par" (Test number 42).
|
||||
// Testing listing with prefix set to "par" (Test number 42).
|
||||
{bucketNames[2], "parrot", "", "", "", 100, listMultipartResults[28], nil, true},
|
||||
//Testing listing with prefix set to object name "neymar.jpeg" (Test number 43).
|
||||
// Testing listing with prefix set to object name "neymar.jpeg" (Test number 43).
|
||||
{bucketNames[2], "neymar.jpeg", "", "", "", 100, listMultipartResults[29], nil, true},
|
||||
// Testing listing with `MaxUploads` set to 3 (Test number 44).
|
||||
// Testing listing with `MaxUploads` set to 3 (Test number 44).
|
||||
{bucketNames[2], "", "", "", "", 3, listMultipartResults[30], nil, true},
|
||||
// In case of bucketNames[2], there are 6 entries (Test number 45).
|
||||
// Since all available entries are listed, IsTruncated is expected to be false
|
||||
// and NextMarkers are expected to empty.
|
||||
{bucketNames[2], "", "", "", "", 6, listMultipartResults[31], nil, true},
|
||||
// Test case with `KeyMarker` (Test number 47).
|
||||
// Test case with `KeyMarker` (Test number 47).
|
||||
{bucketNames[2], "", objectNames[3], "", "", 10, listMultipartResults[33], nil, true},
|
||||
// Test case with `prefix` and `KeyMarker` (Test number 48).
|
||||
// Test case with `prefix` and `KeyMarker` (Test number 48).
|
||||
{bucketNames[2], "minio-object", objectNames[1], "", "", 10, listMultipartResults[34], nil, true},
|
||||
}
|
||||
|
||||
|
@ -1694,9 +1694,9 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
{bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
{bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
// Part with size larger than 5Mb.
{bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketNames[0], objectNames[0], uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
{bucketNames[0], objectNames[0], uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},
{bucketNames[0], objectNames[0], uploadIDs[0], 7, string(validPart), validPartMD5, int64(len(validPart))},
}
sha256sum := ""
var opts ObjectOptions

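A note on the int64(len(...)) change above: validPart is a byte slice, and converting it to string just to take the length never changes the result, since the conversion preserves every byte. A minimal runnable sketch of the pattern (only the validPart name is borrowed from the test; the value is made up):

package main

import "fmt"

func main() {
	validPart := []byte("0123456789abcdef")
	// For any []byte b, len(string(b)) == len(b); the conversion only
	// costs an allocation and a copy, so the shorter form wins.
	fmt.Println(len(string(validPart)), len(validPart)) // 16 16
}
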
@ -790,7 +790,7 @@ func (g *GetObjectReader) Close() error {
return nil
}

//SealMD5CurrFn seals md5sum with object encryption key and returns sealed
// SealMD5CurrFn seals md5sum with object encryption key and returns sealed
// md5sum
type SealMD5CurrFn func([]byte) []byte

@ -1574,7 +1574,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}

/// if Content-Length is unknown/missing, deny the request
// if Content-Length is unknown/missing, deny the request
size := r.ContentLength
rAuthType := getRequestAuthType(r)
if rAuthType == authTypeStreamingSigned {

@ -1595,7 +1595,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
return
}

/// maximum Upload size for objects in a single operation
// maximum Upload size for objects in a single operation
if isMaxObjectSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return

@ -1924,7 +1924,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return
}

/// if Content-Length is unknown/missing, deny the request
// if Content-Length is unknown/missing, deny the request
size := r.ContentLength
rAuthType := getRequestAuthType(r)
if rAuthType == authTypeStreamingSigned {

@ -1946,7 +1946,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
return
}

/// maximum Upload size for objects in a single operation
// maximum Upload size for objects in a single operation
if isMaxObjectSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return

@ -2155,7 +2155,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h
writeSuccessResponseHeadersOnly(w)
}

/// Multipart objectAPIHandlers
// Multipart objectAPIHandlers

// NewMultipartUploadHandler - New multipart upload.
// Notice: The S3 client can send secret keys in headers for encryption related jobs,

@ -2478,7 +2478,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt
return
}

/// maximum copy size for multipart objects in a single operation
// maximum copy size for multipart objects in a single operation
if isMaxAllowedPartSize(length) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return

@ -2670,7 +2670,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
}

/// if Content-Length is unknown/missing, throw away
// if Content-Length is unknown/missing, throw away
size := r.ContentLength

rAuthType := getRequestAuthType(r)

@ -2693,7 +2693,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
return
}

/// maximum Upload size for multipart objects in a single operation
// maximum Upload size for multipart objects in a single operation
if isMaxAllowedPartSize(size) {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return

@ -3319,7 +3319,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
}
}

/// Delete objectAPIHandlers
// Delete objectAPIHandlers

// DeleteObjectHandler - delete an object
func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {

@ -2706,13 +2706,13 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
// Part with size larger than 5 MiB.
{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},

// Part with size larger than 5 MiB.
// Parts uploaded for anonymous/unsigned API handler test.
{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))},
}
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {

@ -3077,13 +3077,13 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri
{bucketName, objectName, uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd"))},
{bucketName, objectName, uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd"))},
// Part with size larger than 5 MiB.
{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[0], 5, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[0], 6, string(validPart), validPartMD5, int64(len(validPart))},

// Part with size larger than 5 MiB.
// Parts uploaded for anonymous/unsigned API handler test.
{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(string(validPart)))},
{bucketName, objectName, uploadIDs[1], 1, string(validPart), validPartMD5, int64(len(validPart))},
{bucketName, objectName, uploadIDs[1], 2, string(validPart), validPartMD5, int64(len(validPart))},
}
// Iterating over createPartCases to generate multipart chunks.
for _, part := range parts {

@ -150,7 +150,7 @@ func testMultipartObjectAbort(obj ObjectLayer, instanceType string, t TestErrHan
randomPerm := rand.Perm(10)
randomString := ""
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
randomString += strconv.Itoa(num)
}

expectedETaghex := getMD5Hash([]byte(randomString))

@ -189,7 +189,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH
randomPerm := rand.Perm(100)
randomString := ""
for _, num := range randomPerm {
randomString = randomString + strconv.Itoa(num)
randomString += strconv.Itoa(num)
}

expectedETaghex := getMD5Hash([]byte(randomString))

@ -61,8 +61,8 @@ func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey stri
keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr
retStr = retStr + "}"
retStr += conditionStr
retStr += "}"

return []byte(retStr)
}

@ -89,8 +89,8 @@ func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration t
conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr)
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr
retStr = retStr + "}"
retStr += conditionStr
retStr += "}"

return []byte(retStr)
}

@ -108,8 +108,8 @@ func newPostPolicyBytesV2(bucketName, objectKey string, expiration time.Time) []
conditionStr := fmt.Sprintf(`"conditions":[%s, %s]`, bucketConditionStr, keyConditionStr)
retStr := "{"
retStr = retStr + expirationStr + ","
retStr = retStr + conditionStr
retStr = retStr + "}"
retStr += conditionStr
retStr += "}"

return []byte(retStr)
}

@ -303,14 +303,12 @@ func checkPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) erro
if !condPassed {
return fmt.Errorf("Invalid according to Policy: Policy Condition failed")
}
} else {
} else if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") {
// This covers all conditions X-Amz-Meta-* and X-Amz-*
if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") {
// Check if policy condition is satisfied
condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
if !condPassed {
return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value)
}
// Check if policy condition is satisfied
condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value)
if !condPassed {
return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value)
}
}
}

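The checkPostPolicy hunk above folds a lone if nested inside an else into a single else if, the rewrite that gocritic's elseif check (one of the checks this commit re-enables) suggests. A small sketch of the same transformation on hypothetical code:

package main

import "fmt"

// before: a lone if statement nested inside an else block
func classifyBefore(n int) string {
	if n < 0 {
		return "negative"
	} else {
		if n == 0 { // gocritic elseif: fold into `} else if n == 0 {`
			return "zero"
		}
	}
	return "positive"
}

// after: the nested if merges into the else, dropping one indent level
func classifyAfter(n int) string {
	if n < 0 {
		return "negative"
	} else if n == 0 {
		return "zero"
	}
	return "positive"
}

func main() {
	fmt.Println(classifyBefore(-1), classifyAfter(0))
}
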
@ -365,7 +365,7 @@ func (s *TestSuiteCommon) TestBucketPolicy(c *check) {
// assert the http response status code.
c.Assert(response.StatusCode, http.StatusOK)

/// Put a new bucket policy.
// Put a new bucket policy.
request, err = newTestSignedRequest(http.MethodPut, getPutPolicyURL(s.endPoint, bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), s.accessKey, s.secretKey, s.signer)
c.Assert(err, nil)

@ -980,7 +980,7 @@ func (s *TestSuiteCommon) TestPutBucket(c *check) {
wg.Wait()

bucketName = getRandomBucketName()
//Block 2: testing for correctness of the functionality
// Block 2: testing for correctness of the functionality
// HTTP request to create the bucket.
request, err := newTestSignedRequest(http.MethodPut, getMakeBucketURL(s.endPoint, bucketName),
0, nil, s.accessKey, s.secretKey, s.signer)

@ -1273,7 +1273,7 @@ func (s *TestSuiteCommon) TestPutObjectLongName(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)

//make long object name.
// make long object name.
longObjName = fmt.Sprintf("%0255d/%0255d/%0255d/%0255d/%0255d", 1, 1, 1, 1, 1)
if IsDocker() || IsKubernetes() {
longObjName = fmt.Sprintf("%0242d/%0242d/%0242d/%0242d/%0242d", 1, 1, 1, 1, 1)

@ -261,7 +261,7 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues
// Replace all spaced strings, some clients can send spaced
// parameters and some won't. So we pro-actively remove any spaces
// to make parsing easier.
v4Auth = strings.Replace(v4Auth, " ", "", -1)
v4Auth = strings.ReplaceAll(v4Auth, " ", "")
if v4Auth == "" {
return sv, ErrAuthHeaderEmpty
}

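This parseSignV4 hunk is the first of many strings.Replace(s, old, new, -1) call sites converted below. strings.ReplaceAll, added in Go 1.12, is the dedicated spelling for the n == -1 case, and a gocritic check flags the longer form. A self-contained illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	v4Auth := "AWS4-HMAC-SHA256 Credential = x, SignedHeaders = host"
	// strings.Replace with n == -1 replaces every occurrence...
	before := strings.Replace(v4Auth, " ", "", -1)
	// ...which strings.ReplaceAll expresses directly, with one less argument.
	after := strings.ReplaceAll(v4Auth, " ", "")
	fmt.Println(before == after) // true
}
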
@ -103,7 +103,7 @@ func getSignedHeaders(signedHeaders http.Header) string {
// <HashedPayload>
//
func getCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string {
rawQuery := strings.Replace(queryStr, "+", "%20", -1)
rawQuery := strings.ReplaceAll(queryStr, "+", "%20")
encodedPath := s3utils.EncodePath(urlPath)
canonicalRequest := strings.Join([]string{
method,

@ -130,9 +130,9 @@ func getScope(t time.Time, region string) string {
// getStringToSign a string based on selected query values.
func getStringToSign(canonicalRequest string, t time.Time, scope string) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n"
stringToSign += scope + "\n"
canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest))
stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:])
stringToSign += hex.EncodeToString(canonicalRequestBytes[:])
return stringToSign
}

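getStringToSign above shows gocritic's assignOp rewrite: x = x + y becomes x += y, and the same rule covers the other arithmetic operators, as in the b -= '0' change further down. Sketch with made-up values:

package main

import "fmt"

func main() {
	stringToSign := "AWS4-HMAC-SHA256" + "\n"
	scope := "20211122/us-east-1/s3/aws4_request" // illustrative scope value
	stringToSign += scope + "\n"                  // instead of stringToSign = stringToSign + ...
	b := byte('7')
	b -= '0' // instead of b = b - '0'
	fmt.Printf("%s%d\n", stringToSign, b)
}
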
@ -306,7 +306,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s
return ErrInvalidToken
}

/// Verify finally if signature is same.
// Verify finally if signature is same.

// Get canonical request.
presignedCanonicalReq := getCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method)

@ -29,13 +29,8 @@ import (
xnet "github.com/minio/pkg/net"
)

///////////////////////////////////////////////////////////////////////////////
//
// Storage REST server, storageRESTReceiver and StorageRESTClient are
// inter-dependent, below test functions are sufficient to test all of them.
//
///////////////////////////////////////////////////////////////////////////////

func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) {
testCases := []struct {
expectErr bool

@ -436,7 +436,7 @@ func parseHexUint(v []byte) (n uint64, err error) {
for i, b := range v {
switch {
case '0' <= b && b <= '9':
b = b - '0'
b -= '0'
case 'a' <= b && b <= 'f':
b = b - 'a' + 10
case 'A' <= b && b <= 'F':

@ -119,19 +119,19 @@ func TestMain(m *testing.M) {
// concurrency level for certain parallel tests.
const testConcurrencyLevel = 10

///
/// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes problems with generating pre-signed URLs
/// (that are executed by other agents) or when customers pass requests through proxies, which may
/// modify the user-agent.
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
//
// Excerpts from @lsegal - https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258
//
// User-Agent:
//
// This is ignored from signing because signing this causes problems with generating pre-signed URLs
// (that are executed by other agents) or when customers pass requests through proxies, which may
// modify the user-agent.
//
// Authorization:
//
// Is skipped for obvious reasons
//
var ignoredHeaders = map[string]bool{
"Authorization": true,
"User-Agent": true,

@ -633,7 +633,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
signedHeaders := strings.Join(headers, ";")

// Get canonical query string.
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")

// Get canonical URI.
canonicalURI := s3utils.EncodePath(req.URL.Path)

@ -665,8 +665,8 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
}, SlashSeparator)

stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
stringToSign += scope + "\n"
stringToSign += getSHA256Hash([]byte(canonicalRequest))

date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
region := sumHMAC(date, []byte(globalMinioDefaultRegion))

@ -749,7 +749,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in
stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + signature + "\n"
stringToSign = stringToSign + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + "\n" // hex(sum256(""))
stringToSign = stringToSign + getSHA256Hash(buffer[:n])
stringToSign += getSHA256Hash(buffer[:n])

date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
region := sumHMAC(date, []byte(regionStr))

@ -851,7 +851,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i
extractedSignedHeaders := make(http.Header)
extractedSignedHeaders.Set("host", req.Host)

queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
queryStr := strings.ReplaceAll(query.Encode(), "+", "%20")
canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
stringToSign := getStringToSign(canonicalRequest, date, scope)
signingKey := getSigningKey(secretAccessKey, date, region, serviceS3)

@ -988,7 +988,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
signedHeaders := strings.Join(headers, ";")

// Get canonical query string.
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
req.URL.RawQuery = strings.ReplaceAll(req.URL.Query().Encode(), "+", "%20")

// Get canonical URI.
canonicalURI := s3utils.EncodePath(req.URL.Path)

@ -1021,7 +1021,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {

stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n"
stringToSign = stringToSign + scope + "\n"
stringToSign = stringToSign + getSHA256Hash([]byte(canonicalRequest))
stringToSign += getSHA256Hash([]byte(canonicalRequest))

date := sumHMAC([]byte("AWS4"+secretKey), []byte(currTime.Format(yyyymmdd)))
regionHMAC := sumHMAC(date, []byte(region))

@ -1220,7 +1220,7 @@ func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.
urlStr = urlStr + bucketName + SlashSeparator
}
if objectName != "" {
urlStr = urlStr + s3utils.EncodePath(objectName)
urlStr += s3utils.EncodePath(objectName)
}
if len(queryValues) > 0 {
urlStr = urlStr + "?" + queryValues.Encode()

@ -1504,7 +1504,7 @@ func removeRoots(roots []string) {
}
}

//removeDiskN - removes N disks from supplied disk slice.
// removeDiskN - removes N disks from supplied disk slice.
func removeDiskN(disks []string, n int) {
if n > len(disks) {
n = len(disks)

@ -161,7 +161,7 @@ func TestUserAgent(t *testing.T) {
str := getUserAgent(testCase.mode)
expectedStr := testCase.expectedStr
if IsDocker() {
expectedStr = strings.Replace(expectedStr, "; source", "; docker; source", -1)
expectedStr = strings.ReplaceAll(expectedStr, "; source", "; docker; source")
}
if str != expectedStr {
t.Errorf("Test %d: expected: %s, got: %s", i+1, expectedStr, str)

@ -216,7 +216,7 @@ func TestGetHelmVersion(t *testing.T) {
if err != nil {
t.Fatalf("Unable to create temporary file. %s", err)
}
if _, err = tmpfile.Write([]byte(content)); err != nil {
if _, err = tmpfile.WriteString(content); err != nil {
t.Fatalf("Unable to create temporary file. %s", err)
}
if err = tmpfile.Close(); err != nil {

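tmpfile is an *os.File, which implements io.StringWriter, so writing a string does not need a manual []byte conversion. A runnable sketch of the same idea:

package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "demo-*.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	content := "version: v1\n"
	// f.WriteString(content) replaces f.Write([]byte(content)).
	if _, err := f.WriteString(content); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}
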
@ -23,7 +23,7 @@ import (
)

func BenchmarkURLQueryForm(b *testing.B) {
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil)
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
if err != nil {
b.Fatal(err)
}

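http.NoBody is an empty, non-nil request body; gocritic's httpNoBody check prefers it over a literal nil so the intent is explicit and req.Body is safe for any code that reads it. Sketch:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.NoBody reads as an immediate EOF; the request is unambiguous
	// about carrying no payload, unlike a nil body.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket", http.NoBody)
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL)
}
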
@ -49,7 +49,7 @@ func BenchmarkURLQueryForm(b *testing.B) {

// BenchmarkURLQuery - benchmark URL memory allocations
func BenchmarkURLQuery(b *testing.B) {
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", nil)
req, err := http.NewRequest(http.MethodGet, "http://localhost:9000/bucket/name?uploadId=upload&partNumber=1", http.NoBody)
if err != nil {
b.Fatal(err)
}

@ -160,7 +160,7 @@ func hasContentMD5(h http.Header) bool {
return ok
}

/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const (
// Maximum object size per PUT request is 5TB.
// This is a divergence from S3 limit on purpose to support

@ -409,7 +409,7 @@ func dumpRequest(r *http.Request) string {
header.Set("Host", r.Host)
// Replace all '%' to '%%' so that printer format parser
// to ignore URL encoded values.
rawURI := strings.Replace(r.RequestURI, "%", "%%", -1)
rawURI := strings.ReplaceAll(r.RequestURI, "%", "%%")
req := struct {
Method string `json:"method"`
RequestURI string `json:"reqURI"`

@ -238,9 +238,8 @@ func TestDumpRequest(t *testing.T) {
RequestURI string `json:"reqURI"`
Header http.Header `json:"header"`
}
jsonReq = strings.Replace(jsonReq, "%%", "%", -1)
res := jsonResult{}
if err = json.Unmarshal([]byte(jsonReq), &res); err != nil {
if err = json.Unmarshal([]byte(strings.ReplaceAll(jsonReq, "%%", "%")), &res); err != nil {
t.Fatal(err)
}

@ -399,7 +398,6 @@ func TestCeilFrac(t *testing.T) {
// Test if isErrIgnored works correctly.
func TestIsErrIgnored(t *testing.T) {
var errIgnored = fmt.Errorf("ignored error")
ignoredErrs := append(baseIgnoredErrs, errIgnored)
var testCases = []struct {
err error
ignored bool

@ -418,7 +416,7 @@ func TestIsErrIgnored(t *testing.T) {
},
}
for i, testCase := range testCases {
if ok := IsErrIgnored(testCase.err, ignoredErrs...); ok != testCase.ignored {
if ok := IsErrIgnored(testCase.err, append(baseIgnoredErrs, errIgnored)...); ok != testCase.ignored {
t.Errorf("Test: %d, Expected %t, got %t", i+1, testCase.ignored, ok)
}
}

@ -276,7 +276,7 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
if err != nil {
return p, err
}
if _, err = w.Write(alignedBuf[:]); err != nil {
if _, err = w.Write(alignedBuf); err != nil {
w.Close()
return p, err
}

@ -2394,10 +2394,13 @@ func (s *xlStorage) StatInfoFile(ctx context.Context, volume, path string, glob
if err != nil {
name = filePath
}
if os.PathSeparator != '/' {
name = strings.Replace(name, string(os.PathSeparator), "/", -1)
}
stat = append(stat, StatInfo{ModTime: st.ModTime(), Size: st.Size(), Name: name, Dir: st.IsDir(), Mode: uint32(st.Mode())})
stat = append(stat, StatInfo{
Name: filepath.ToSlash(name),
Size: st.Size(),
Dir: st.IsDir(),
Mode: uint32(st.Mode()),
ModTime: st.ModTime(),
})
}
return stat, nil
}

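filepath.ToSlash does exactly what the removed os.PathSeparator branch did by hand: it rewrites the platform separator to '/' and is a no-op where the separator already is '/'. Sketch:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	p := filepath.Join("bucket", "dir", "object.txt")
	// On Windows p contains backslashes; ToSlash normalizes them to '/'.
	fmt.Println(filepath.ToSlash(p)) // bucket/dir/object.txt
}
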
@ -447,7 +447,7 @@ func TestXLStorageReadAll(t *testing.T) {
continue
}
if err == nil {
if string(dataRead) != string([]byte("Hello, World")) {
if !bytes.Equal(dataRead, []byte("Hello, World")) {
t.Errorf("TestXLStorage %d: Expected the data read to be \"%s\", but instead got \"%s\"", i+1, "Hello, World", string(dataRead))
}
}

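bytes.Equal states the comparison directly instead of routing both slices through string conversions; it reports whether the two slices have the same length and contents. Sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	dataRead := []byte("Hello, World")
	// Equivalent to the old string(dataRead) != string([]byte(...)) check,
	// minus the conversions.
	fmt.Println(bytes.Equal(dataRead, []byte("Hello, World"))) // true
}
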
@ -1227,7 +1227,10 @@ func TestXLStorageReadFile(t *testing.T) {
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
}
// Expected error returned, proceed further to validate the returned results.
if err == nil && err == testCase.expectedErr {
if err != nil && testCase.expectedErr == nil {
t.Errorf("Case: %d %#v, expected: %s, got :%s", i+1, testCase, testCase.expectedErr, err)
}
if err == nil {
if !bytes.Equal(testCase.expectedBuf, buf) {
t.Errorf("Case: %d %#v, expected: \"%s\", got: \"%s\"", i+1, testCase, string(testCase.expectedBuf), string(buf[:testCase.bufSize]))
}

@ -56,12 +56,13 @@ func main() {
fatalErr(json.Unmarshal(got, &input))
r, err := os.Open(input.File)
fatalErr(err)
defer r.Close()
dstName := strings.TrimSuffix(input.File, ".enc") + ".zip"
w, err := os.Create(dstName)
fatalErr(err)
defer w.Close()

decrypt(input.Key, r, w)
r.Close()
w.Close()
fmt.Println("Output decrypted to", dstName)
return
}

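One plausible reading of this hunk: with defer, the files stayed open until main returned, after the success message had printed; closing r and w explicitly once decrypt returns makes sure the output is fully flushed before it is announced. A sketch of the flush-before-report pattern (the file name and payload are illustrative):

package main

import (
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)

func main() {
	w, err := os.Create("out.zip")
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(w, strings.NewReader("payload")); err != nil {
		w.Close()
		log.Fatal(err)
	}
	// Close explicitly instead of deferring, so any write-back error
	// surfaces before we claim success.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Output decrypted to", w.Name())
}
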
@ -78,14 +79,13 @@ func main() {
case 1:
r, err := os.Open(args[0])
fatalErr(err)
defer r.Close()
if len(*key) == 0 {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Enter Decryption Key: ")

text, _ := reader.ReadString('\n')
// convert CRLF to LF
*key = strings.Replace(text, "\n", "", -1)
*key = strings.ReplaceAll(text, "\n", "")
}
*key = strings.TrimSpace(*key)
fatalIf(len(*key) != 72, "Unexpected key length: %d, want 72", len(*key))

@ -93,9 +93,11 @@ func main() {
dstName := strings.TrimSuffix(args[0], ".enc") + ".zip"
w, err := os.Create(dstName)
fatalErr(err)
defer w.Close()

decrypt(*key, r, w)
r.Close()
w.Close()

fmt.Println("Output decrypted to", dstName)
return
default:

@ -222,8 +222,8 @@ func GenerateCredentials() (accessKey, secretKey string, err error) {
return "", "", err
}

secretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
"/", "+", -1)
secretKey = strings.ReplaceAll(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
"/", "+")

return accessKey, secretKey, nil
}

@ -42,7 +42,7 @@ type Monitor struct {
NodeCount uint64
}

//NewMonitor returns a monitor with defaults.
// NewMonitor returns a monitor with defaults.
func NewMonitor(ctx context.Context, numNodes uint64) *Monitor {
m := &Monitor{
activeBuckets: make(map[string]*bucketMeasurement),

@ -63,7 +63,7 @@ func (m *Monitor) updateMeasurement(bucket string, bytes uint64) {
}
}

//SelectionFunction for buckets
// SelectionFunction for buckets
type SelectionFunction func(bucket string) bool

// SelectBuckets will select all the buckets passed in.

@ -57,7 +57,7 @@ const (
DeleteVersionAction
// TransitionAction transitions a particular object after evaluating lifecycle transition rules
TransitionAction
//TransitionVersionAction transitions a particular object version after evaluating lifecycle transition rules
// TransitionVersionAction transitions a particular object version after evaluating lifecycle transition rules
TransitionVersionAction
// DeleteRestoredAction means the temporarily restored object needs to be removed after evaluating lifecycle rules
DeleteRestoredAction

@ -193,6 +193,7 @@ func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
return fmt.Errorf("either Days or Years must be specified, not both")
}

//nolint:gocritic
if retention.Days != nil {
if *retention.Days == 0 {
return fmt.Errorf("Default retention period must be a positive integer value for 'Days'")

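//nolint:gocritic is golangci-lint's inline suppression directive: on its own line it silences the named linter for the construct that follows, which is how this commit keeps the remaining if/else chains (the same patterns excluded as "todo" in .golangci.yml) from failing CI. A hypothetical usage:

package main

import "fmt"

func describe(n *int) string {
	//nolint:gocritic // ifElseChain: kept as-is, a switch reads worse here
	if n == nil {
		return "unset"
	} else if *n == 0 {
		return "zero"
	} else {
		return fmt.Sprint(*n)
	}
}

func main() {
	v := 3
	fmt.Println(describe(nil), describe(&v))
}
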
@ -137,6 +137,7 @@ func TestUnmarshalDefaultRetention(t *testing.T) {
}
var dr DefaultRetention
err = xml.Unmarshal(d, &dr)
//nolint:gocritic
if tt.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)

@ -173,6 +174,7 @@ func TestParseObjectLockConfig(t *testing.T) {
}
for _, tt := range tests {
_, err := ParseObjectLockConfig(strings.NewReader(tt.value))
//nolint:gocritic
if tt.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)

@ -209,6 +211,7 @@ func TestParseObjectRetention(t *testing.T) {
}
for _, tt := range tests {
_, err := ParseObjectRetention(strings.NewReader(tt.value))
//nolint:gocritic
if tt.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)

@ -367,6 +370,7 @@ func TestParseObjectLockRetentionHeaders(t *testing.T) {

for i, tt := range tests {
_, _, err := ParseObjectLockRetentionHeaders(tt.header)
//nolint:gocritic
if tt.expectedErr == nil {
if err != nil {
t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)

@ -494,6 +498,7 @@ func TestParseObjectLegalHold(t *testing.T) {
}
for i, tt := range tests {
_, err := ParseObjectLegalHold(strings.NewReader(tt.value))
//nolint:gocritic
if tt.expectedErr == nil {
if err != nil {
t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)

@ -37,7 +37,7 @@ type Destination struct {
Bucket string `xml:"Bucket" json:"Bucket"`
StorageClass string `xml:"StorageClass" json:"StorageClass"`
ARN string
//EncryptionConfiguration TODO: not needed for MinIO
// EncryptionConfiguration TODO: not needed for MinIO
}

func (d Destination) isValidStorageClass() bool {

@ -57,14 +57,14 @@ func (d Destination) String() string {

}

//LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was used
// prior to multi-destination
// LegacyArn returns true if arn format has prefix "arn:aws:s3:::" which was
// used prior to multi-destination
func (d Destination) LegacyArn() bool {
return strings.HasPrefix(d.ARN, DestinationARNPrefix)
}

//TargetArn returns true if arn format has prefix "arn:minio:replication:::" used
// for multi-destination targets
// TargetArn returns true if arn format has prefix "arn:minio:replication:::"
// used for multi-destination targets
func (d Destination) TargetArn() bool {
return strings.HasPrefix(d.ARN, DestinationARNMinIOPrefix)
}

@ -175,7 +175,7 @@ func (c Config) FilterActionableRules(obj ObjectOpts) []Rule {
rules = append(rules, rule)
}
}
sort.Slice(rules[:], func(i, j int) bool {
sort.Slice(rules, func(i, j int) bool {
return rules[i].Priority > rules[j].Priority && rules[i].Destination.String() == rules[j].Destination.String()
})

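rules[:] over a slice is the slice itself, so the full-slice expression adds nothing; gocritic's unslice check flags exactly this (the alignedBuf[:] change above is the same fix). Sketch:

package main

import (
	"fmt"
	"sort"
)

func main() {
	rules := []int{3, 1, 2}
	// sort.Slice(rules[:], ...) and sort.Slice(rules, ...) are identical;
	// the [:] is only meaningful on arrays, where it creates a slice.
	sort.Slice(rules, func(i, j int) bool { return rules[i] < rules[j] })
	fmt.Println(rules) // [1 2 3]
}
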
@ -31,28 +31,28 @@ func TestParseAndValidateReplicationConfig(t *testing.T) {
destBucket string
sameTarget bool
}{
{ //1 Invalid delete marker status in replication config
{ // 1 Invalid delete marker status in replication config
inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>string</Status></DeleteMarkerReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: errInvalidDeleteMarkerReplicationStatus,
},
//2 Invalid delete replication status in replication config
// 2 Invalid delete replication status in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: errDeleteReplicationMissing,
},
//3 valid replication config
// 3 valid replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: nil,
},
//4 missing role in config and destination ARN is in legacy format
// 4 missing role in config and destination ARN is in legacy format
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
// destination bucket in config different from bucket specified
destBucket: "destinationbucket",

@ -60,63 +60,63 @@ func TestParseAndValidateReplicationConfig(t *testing.T) {
expectedParsingErr: nil,
expectedValidationErr: errDestinationArnMissing,
},
//5 replication destination in different rules not identical
// 5 replication destination in different rules not identical
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role></Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:minio:replication:::destinationbucket</Bucket></Destination></Rule><Rule><Status>Enabled</Status><Priority>3</Priority><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:minio:replication:::destinationbucket2</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: nil,
},
//6 missing rule status in replication config
// 6 missing rule status in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: errEmptyRuleStatus,
},
//7 invalid rule status in replication config
// 7 invalid rule status in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enssabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: errInvalidRuleStatus,
},
//8 invalid rule id exceeds length allowed in replication config
// 8 invalid rule id exceeds length allowed in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><ID>vsUVERgOc8zZYagLSzSa5lE8qeI6nh1lyLNS4R9W052yfecrhhepGboswSWMMNO8CPcXM4GM3nKyQ72EadlMzzZBFoYWKn7ju5GoE5w9c57a0piHR1vexpdd9FrMquiruvAJ0MTGVupm0EegMVxoIOdjx7VgZhGrmi2XDvpVEFT7WmYMA9fSK297XkTHWyECaNHBySJ1Qp4vwX8tPNauKpfHx4kzUpnKe1PZbptGMWbY5qTcwlNuMhVSmgFffShq</ID><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: errInvalidRuleID,
},
//9 invalid priority status in replication config
// 9 invalid priority status in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: errReplicationUniquePriority,
},
//10 no rule in replication config
// 10 no rule in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: nil,
expectedValidationErr: errReplicationNoRule,
},
//11 no destination in replication config
// 11 no destination in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: Errorf("invalid destination '%v'", ""),
expectedValidationErr: nil,
},
//12 destination not matching ARN in replication config
// 12 destination not matching ARN in replication config
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>destinationbucket2</Bucket></Destination></Rule></ReplicationConfiguration>`,
destBucket: "destinationbucket",
sameTarget: false,
expectedParsingErr: fmt.Errorf("invalid destination '%v'", "destinationbucket2"),
expectedValidationErr: nil,
},
//13 missing role in config and destination ARN has target ARN
// 13 missing role in config and destination ARN has target ARN
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:minio:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
// destination bucket in config different from bucket specified
destBucket: "destinationbucket",

@ -124,7 +124,7 @@ func TestParseAndValidateReplicationConfig(t *testing.T) {
expectedParsingErr: nil,
expectedValidationErr: nil,
},
//14 role absent in config and destination ARN has target ARN in invalid format
// 14 role absent in config and destination ARN has target ARN in invalid format
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:xx:replication::8320b6d18f9032b4700f1f03b50d8d1853de8f22cab86931ee794e12f190852c:destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
// destination bucket in config different from bucket specified
destBucket: "destinationbucket",

@ -232,53 +232,53 @@ func TestReplicate(t *testing.T) {
|
|||
expectedResult bool
|
||||
}{
|
||||
// using config 1 - no filters, all replication enabled
|
||||
{ObjectOpts{}, cfgs[0], false}, //1. invalid ObjectOpts missing object name
|
||||
{ObjectOpts{Name: "c1test"}, cfgs[0], true}, //2. valid ObjectOpts passing empty Filter
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid"}, cfgs[0], true}, //3. valid ObjectOpts passing empty Filter
|
||||
{ObjectOpts{}, cfgs[0], false}, // 1. invalid ObjectOpts missing object name
|
||||
{ObjectOpts{Name: "c1test"}, cfgs[0], true}, // 2. valid ObjectOpts passing empty Filter
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid"}, cfgs[0], true}, // 3. valid ObjectOpts passing empty Filter
|
||||
|
||||
{ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, //4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, //5. permanent delete of version, matches DeleteReplication status - valid case
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, //6. permanent delete of version, matches DeleteReplication status
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, //7. permanent delete of version, disqualified by SSE-C
|
||||
{ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, //8. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C
|
||||
{ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], false}, //9. replication of SSE-C encrypted object, disqualified
|
||||
{ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, // 5. permanent delete of version, matches DeleteReplication status - valid case
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 6. permanent delete of version, matches DeleteReplication status
|
||||
{ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 7. permanent delete of version, disqualified by SSE-C
|
||||
{ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 8. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C
|
||||
{ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], false}, // 9. replication of SSE-C encrypted object, disqualified
|
||||
|
||||
// using config 2 - no filters, only replication of object, metadata enabled
|
||||
{ObjectOpts{Name: "c2test"}, cfgs[1], true}, //10. valid ObjectOpts passing empty Filter
|
||||
{ObjectOpts{Name: "c2test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, //11. DeleteMarker version replication not allowed due to DeleteMarkerReplication status
|
||||
{ObjectOpts{Name: "c2test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[1], false}, //12. permanent delete of version, disallowed by DeleteReplication status
|
||||
{ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, //13. permanent delete of DeleteMarker version, disallowed by DeleteReplication status
|
||||
{ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, //14. permanent delete of version, disqualified by SSE-C & DeleteReplication status
|
||||
{ObjectOpts{Name: "c2test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, //15. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C & DeleteMarkerReplication status
|
||||
{ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], false}, //16. replication of SSE-C encrypted object, disqualified by default
|
||||
{ObjectOpts{Name: "c2test"}, cfgs[1], true}, // 10. valid ObjectOpts passing empty Filter
|
||||
{ObjectOpts{Name: "c2test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 11. DeleteMarker version replication not allowed due to DeleteMarkerReplication status
|
||||
{ObjectOpts{Name: "c2test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[1], false}, // 12. permanent delete of version, disallowed by DeleteReplication status
|
||||
{ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 13. permanent delete of DeleteMarker version, disallowed by DeleteReplication status
|
||||
{ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 14. permanent delete of version, disqualified by SSE-C & DeleteReplication status
|
||||
{ObjectOpts{Name: "c2test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 15. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C & DeleteMarkerReplication status
|
||||
{ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], false}, // 16. replication of SSE-C encrypted object, disqualified by default
|
||||
// using config 2 - has more than one rule with overlapping prefixes
|
||||
{ObjectOpts{Name: "xy/c3test", UserTags: "k1=v1"}, cfgs[2], true}, //17. matches rule 1 for replication of content/metadata
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1"}, cfgs[2], true}, //18. matches rule 1 for replication of content/metadata
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], false}, //19. matches rule 1 - DeleteMarker replication disallowed by rule
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, //20. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, //21. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
|
||||
{ObjectOpts{Name: "xyz/c3test"}, cfgs[2], true}, //22. matches rule 2 for replication of content/metadata
|
||||
{ObjectOpts{Name: "xy/c3test", UserTags: "k1=v2"}, cfgs[2], false}, //23. does not match rule1 because tag value does not pass filter
|
||||
{ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], true}, //24. matches rule 2 - DeleteMarker replication allowed by rule
|
||||
{ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, //25. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
|
||||
{ObjectOpts{Name: "xyz/c3test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, //26. matches rule 1 - DeleteReplication disallowed by rule for permanent delete of version
|
||||
{ObjectOpts{Name: "abc/c3test"}, cfgs[2], false}, //27. matches no rule because object prefix does not match
|
||||
{ObjectOpts{Name: "xy/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 17. matches rule 1 for replication of content/metadata
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 18. matches rule 1 for replication of content/metadata
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], false}, // 19. matches rule 1 - DeleteMarker replication disallowed by rule
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, // 20. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
|
||||
{ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], true}, // 21. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
{ObjectOpts{Name: "xyz/c3test"}, cfgs[2], true}, // 22. matches rule 2 for replication of content/metadata
{ObjectOpts{Name: "xy/c3test", UserTags: "k1=v2"}, cfgs[2], false}, // 23. does not match rule1 because tag value does not pass filter
{ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[2], true}, // 24. matches rule 2 - DeleteMarker replication allowed by rule
{ObjectOpts{Name: "xyz/c3test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, // 25. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
{ObjectOpts{Name: "xyz/c3test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[2], false}, // 26. matches rule 1 - DeleteReplication disallowed by rule for permanent delete of version
{ObjectOpts{Name: "abc/c3test"}, cfgs[2], false}, // 27. matches no rule because object prefix does not match

// using config 3 - has no overlapping rules
{ObjectOpts{Name: "xy/c4test", UserTags: "k1=v1"}, cfgs[3], true}, //28. matches rule 1 for replication of content/metadata
{ObjectOpts{Name: "xa/c4test", UserTags: "k1=v1"}, cfgs[3], false}, //29. no rule match object prefix not in rules
{ObjectOpts{Name: "xyz/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, //30. rule 1 not matched because of tags filter
{ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, //31. matches rule 1 - DeleteMarker replication disallowed by rule
{ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, //32. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
{ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, //33. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
{ObjectOpts{Name: "abc/c4test"}, cfgs[3], true}, //34. matches rule 2 for replication of content/metadata
{ObjectOpts{Name: "abc/c4test", UserTags: "k1=v2"}, cfgs[3], true}, //35. matches rule 2 for replication of content/metadata
{ObjectOpts{Name: "abc/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], true}, //36. matches rule 2 - DeleteMarker replication allowed by rule
{ObjectOpts{Name: "abc/c4test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, //37. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
{ObjectOpts{Name: "abc/c4test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, //38. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of version
{ObjectOpts{Name: "xy/c4test", UserTags: "k1=v1"}, cfgs[3], true}, // 28. matches rule 1 for replication of content/metadata
{ObjectOpts{Name: "xa/c4test", UserTags: "k1=v1"}, cfgs[3], false}, // 29. no rule match object prefix not in rules
{ObjectOpts{Name: "xyz/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, // 30. rule 1 not matched because of tags filter
{ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], false}, // 31. matches rule 1 - DeleteMarker replication disallowed by rule
{ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, // 32. matches rule 1 - DeleteReplication allowed by rule for permanent delete of DeleteMarker
{ObjectOpts{Name: "xyz/c4test", UserTags: "k1=v1", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], true}, // 33. matches rule 1 - DeleteReplication allowed by rule for permanent delete of version
{ObjectOpts{Name: "abc/c4test"}, cfgs[3], true}, // 34. matches rule 2 for replication of content/metadata
{ObjectOpts{Name: "abc/c4test", UserTags: "k1=v2"}, cfgs[3], true}, // 35. matches rule 2 for replication of content/metadata
{ObjectOpts{Name: "abc/c4test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[3], true}, // 36. matches rule 2 - DeleteMarker replication allowed by rule
{ObjectOpts{Name: "abc/c4test", DeleteMarker: true, VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, // 37. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of DeleteMarker
{ObjectOpts{Name: "abc/c4test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[3], false}, // 38. matches rule 2 - DeleteReplication disallowed by rule for permanent delete of version

// using config 4 - with replica modification sync disabled.
{ObjectOpts{Name: "xy/c5test", UserTags: "k1=v1", Replica: true}, cfgs[4], false}, //39. replica syncing disabled, this object is a replica
{ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, //40. replica syncing disabled, this object is NOT a replica
{ObjectOpts{Name: "xy/c5test", UserTags: "k1=v1", Replica: true}, cfgs[4], false}, // 39. replica syncing disabled, this object is a replica
{ObjectOpts{Name: "xa/c5test", UserTags: "k1=v1", Replica: false}, cfgs[4], true}, // 40. replica syncing disabled, this object is NOT a replica
}

for i, testCase := range testCases {
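
The table above is consumed by an ordinary table-driven loop. The body of that loop is not shown in this hunk; a hedged sketch of its likely shape, with the field names (c, opts, expectedResult) and the Replicate method assumed for illustration rather than quoted from the file:

    // Hedged sketch of the table-driven check driving the cases above.
    if got := testCase.c.Replicate(testCase.opts); got != testCase.expectedResult {
        t.Errorf("Test %d: expected %v, got %v", i+1, testCase.expectedResult, got)
    }
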
@@ -322,7 +322,7 @@ func TestHasActiveRules(t *testing.T) {
expectedNonRec: false,
expectedRec: true,
},
//case 5 - has filter with prefix and tags, here we are not matching on tags
// case 5 - has filter with prefix and tags, here we are not matching on tags
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Filter>
<And><Prefix>key-prefix</Prefix><Tag><Key>key1</Key><Value>value1</Value></Tag><Tag><Key>key2</Key><Value>value2</Value></Tag></And></Filter><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination></Rule></ReplicationConfiguration>`,
prefix: "testdir/",

@@ -31,23 +31,23 @@ func TestMetadataReplicate(t *testing.T) {
}{
// case 1 - rule with replica modification enabled; not a replica
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Enabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
opts: ObjectOpts{Name: "c1test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, //1. Replica mod sync enabled; not a replica
opts: ObjectOpts{Name: "c1test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync enabled; not a replica
expectedResult: true,
},
// case 2 - rule with replica modification disabled; a replica
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Disabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: true}, //1. Replica mod sync enabled; a replica
opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: true}, // 1. Replica mod sync enabled; a replica
expectedResult: false,
},
// case 3 - rule with replica modification disabled; not a replica
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Disabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, //1. Replica mod sync disabled; not a replica
opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: ObjectReplicationType, Replica: false}, // 1. Replica mod sync disabled; not a replica
expectedResult: true,
},

// case 4 - rule with replica modification enabled; a replica
{inputConfig: `<ReplicationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Role>arn:aws:iam::AcctID:role/role-name</Role><Rule><Status>Enabled</Status><DeleteMarkerReplication><Status>Disabled</Status></DeleteMarkerReplication><DeleteReplication><Status>Disabled</Status></DeleteReplication><Prefix>key-prefix</Prefix><Destination><Bucket>arn:aws:s3:::destinationbucket</Bucket></Destination><SourceSelectionCriteria><ReplicaModifications><Status>Enabled</Status></ReplicaModifications></SourceSelectionCriteria></Rule></ReplicationConfiguration>`,
opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: MetadataReplicationType, Replica: true}, //1. Replica mod sync enabled; a replica
opts: ObjectOpts{Name: "c2test", DeleteMarker: false, OpType: MetadataReplicationType, Replica: true}, // 1. Replica mod sync enabled; a replica
expectedResult: true,
},
}
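
The four TestMetadataReplicate cases above walk the full 2x2 matrix of ReplicaModifications status against whether the incoming object is itself a replica. A hedged, self-contained sketch of the rule they appear to exercise (function and parameter names are illustrative, not minio's internals):

package main

import "fmt"

// replicateMetadata mirrors the decision the cases above test: a
// replica's metadata change syncs back only when the rule has
// ReplicaModifications enabled; non-replicas always qualify.
func replicateMetadata(replicaModSyncEnabled, isReplica bool) bool {
	if isReplica {
		return replicaModSyncEnabled
	}
	return true
}

func main() {
	fmt.Println(replicateMetadata(true, false))  // case 1: true
	fmt.Println(replicateMetadata(false, true))  // case 2: false
	fmt.Println(replicateMetadata(false, false)) // case 3: true
	fmt.Println(replicateMetadata(true, true))   // case 4: true
}
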
@@ -117,9 +117,9 @@ func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
}

// EnsureCertAndKey checks if both client certificate and key paths are provided
func EnsureCertAndKey(ClientCert, ClientKey string) error {
if (ClientCert != "" && ClientKey == "") ||
(ClientCert == "" && ClientKey != "") {
func EnsureCertAndKey(clientCert, clientKey string) error {
if (clientCert != "" && clientKey == "") ||
(clientCert == "" && clientKey != "") {
return errors.New("cert and key must be specified as a pair")
}
return nil
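
The rename above is gocritic's captLocal fix: in Go an initial capital signals an exported identifier, so capitalized parameters such as ClientCert read like package-level names. A tiny stand-alone illustration of the convention (names invented for the example):

package main

import "fmt"

// captLocal: prefer `in` over `In` for parameters and locals, since a
// leading capital suggests an exported identifier in Go.
func copyBytes(in []byte) []byte { // not: func copyBytes(In []byte)
	out := make([]byte, len(in))
	copy(out, in)
	return out
}

func main() {
	fmt.Println(string(copyBytes([]byte("cert"))))
}
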
@@ -38,6 +38,7 @@ func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []strin
values := []string{}
for _, name := range names {
oid := name.Type
//nolint:gocritic
if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
switch oid[3] {
case 3:
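
Where a flagged construct is intentional, the commit suppresses the single gocritic finding inline with a //nolint directive instead of widening the global exclude list. The directive has no space after the slashes and sits immediately above the offending statement; a minimal runnable sketch:

package main

import "fmt"

func main() {
	x := 1
	//nolint:gocritic // singleCaseSwitch: kept deliberately for readability
	switch x {
	case 1:
		fmt.Println("one")
	}
}
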
@@ -201,9 +201,9 @@ func Authentication(username, password string) OperatorOption {
}

// RootCAs - add custom trust certs pool
func RootCAs(CAs *x509.CertPool) OperatorOption {
func RootCAs(certPool *x509.CertPool) OperatorOption {
return func(args *OperatorDNS) {
args.rootCAs = CAs
args.rootCAs = certPool
}
}

@@ -86,7 +86,7 @@ func (opts Config) Wait(currentIO func() int, systemIO func() int) {
} else {
time.Sleep(waitTick)
}
tmpMaxWait = tmpMaxWait - waitTick
tmpMaxWait -= waitTick
}
if tmpMaxWait <= 0 {
return
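
The tmpMaxWait change is gocritic's assignOp rewrite: when a variable is combined with itself, the compound operator says the same thing more directly. The two forms are equivalent, as this small sketch (values invented) shows:

package main

import (
	"fmt"
	"time"
)

func main() {
	tmpMaxWait := 10 * time.Second
	waitTick := 100 * time.Millisecond
	tmpMaxWait -= waitTick  // same as: tmpMaxWait = tmpMaxWait - waitTick
	fmt.Println(tmpMaxWait) // 9.9s
}
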
@@ -186,7 +186,7 @@ func (l *Config) lookupBind(conn *ldap.Conn) error {
// assumed to be using the lookup bind service account. It is required that the
// search result in at most one result.
func (l *Config) lookupUserDN(conn *ldap.Conn, username string) (string, error) {
filter := strings.Replace(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", ldap.EscapeFilter(username))
searchRequest := ldap.NewSearchRequest(
l.UserDNSearchBaseDN,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
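
This and the following LDAP hunks all apply the same substitution: strings.ReplaceAll(s, old, new), added in Go 1.12, is the documented equivalent of strings.Replace(s, old, new, -1), where n = -1 means "replace every non-overlapping occurrence". A quick self-contained check (filter string invented):

package main

import (
	"fmt"
	"strings"
)

func main() {
	filter := "(&(uid=%s)(memberOf=%s))"
	a := strings.Replace(filter, "%s", "*", -1) // n = -1: replace all
	b := strings.ReplaceAll(filter, "%s", "*")  // identical result
	fmt.Println(a == b) // true
	fmt.Println(b)      // (&(uid=*)(memberOf=*))
}
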
@@ -213,8 +213,8 @@ func (l *Config) searchForUserGroups(conn *ldap.Conn, username, bindDN string) (
var groups []string
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDistNames {
filter := strings.Replace(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username), -1)
filter = strings.Replace(filter, "%d", ldap.EscapeFilter(bindDN), -1)
filter := strings.ReplaceAll(l.GroupSearchFilter, "%s", ldap.EscapeFilter(username))
filter = strings.ReplaceAll(filter, "%d", ldap.EscapeFilter(bindDN))
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,

@@ -393,7 +393,7 @@ func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string,
}

// Evaluate the filter again with generic wildcard instead of specific values
filter := strings.Replace(l.UserDNSearchFilter, "%s", "*", -1)
filter := strings.ReplaceAll(l.UserDNSearchFilter, "%s", "*")

nonExistentUsers := []string{}
for _, dn := range userDistNames {

@@ -85,6 +85,7 @@ func TestPublicKey(t *testing.T) {
}
}

//nolint:gocritic
if key0, ok := keys[0].(*ecdsa.PublicKey); !ok {
t.Fatalf("Expected ECDSA key[0], got %T", keys[0])
} else if key1, ok := keys[1].(*rsa.PublicKey); !ok {

@@ -19,7 +19,7 @@ package config

import "github.com/minio/minio/internal/auth"

//// One time migration code section
// One time migration code section

// SetCredentials - One time migration code needed, for migrating from older config to new for server credentials.
func SetCredentials(c Config, cred auth.Credentials) {

@@ -90,7 +90,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str
sealingKey [32]byte
encryptedKey bytes.Buffer
)
mac := hmac.New(sha256.New, extKey[:])
mac := hmac.New(sha256.New, extKey)
mac.Write(iv[:])
mac.Write([]byte(domain))
mac.Write([]byte(SealAlgorithm))
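
Dropping [:] here is gocritic's unslice fix: extKey is already a []byte, so extKey[:] produces the very same slice header, while the [:] on iv stays necessary because iv is a [32]byte array. The HMAC construction itself is ordinary crypto/hmac usage; a hedged, self-contained sketch of deriving a key this way (domain and algorithm labels invented for the example):

package main

import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	extKey := make([]byte, 32) // external key, e.g. unsealed from a KMS
	rand.Read(extKey)
	var iv [32]byte
	rand.Read(iv[:]) // iv is an array, so [:] is required here

	mac := hmac.New(sha256.New, extKey) // extKey[:] would be redundant
	mac.Write(iv[:])
	mac.Write([]byte("SSE-S3"))      // domain label, illustrative
	mac.Write([]byte("DAREv2-HMAC")) // algorithm label, illustrative
	fmt.Println(hex.EncodeToString(mac.Sum(nil)))
}
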
@@ -118,7 +118,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
default:
return Errorf("The sealing algorithm '%s' is not supported", sealedKey.Algorithm)
case SealAlgorithm:
mac := hmac.New(sha256.New, extKey[:])
mac := hmac.New(sha256.New, extKey)
mac.Write(sealedKey.IV[:])
mac.Write([]byte(domain))
mac.Write([]byte(SealAlgorithm))

@@ -126,7 +126,7 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket,
unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
case InsecureSealAlgorithm:
sha := sha256.New()
sha.Write(extKey[:])
sha.Write(extKey)
sha.Write(sealedKey.IV[:])
unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.CipherSuitesDARE()}
}

@@ -164,7 +164,7 @@ func TestDerivePartKey(t *testing.T) {
t.Fatalf("Test %d failed to decode expected part-key: %v", i, err)
}
partKey := key.DerivePartKey(test.PartID)
if !bytes.Equal(partKey[:], expectedPartKey[:]) {
if !bytes.Equal(partKey[:], expectedPartKey) {
t.Errorf("Test %d derives wrong part-key: got '%s' want: '%s'", i, hex.EncodeToString(partKey[:]), test.PartKey)
}
}

@@ -109,7 +109,7 @@ func (s3 ssekms) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket
if err != nil {
return key, err
}
err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
return key, err
}

@@ -80,7 +80,7 @@ func (s3 sses3) UnsealObjectKey(KMS kms.KMS, metadata map[string]string, bucket,
if err != nil {
return key, err
}
err = key.Unseal(unsealKey[:], sealedKey, s3.String(), bucket, object)
err = key.Unseal(unsealKey, sealedKey, s3.String(), bucket, object)
return key, err
}

@@ -40,12 +40,12 @@ func DisableDirectIO(f *os.File) error {
if err != nil {
return err
}
flag = flag & ^(syscall.O_DIRECT)
flag &= ^(syscall.O_DIRECT)
_, err = unix.FcntlInt(fd, unix.F_SETFL, flag)
return err
}

// AlignedBlock - pass through to directio implementation.
func AlignedBlock(BlockSize int) []byte {
return directio.AlignedBlock(BlockSize)
func AlignedBlock(blockSize int) []byte {
return directio.AlignedBlock(blockSize)
}
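
flag &= ^(syscall.O_DIRECT) clears just the O_DIRECT bit while leaving the other fcntl flags intact; Go also offers the dedicated AND-NOT operator, so flag &^= syscall.O_DIRECT would be an equivalent spelling. A portable sketch of the bit manipulation alone (the O_DIRECT value below is illustrative; the real constant is platform-specific):

package main

import "fmt"

const oDirect = 0x4000 // illustrative; syscall.O_DIRECT varies by platform

func main() {
	flag := 0x0001 | oDirect // some flag set with O_DIRECT on
	flag &= ^oDirect         // clear the bit; same as flag &^= oDirect
	fmt.Printf("flags=%#x, O_DIRECT still set: %v\n", flag, flag&oDirect != 0)
}
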
@@ -199,9 +199,9 @@ func (dm *DRWMutex) lockBlocking(ctx context.Context, lockLossCallback func(), i

// If success, copy array to object
if isReadLock {
copy(dm.readLocks, locks[:])
copy(dm.readLocks, locks)
} else {
copy(dm.writeLocks, locks[:])
copy(dm.writeLocks, locks)
}
dm.m.Unlock()
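
Here and in the Unlock/RUnlock hunks below, locks is already a slice, so the [:] that unslice removes was a no-op re-slice; copy behaves identically with or without it. On an array, by contrast, [:] is what produces the slice and stays required. A quick illustration with invented data:

package main

import "fmt"

func main() {
	locks := []string{"uid-1", "uid-2", ""}
	readLocks := make([]string, len(locks))
	copy(readLocks, locks) // identical to copy(readLocks, locks[:])

	var arr [3]string
	copy(arr[:], locks) // arrays still need [:] to become a slice
	fmt.Println(readLocks, arr)
}
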
@@ -579,7 +579,7 @@ func (dm *DRWMutex) Unlock() {
}

// Copy write locks to stack array
copy(locks, dm.writeLocks[:])
copy(locks, dm.writeLocks)
}

// Tolerance is not set, defaults to half of the locker clients.

@@ -620,7 +620,7 @@ func (dm *DRWMutex) RUnlock() {
}

// Copy write locks to stack array
copy(locks, dm.readLocks[:])
copy(locks, dm.readLocks)
}

// Tolerance is not set, defaults to half of the locker clients.

@@ -94,10 +94,8 @@ func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply {
l.lockMap[args.Resources[0]] = ReadLock // No locks held on the given name, so claim (first) read lock
*reply = true
} else {
if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
}
} else if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock
l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
}
return nil
}
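
This is gocritic's elseif rewrite, applied again in the MQTT and NATS hunks below: when an else block contains nothing but another if, the two collapse into else if and one nesting level disappears. The shape in isolation, with invented logic:

package main

import "fmt"

func classify(n int) string {
	if n == 0 {
		return "zero"
	} else if n > 0 { // was: } else { if n > 0 { ... } }
		return "positive"
	}
	return "negative"
}

func main() {
	fmt.Println(classify(-3), classify(0), classify(7))
}
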
@@ -318,7 +318,7 @@ func ParseConfig(reader io.Reader, region string, targetList *TargetList) (*Conf
}

config.SetRegion(region)
//If xml namespace is empty, set a default value before returning.
// If xml namespace is empty, set a default value before returning.
if config.XMLNS == "" {
config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
}

@@ -41,7 +41,7 @@ func NewPattern(prefix, suffix string) (pattern string) {
pattern += suffix
}

pattern = strings.Replace(pattern, "**", "*", -1)
pattern = strings.ReplaceAll(pattern, "**", "*")

return pattern
}

@@ -25,13 +25,14 @@ import (
)

func initScramClient(args KafkaArgs, config *sarama.Config) {
if args.SASL.Mechanism == "sha512" {
switch args.SASL.Mechanism {
case "sha512":
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA512} }
config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512)
} else if args.SASL.Mechanism == "sha256" {
case "sha256":
config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA256} }
config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256)
} else {
default:
// default to PLAIN
config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypePlaintext)
}
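
gocritic's ifElseChain check prefers a switch once the same expression is compared against several values; the trailing else becomes the default arm. A stand-alone sketch mirroring the mechanism selection above (return values invented, not sarama's constants):

package main

import "fmt"

func saslMechanism(name string) string {
	switch name { // was: if name == "sha512" {...} else if ... else {...}
	case "sha512":
		return "SCRAM-SHA-512"
	case "sha256":
		return "SCRAM-SHA-256"
	default:
		return "PLAIN" // default to PLAIN
	}
}

func main() {
	fmt.Println(saslMechanism("sha256"), saslMechanism(""))
}
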
@@ -272,10 +272,8 @@ func NewMQTTTarget(id string, args MQTTArgs, doneCh <-chan struct{}, loggerOnce
// Start replaying events from the store.
go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce)
}
} else {
if token.Wait() && token.Error() != nil {
return target, token.Error()
}
} else if token.Wait() && token.Error() != nil {
return target, token.Error()
}
return target, nil
}

@@ -172,6 +172,7 @@ func (n NATSArgs) connectStan() (stan.Conn, error) {
}

var addressURL string
//nolint:gocritic
if n.Username != "" && n.Password != "" {
addressURL = scheme + "://" + n.Username + ":" + n.Password + "@" + n.Address.String()
} else if n.Token != "" {

@@ -219,18 +220,14 @@ func (target *NATSTarget) IsActive() (bool, error) {
if target.args.Streaming.Enable {
if target.stanConn == nil || target.stanConn.NatsConn() == nil {
target.stanConn, connErr = target.args.connectStan()
} else {
if !target.stanConn.NatsConn().IsConnected() {
return false, errNotConnected
}
} else if !target.stanConn.NatsConn().IsConnected() {
return false, errNotConnected
}
} else {
if target.natsConn == nil {
target.natsConn, connErr = target.args.connectNats()
} else {
if !target.natsConn.IsConnected() {
return false, errNotConnected
}
} else if !target.natsConn.IsConnected() {
return false, errNotConnected
}
}

@@ -29,7 +29,7 @@ import (
func AppendFile(dst string, src string, osync bool) error {
flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE
if osync {
flags = flags | os.O_SYNC
flags |= os.O_SYNC
}
appendFile, err := os.OpenFile(dst, flags, 0666)
if err != nil {
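
flags |= os.O_SYNC is the same assignOp rewrite seen earlier; functionally, adding O_SYNC on top of O_APPEND makes every write reach stable storage before returning. A minimal sketch of opening a file with those flags (path invented):

package main

import "os"

func main() {
	flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE
	osync := true
	if osync {
		flags |= os.O_SYNC // was: flags = flags | os.O_SYNC
	}
	f, err := os.OpenFile("/tmp/append.log", flags, 0666)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	f.WriteString("appended durably\n")
}
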
Some files were not shown because too many files have changed in this diff.