Mirror of https://github.com/minio/minio.git (synced 2025-04-05 20:30:32 -04:00)

Migrate golangci-lint config to V2 (#21081)

Parent: b67f0cf721
Commit: e88d494775
@@ -1,45 +1,64 @@
linters-settings:
gofumpt:
simplify: true
misspell:
locale: US
staticcheck:
checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
version: "2"
linters:
disable-all: true
default: none
enable:
- durationcheck
- forcetypeassert
- gocritic
- gofumpt
- goimports
- gomodguard
- govet
- ineffassign
- misspell
- revive
- staticcheck
- typecheck
- unconvert
- unused
- usetesting
- forcetypeassert
- whitespace
settings:
misspell:
locale: US
staticcheck:
checks:
- all
- -SA1008
- -SA1019
- -SA4000
- -SA9004
- -ST1000
- -ST1005
- -ST1016
- -U1000
exclusions:
generated: lax
rules:
- linters:
- forcetypeassert
path: _test\.go
- path: (.+)\.go$
text: 'empty-block:'
- path: (.+)\.go$
text: 'unused-parameter:'
- path: (.+)\.go$
text: 'dot-imports:'
- path: (.+)\.go$
text: should have a package comment
- path: (.+)\.go$
text: error strings should not be capitalized or end with punctuation or a newline
paths:
- third_party$
- builtin$
- examples$
issues:
exclude-use-default: false
max-issues-per-linter: 100
max-same-issues: 100
exclude:
- "empty-block:"
- "unused-parameter:"
- "dot-imports:"
- should have a package comment
- error strings should not be capitalized or end with punctuation or a newline
exclude-rules:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- forcetypeassert
formatters:
enable:
- gofumpt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
@@ -214,10 +214,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
}
// Check if we are creating svc account for request sender.
isSvcAccForRequestor := false
if targetUser == requestorUser || targetUser == requestorParentUser {
isSvcAccForRequestor = true
}
isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser
var (
targetGroups []string

@@ -258,7 +258,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request)
// concurrent rebalance-start commands.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyEp.Host == ep.Host {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return
}

@@ -329,7 +329,7 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
// pools may temporarily have out of date info on the others.
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyEp.Host == ep.Host {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return
}

@@ -383,7 +383,7 @@ func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w h
return
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
if proxyEp.Host == host && !proxyEp.IsLocal {
if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success {
return true
}
@@ -197,12 +197,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
return
}
checkDenyOnly := false
if name == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
// to view one's own info.
checkDenyOnly = true
}
checkDenyOnly := name == cred.AccessKey
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,

@@ -493,12 +488,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
// to change one's own password.
checkDenyOnly = true
}
checkDenyOnly := accessKey == cred.AccessKey
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,

@@ -689,10 +679,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
}
// Check if we are creating svc account for request sender.
isSvcAccForRequestor := false
if targetUser == requestorUser || targetUser == requestorParentUser {
isSvcAccForRequestor = true
}
isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser
// If we are creating svc account for request sender, ensure
// that targetUser is a real user (i.e. not derived
@@ -2673,7 +2660,7 @@ func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) e
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
dur := exp.Sub(time.Now())
dur := time.Until(*exp)
if dur <= 0 {
return errors.New("unsupported expiration time")
}
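The hunk above applies staticcheck's "use time.Until" rewrite: `t.Sub(time.Now())` and `time.Until(t)` compute the same duration, but the latter reads as intent and is lint-clean. A minimal sketch of the pattern, illustrative only and not part of the commit:

package main

import (
	"errors"
	"fmt"
	"time"
)

// expiresIn mirrors the change above: prefer time.Until(t) over t.Sub(time.Now()).
func expiresIn(exp time.Time) (time.Duration, error) {
	dur := time.Until(exp) // same value as exp.Sub(time.Now())
	if dur <= 0 {
		return 0, errors.New("unsupported expiration time")
	}
	return dur, nil
}

func main() {
	d, err := expiresIn(time.Now().Add(90 * time.Second))
	fmt.Println(d, err)
}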
@@ -160,7 +160,7 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) {
}
func (s *TestSuiteIAM) RestartIAMSuite(c *check) {
s.TestSuiteCommon.RestartTestServer(c)
s.RestartTestServer(c)
s.iamSetup(c)
}
@@ -2676,7 +2676,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
// disk metrics are already included under drive info of each server
getRealtimeMetrics := func() *madmin.RealtimeMetrics {
var m madmin.RealtimeMetrics
var types madmin.MetricType = madmin.MetricsAll &^ madmin.MetricsDisk
types := madmin.MetricsAll &^ madmin.MetricsDisk
mLocal := collectLocalMetrics(types, collectMetricsOpts{})
m.Merge(&mLocal)
cctx, cancel := context.WithTimeout(healthCtx, time.Second/2)

@@ -2720,7 +2720,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
poolsArgs := re.ReplaceAllString(cmdLine, `$3`)
var anonPools []string
if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) {
if !strings.Contains(poolsArgs, "{") || !strings.Contains(poolsArgs, "}") {
// No ellipses pattern. Anonymize host name from every pool arg
pools := strings.Fields(poolsArgs)
anonPools = make([]string, len(pools))

@@ -3420,7 +3420,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
}
// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
if volume != minioMetaBucket || file != formatConfigFile {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
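Many hunks in this commit, including the two above, apply De Morgan's law to drop the `!(... && ...)` or `!(... || ...)` wrapper that gocritic/staticcheck flag. A generic sketch of the equivalence, with hypothetical constant values used only for illustration:

package main

import "fmt"

// skipFormat shows the rewrite used above:
// !(a && b) == !a || !b, and !(a || b) == !a && !b.
func skipFormat(volume, file string) bool {
	const metaBucket, formatFile = ".minio.sys", "format.json" // hypothetical values
	return volume != metaBucket || file != formatFile          // same as !(volume == metaBucket && file == formatFile)
}

func main() {
	fmt.Println(skipFormat(".minio.sys", "format.json")) // false
	fmt.Println(skipFormat("photos", "format.json"))     // true
}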
@@ -44,10 +44,10 @@ type DeleteMarkerMTime struct {
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if t.Time.IsZero() {
if t.IsZero() {
return nil
}
return e.EncodeElement(t.Time.Format(time.RFC3339), startElement)
return e.EncodeElement(t.Format(time.RFC3339), startElement)
}
// ObjectV object version key/versionId

@@ -34,7 +34,8 @@ func TestNewRequestID(t *testing.T) {
e = char
// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {
isAlnum := ('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')
if !isAlnum {
t.Fail()
}
}
@@ -227,13 +227,13 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc {
}
// Skip wrapping with the gzip middleware if specified.
var gzippedHandler http.HandlerFunc = tracedHandler
gzippedHandler := tracedHandler
if !handlerFlags.has(noGZS3HFlag) {
gzippedHandler = gzipHandler(gzippedHandler)
}
// Skip wrapping with throttling middleware if specified.
var throttledHandler http.HandlerFunc = gzippedHandler
throttledHandler := gzippedHandler
if !handlerFlags.has(noThrottleS3HFlag) {
throttledHandler = maxClients(throttledHandler)
}
@@ -39,7 +39,6 @@ import (
"github.com/lithammer/shortuuid/v4"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"

@@ -47,7 +46,6 @@ import (
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
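The import hunks above drop the redundant `miniogo` alias: `github.com/minio/minio-go/v7` already declares package `minio`, so the rest of the file can refer to `minio.Core`, `minio.Options`, and so on directly, and the duplicate aliased import goes away. A small sketch of the same cleanup; the import path and option type are from the minio-go module, the usage is illustrative:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7" // package name is already "minio"; no miniogo alias needed
)

func main() {
	// Before: miniogo.GetObjectOptions{} via `miniogo "github.com/minio/minio-go/v7"`.
	// After: use the package's own name.
	opts := minio.GetObjectOptions{}
	fmt.Printf("%T\n", opts)
}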
@@ -142,7 +140,7 @@ func (r BatchJobReplicateV1) Notify(ctx context.Context, ri *batchJobInfo) error
}
// ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local.
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *minio.Core, srcObjInfo ObjectInfo, retry bool) error {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
srcObject := srcObjInfo.Name

@@ -189,7 +187,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
}
return r.copyWithMultipartfromSource(ctx, api, core, srcObjInfo, opts, partsCount)
}
gopts := miniogo.GetObjectOptions{
gopts := minio.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
}
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {

@@ -210,7 +208,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec
return err
}
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
srcObject := srcObjInfo.Name

@@ -251,7 +249,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
)
for i := 0; i < partsCount; i++ {
gopts := miniogo.GetObjectOptions{
gopts := minio.GetObjectOptions{
VersionID: srcObjInfo.VersionID,
PartNumber: i + 1,
}

@@ -382,7 +380,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
cred := r.Source.Creds
c, err := miniogo.New(u.Host, &miniogo.Options{
c, err := minio.New(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),

@@ -393,7 +391,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
}
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
core := &miniogo.Core{Client: c}
core := &minio.Core{Client: c}
workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2)))
if err != nil {

@@ -414,14 +412,14 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO
ctx, cancel := context.WithCancel(ctx)
objInfoCh := make(chan miniogo.ObjectInfo, 1)
objInfoCh := make(chan minio.ObjectInfo, 1)
go func() {
prefixes := r.Source.Prefix.F()
if len(prefixes) == 0 {
prefixes = []string{""}
}
for _, prefix := range prefixes {
prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{
prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, minio.ListObjectsOptions{
Prefix: prefix,
WithVersions: minioSrc,
Recursive: true,
@@ -444,7 +442,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
// all user metadata or just storageClass. If its only storageClass
// List() already returns relevant information for filter to be applied.
if isMetadata && !isStorageClassOnly {
oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{})
oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, minio.StatObjectOptions{})
if err == nil {
oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2)
} else {

@@ -540,7 +538,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
}
// toObjectInfo converts minio.ObjectInfo to ObjectInfo
func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo {
func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo {
tags, _ := tags.MapToObjectTags(objInfo.UserTags)
oi := ObjectInfo{
Bucket: bucket,

@@ -643,7 +641,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
}
// ReplicateToTarget read from source and replicate to configured target
func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error {
func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, retry bool) error {
srcBucket := r.Source.Bucket
tgtBucket := r.Target.Bucket
tgtPrefix := r.Target.Prefix

@@ -652,9 +650,9 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
if srcObjInfo.DeleteMarker || !srcObjInfo.VersionPurgeStatus.Empty() {
if retry && !s3Type {
if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.StatObjectOptions{
if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.StatObjectOptions{
VersionID: srcObjInfo.VersionID,
Internal: miniogo.AdvancedGetOptions{
Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
}); isErrMethodNotAllowed(ErrorRespToObjectError(err, tgtBucket, pathJoin(tgtPrefix, srcObject))) {

@@ -671,19 +669,19 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
dmVersionID = ""
versionID = ""
}
return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.RemoveObjectOptions{
return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.RemoveObjectOptions{
VersionID: versionID,
Internal: miniogo.AdvancedRemoveOptions{
Internal: minio.AdvancedRemoveOptions{
ReplicationDeleteMarker: dmVersionID != "",
ReplicationMTime: srcObjInfo.ModTime,
ReplicationStatus: miniogo.ReplicationStatusReplica,
ReplicationStatus: minio.ReplicationStatusReplica,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
})
}
if retry && !s3Type { // when we are retrying avoid copying if necessary.
gopts := miniogo.GetObjectOptions{}
gopts := minio.GetObjectOptions{}
if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil {
return err
}

@@ -717,7 +715,7 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL
return err
}
if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 {
putOpts.Internal = miniogo.AdvancedPutOptions{}
putOpts.Internal = minio.AdvancedPutOptions{}
}
if isMP {
if err := replicateObjectWithMultipart(ctx, c, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, objInfo, putOpts); err != nil {
@@ -1124,7 +1122,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
// if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
return !((r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest)
isSourceOrTargetS3 := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
return !isSourceOrTargetS3 || info.IsLatest
}
u, err := url.Parse(r.Target.Endpoint)

@@ -1134,7 +1133,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
cred := r.Target.Creds
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
c, err := minio.NewCore(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),

@@ -1157,14 +1156,14 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
if r.Source.Snowball.Disable != nil && !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
go func() {
// Snowball currently needs the high level minio-go Client, not the Core one
cl, err := miniogo.New(u.Host, &miniogo.Options{
cl, err := minio.New(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),
BucketLookup: lookupStyle(r.Target.Path),
})
if err != nil {
batchLogOnceIf(ctx, err, job.ID+"miniogo.New")
batchLogOnceIf(ctx, err, job.ID+"minio.New")
return
}

@@ -1274,7 +1273,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
stopFn := globalBatchJobsMetrics.trace(batchJobMetricReplication, job.ID, attempts)
success := true
if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil {
if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" {
if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
// pre-condition failed means we already have the object copied over.
return
}

@@ -1457,7 +1456,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
return err
}
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
c, err := minio.NewCore(u.Host, &minio.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport(),

@@ -1470,7 +1469,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
vcfg, err := c.GetBucketVersioning(ctx, remoteBkt)
if err != nil {
if miniogo.ToErrorResponse(err).Code == "NoSuchBucket" {
if minio.ToErrorResponse(err).Code == "NoSuchBucket" {
return batchReplicationJobError{
Code: "NoSuchTargetBucket",
Description: "The specified target bucket does not exist",

@@ -1575,13 +1574,13 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string
return err
}
func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, isMP bool, err error) {
func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) {
// TODO: support custom storage class for remote replication
putOpts, isMP, err = putReplicationOpts(ctx, "", objInfo)
if err != nil {
return putOpts, isMP, err
}
putOpts.Internal = miniogo.AdvancedPutOptions{
putOpts.Internal = minio.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,

@@ -1740,7 +1739,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
return
}
buf, err := io.ReadAll(ioutil.HardLimitReader(r.Body, humanize.MiByte*4))
buf, err := io.ReadAll(xioutil.HardLimitReader(r.Body, humanize.MiByte*4))
if err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
@@ -2300,15 +2299,15 @@ func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int) func
}
}
func lookupStyle(s string) miniogo.BucketLookupType {
var lookup miniogo.BucketLookupType
func lookupStyle(s string) minio.BucketLookupType {
var lookup minio.BucketLookupType
switch s {
case "on":
lookup = miniogo.BucketLookupPath
lookup = minio.BucketLookupPath
case "off":
lookup = miniogo.BucketLookupDNS
lookup = minio.BucketLookupDNS
default:
lookup = miniogo.BucketLookupAuto
lookup = minio.BucketLookupAuto
}
return lookup
}
@@ -48,9 +48,7 @@ func (bs *bootstrapTracer) Events() []madmin.TraceInfo {
traceInfo := make([]madmin.TraceInfo, 0, bootstrapTraceLimit)
bs.mu.RLock()
for _, i := range bs.info {
traceInfo = append(traceInfo, i)
}
traceInfo = append(traceInfo, bs.info...)
bs.mu.RUnlock()
return traceInfo

@@ -26,7 +26,7 @@ import (
//go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE
type lcEventSrc uint8
//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins
//nolint:staticcheck,revive // Underscores are used here to indicate where common prefix ends and the enumeration name begins
const (
lcEventSrc_None lcEventSrc = iota
lcEventSrc_Heal

@@ -152,7 +152,7 @@ func (f freeVersionTask) OpHash() uint64 {
}
func (n newerNoncurrentTask) OpHash() uint64 {
return xxh3.HashString(n.bucket + n.versions[0].ObjectV.ObjectName)
return xxh3.HashString(n.bucket + n.versions[0].ObjectName)
}
func (j jentry) OpHash() uint64 {

@@ -305,7 +305,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header)
if err != nil && !(replica && rMode == "" && rDate.IsZero()) {
if err != nil && (!replica || rMode != "" || !rDate.IsZero()) {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
if retentionPermErr != ErrNone {
@@ -3750,7 +3750,7 @@ func (p *ReplicationPool) queueMRFHeal() error {
}
func (p *ReplicationPool) initialized() bool {
return !(p == nil || p.objLayer == nil)
return p != nil && p.objLayer != nil
}
// getMRF returns MRF entries for this node.

@@ -382,7 +382,7 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
}
// Check "no-compat" flag from command line argument.
ctxt.StrictS3Compat = !(ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat"))
ctxt.StrictS3Compat = !ctx.IsSet("no-compat") && !ctx.GlobalIsSet("no-compat")
switch {
case ctx.IsSet("config-dir"):

@@ -717,9 +717,7 @@ func serverHandleEnvVars() {
logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value in environment variable")
}
// Look for if URL has invalid values and return error.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.Opaque == "" &&
!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
if !isValidURLEndpoint((*url.URL)(u)) {
err := fmt.Errorf("URL contains unexpected resources, expected URL to be one of http(s)://console.example.com or as a subpath via API endpoint http(s)://minio.example.com/minio format: %v", u)
logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value is environment variable")
}

@@ -734,9 +732,7 @@ func serverHandleEnvVars() {
logger.Fatal(err, "Invalid MINIO_SERVER_URL value in environment variable")
}
// Look for if URL has invalid values and return error.
if !((u.Scheme == "http" || u.Scheme == "https") &&
(u.Path == "/" || u.Path == "") && u.Opaque == "" &&
!u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
if !isValidURLEndpoint((*url.URL)(u)) {
err := fmt.Errorf("URL contains unexpected resources, expected URL to be of http(s)://minio.example.com format: %v", u)
logger.Fatal(err, "Invalid MINIO_SERVER_URL value is environment variable")
}

@@ -915,7 +911,7 @@ func handleKMSConfig() {
}
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
if !isFile(getPublicCertFile()) || !isFile(getPrivateKeyFile()) {
return nil, nil, false, nil
}
@@ -37,7 +37,6 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/color"

@@ -991,7 +990,7 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
versionID := oi.VersionID
var vc *versioning.Versioning
var lr objectlock.Retention
var lr lock.Retention
var rcfg *replication.Config
if !isMinioMetaBucketName(i.bucket) {
vc, err = globalBucketVersioningSys.Get(i.bucket)

@@ -137,7 +137,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
close(workers[0])
wg.Wait()
for _, obj := range expired {
switch obj.ObjectV.VersionID {
switch obj.VersionID {
case uuids[2].String(), uuids[3].String(), uuids[4].String():
default:
t.Errorf("Unexpected versionID being expired: %#v\n", obj)

@@ -118,7 +118,6 @@ func (ats *allTierStats) populateStats(stats map[string]madmin.TierStats) {
NumObjects: st.NumObjects,
}
}
return
}
// tierStats holds per-tier stats of a remote tier.

@@ -165,7 +165,7 @@ func TestCmpReaders(t *testing.T) {
r1 := bytes.NewReader([]byte("abc"))
r2 := bytes.NewReader([]byte("abc"))
ok, msg := cmpReaders(r1, r2)
if !(ok && msg == "") {
if !ok || msg != "" {
t.Fatalf("unexpected")
}
}
@@ -1015,7 +1015,7 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e
if encrypted {
if crypto.SSEC.IsEncrypted(info.UserDefined) {
if !(crypto.SSEC.IsRequested(headers) || crypto.SSECopy.IsRequested(headers)) {
if !crypto.SSEC.IsRequested(headers) && !crypto.SSECopy.IsRequested(headers) {
if r.Header.Get(xhttp.MinIOSourceReplicationRequest) != "true" {
return encrypted, errEncryptedObject
}

@@ -1112,7 +1112,6 @@ func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
o.Parts[i].Checksums = cs[i]
}
}
return
}
// metadataEncryptFn provides an encryption function for metadata.
@@ -138,6 +138,17 @@ func (endpoint *Endpoint) SetDiskIndex(i int) {
endpoint.DiskIdx = i
}
func isValidURLEndpoint(u *url.URL) bool {
// URL style of endpoint.
// Valid URL style endpoint is
// - Scheme field must contain "http" or "https"
// - All field should be empty except Host and Path.
isURLOk := (u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && !u.ForceQuery &&
u.RawQuery == "" && u.Fragment == ""
return isURLOk
}
// NewEndpoint - returns new endpoint based on given arguments.
func NewEndpoint(arg string) (ep Endpoint, e error) {
// isEmptyPath - check whether given path is not empty.
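The new `isValidURLEndpoint` helper above centralizes the scheme/opaque/query/fragment checks that were previously inlined in `serverHandleEnvVars` and `NewEndpoint`. A hedged usage sketch; the helper body is copied from the hunk, the caller is illustrative:

package main

import (
	"fmt"
	"net/url"
)

// isValidURLEndpoint reports whether u looks like http(s)://host[/path] with nothing else set.
func isValidURLEndpoint(u *url.URL) bool {
	return (u.Scheme == "http" || u.Scheme == "https") &&
		u.User == nil && u.Opaque == "" && !u.ForceQuery &&
		u.RawQuery == "" && u.Fragment == ""
}

func main() {
	u, _ := url.Parse("https://minio.example.com/minio")
	fmt.Println(isValidURLEndpoint(u)) // true
	u, _ = url.Parse("https://minio.example.com/?x=1")
	fmt.Println(isValidURLEndpoint(u)) // false
}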
@@ -157,8 +168,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
// Valid URL style endpoint is
// - Scheme field must contain "http" or "https"
// - All field should be empty except Host and Path.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") {
if !isValidURLEndpoint(u) {
return ep, fmt.Errorf("invalid URL endpoint format")
}

@@ -604,11 +614,8 @@ func (endpoints Endpoints) UpdateIsLocal() error {
startTime := time.Now()
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
for {
for !foundLocal && (epsResolved != len(endpoints)) {
// Break if the local endpoint is found already Or all the endpoints are resolved.
if foundLocal || (epsResolved == len(endpoints)) {
break
}
// Retry infinitely on Kubernetes and Docker swarm.
// This is needed as the remote hosts are sometime

@@ -794,11 +801,8 @@ func (p PoolEndpointList) UpdateIsLocal() error {
startTime := time.Now()
keepAliveTicker := time.NewTicker(1 * time.Second)
defer keepAliveTicker.Stop()
for {
for !foundLocal && (epsResolved != epCount) {
// Break if the local endpoint is found already Or all the endpoints are resolved.
if foundLocal || (epsResolved == epCount) {
break
}
// Retry infinitely on Kubernetes and Docker swarm.
// This is needed as the remote hosts are sometime
@@ -323,12 +323,7 @@ func checkObjectWithAllParts(ctx context.Context, onlineDisks []StorageAPI, part
}
}
erasureDistributionReliable := true
if inconsistent > len(partsMetadata)/2 {
// If there are too many inconsistent files, then we can't trust erasure.Distribution (most likely
// because of bugs found in CopyObject/PutObjectTags) https://github.com/minio/minio/pull/10772
erasureDistributionReliable = false
}
erasureDistributionReliable := inconsistent <= len(partsMetadata)/2
metaErrs := make([]error, len(errs))

@@ -943,12 +943,12 @@ func isObjectDirDangling(errs []error) (ok bool) {
var foundNotEmpty int
var otherFound int
for _, readErr := range errs {
switch {
case readErr == nil:
switch readErr {
case nil:
found++
case readErr == errFileNotFound || readErr == errVolumeNotFound:
case errFileNotFound, errVolumeNotFound:
notFound++
case readErr == errVolumeNotEmpty:
case errVolumeNotEmpty:
foundNotEmpty++
default:
otherFound++
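The `isObjectDirDangling` hunk above converts a tagless `switch { case err == ...: }` into a value switch on the error itself, which also lets several sentinel errors share one case. A generic sketch of the pattern; the sentinel names here are hypothetical stand-ins:

package main

import (
	"errors"
	"fmt"
)

var (
	errFileNotFound   = errors.New("file not found")   // hypothetical sentinels for illustration
	errVolumeNotFound = errors.New("volume not found")
)

func classify(err error) string {
	switch err { // value switch instead of `switch { case err == ...: }`
	case nil:
		return "found"
	case errFileNotFound, errVolumeNotFound:
		return "not found"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(nil), classify(errFileNotFound), classify(errors.New("x")))
}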
@@ -813,8 +813,6 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
PoolIndex: er.poolIndex,
})
}
return
}()
validResp := 0

@@ -1634,7 +1632,7 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object
func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
if !opts.NoAuditLog {
for _, obj := range objects {
auditObjectErasureSet(ctx, "DeleteObjects", obj.ObjectV.ObjectName, &er)
auditObjectErasureSet(ctx, "DeleteObjects", obj.ObjectName, &er)
}
}

@@ -189,7 +189,7 @@ func TestDeleteObjectsVersionedTwoPools(t *testing.T) {
t.Errorf("Test %d: Failed to remove object `%v` with the error: `%v`", testIdx, names[i], delErrs[i])
}
_, statErr := obj.GetObjectInfo(ctx, bucketName, objectName, ObjectOptions{
VersionID: names[i].ObjectV.VersionID,
VersionID: names[i].VersionID,
})
switch statErr.(type) {
case VersionNotFound:

@@ -265,7 +265,7 @@ func TestDeleteObjectsVersioned(t *testing.T) {
for i, test := range testCases {
_, statErr := obj.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{
VersionID: names[i].ObjectV.VersionID,
VersionID: names[i].VersionID,
})
switch statErr.(type) {
case VersionNotFound:
@@ -1014,11 +1014,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
defer wk.Give()
// We will perpetually retry listing if it fails, since we cannot
// possibly give up in this matter
for {
if contextCanceled(ctx) {
break
}
for !contextCanceled(ctx) {
err := set.listObjectsToDecommission(ctx, bi,
func(entry metaCacheEntry) {
wk.Take()

@@ -2103,7 +2103,7 @@ func (store *IAMStoreSys) getParentUsers(cache *iamCache) map[string]ParentUserI
cred := ui.Credentials
// Only consider service account or STS credentials with
// non-empty session tokens.
if !(cred.IsServiceAccount() || cred.IsTemp()) ||
if (!cred.IsServiceAccount() && !cred.IsTemp()) ||
cred.SessionToken == "" {
continue
}

@@ -32,7 +32,6 @@ import (
"sync/atomic"
"time"
libldap "github.com/go-ldap/ldap/v3"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/arn"

@@ -1691,7 +1690,7 @@ func (sys *IAMSys) NormalizeLDAPMappingImport(ctx context.Context, isGroup bool,
// We map keys that correspond to LDAP DNs and validate that they exist in
// the LDAP server.
var dnValidator func(*libldap.Conn, string) (*ldap.DNSearchResult, bool, error) = sys.LDAPConfig.GetValidatedUserDN
dnValidator := sys.LDAPConfig.GetValidatedUserDN
if isGroup {
dnValidator = sys.LDAPConfig.GetValidatedGroupDN
}
@@ -215,7 +215,7 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache
continue
}
if o.Lifecycle != nil || o.Replication.Config != nil {
if skipped := triggerExpiryAndRepl(ctx, *o, entry); skipped == true {
if skipped := triggerExpiryAndRepl(ctx, *o, entry); skipped {
results.lastSkippedEntry = entry.name
continue
}

@@ -69,7 +69,7 @@ const (
// On success a sorted meta cache stream will be returned.
// Metadata has data stripped, if any.
func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) (err error) {
legacyFS := !(s.fsType == xfs || s.fsType == ext4)
legacyFS := s.fsType != xfs && s.fsType != ext4
s.RLock()
legacy := s.formatLegacy

@@ -278,7 +278,7 @@ func collectDriveMetrics(m madmin.RealtimeMetrics) {
}
func collectLocalResourceMetrics() {
var types madmin.MetricType = madmin.MetricsDisk | madmin.MetricNet | madmin.MetricsMem | madmin.MetricsCPU
types := madmin.MetricsDisk | madmin.MetricNet | madmin.MetricsMem | madmin.MetricsCPU
m := collectLocalMetrics(types, collectMetricsOpts{})
for _, hm := range m.ByHost {
@@ -76,10 +76,8 @@ func TestGetHistogramMetrics_BucketCount(t *testing.T) {
// Send observations once every 1ms, to simulate delay between
// observations. This is to test the channel based
// synchronization used internally.
select {
case <-ticker.C:
ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val)
}
<-ticker.C
ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val)
}
metrics := getHistogramMetrics(ttfbHist, getBucketTTFBDistributionMD(), false, false)

@@ -137,10 +135,8 @@ func TestGetHistogramMetrics_Values(t *testing.T) {
// Send observations once every 1ms, to simulate delay between
// observations. This is to test the channel based
// synchronization used internally.
select {
case <-ticker.C:
ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val)
}
<-ticker.C
ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val)
}
// Accumulate regular-cased API label metrics for 'PutObject' for deeper verification
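Both test hunks above replace a one-case `select` on a ticker channel with a plain blocking receive, which is what staticcheck suggests for a select with a single case. A minimal sketch, illustrative only:

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(time.Millisecond)
	defer ticker.Stop()
	for i := 0; i < 3; i++ {
		// Before:
		//   select {
		//   case <-ticker.C:
		//       observe(i)
		//   }
		// After: a select with exactly one case is just a blocking receive.
		<-ticker.C
		fmt.Println("observation", i)
	}
}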
@@ -205,7 +205,7 @@ func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] {
func newCPUMetricsCache() *cachevalue.Cache[madmin.CPUMetrics] {
loadCPUMetrics := func(ctx context.Context) (v madmin.CPUMetrics, err error) {
var types madmin.MetricType = madmin.MetricsCPU
types := madmin.MetricsCPU
m := collectLocalMetrics(types, collectMetricsOpts{
hosts: map[string]struct{}{

@@ -230,7 +230,7 @@ func newCPUMetricsCache() *cachevalue.Cache[madmin.CPUMetrics] {
func newMemoryMetricsCache() *cachevalue.Cache[madmin.MemInfo] {
loadMemoryMetrics := func(ctx context.Context) (v madmin.MemInfo, err error) {
var types madmin.MetricType = madmin.MetricsMem
types := madmin.MetricsMem
m := collectLocalMetrics(types, collectMetricsOpts{
hosts: map[string]struct{}{

@@ -131,7 +131,7 @@ var (
)
func getCurrentDriveIOStats() map[string]madmin.DiskIOStats {
var types madmin.MetricType = madmin.MetricsDisk
types := madmin.MetricsDisk
driveRealtimeMetrics := collectLocalMetrics(types, collectMetricsOpts{
hosts: map[string]struct{}{
globalLocalNodeName: {},

@@ -208,8 +208,6 @@ func (m *mrfState) startMRFPersistence() {
localDrive.Delete(GlobalContext, minioMetaBucket, pathJoin(healMRFDir, "list.bin"), DeleteOptions{})
break
}
return
}
var healSleeper = newDynamicSleeper(5, time.Second, false)
@@ -208,8 +208,8 @@ func (o *ObjectOptions) SetDeleteReplicationState(dsc ReplicateDecision, vID str
o.DeleteReplication = ReplicationState{
ReplicateDecisionStr: dsc.String(),
}
switch {
case o.VersionID == "":
switch o.VersionID {
case "":
o.DeleteReplication.ReplicationStatusInternal = dsc.PendingStatus()
o.DeleteReplication.Targets = replicationStatusesMap(o.DeleteReplication.ReplicationStatusInternal)
default:

@@ -47,7 +47,6 @@ import (
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/trie"

@@ -146,7 +145,7 @@ func IsValidBucketName(bucket string) bool {
allNumbers = allNumbers && !isNotNumber
}
// Does the bucket name look like an IP address?
return !(len(pieces) == 4 && allNumbers)
return len(pieces) != 4 || !allNumbers
}
// IsValidObjectName verifies an object name in accordance with Amazon's

@@ -885,7 +884,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, h
return nil, err
}
if decryptSkip > 0 {
inputReader = ioutil.NewSkipReader(inputReader, decryptSkip)
inputReader = xioutil.NewSkipReader(inputReader, decryptSkip)
}
oi.Size = decLength
}

@@ -970,7 +969,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, h
// Apply the skipLen and limit on the
// decrypted stream
decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)
decReader = io.LimitReader(xioutil.NewSkipReader(decReader, skipLen), decRangeLength)
// Assemble the GetObjectReader
r = &GetObjectReader{
@@ -408,7 +408,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
perr error
)
if (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) && !(gr != nil && gr.ObjInfo.DeleteMarker) {
if (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) && (gr == nil || !gr.ObjInfo.DeleteMarker) {
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
globalReplicationStats.Load().incProxy(bucket, getObjectAPI, false)

@@ -537,8 +537,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
setHeadGetRespHeaders(w, r.Form)
var iw io.Writer
iw = w
var iw io.Writer = w
statusCodeWritten := false
httpWriter := xioutil.WriteOnClose(iw)

@@ -707,8 +706,6 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj
UserAgent: r.UserAgent(),
Host: handlers.GetSourceIP(r),
})
return
}
// GetObjectHandler - GET Object

@@ -130,7 +130,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r
break
}
}
if !(ssecRep && sourceReplReq) {
if !ssecRep || !sourceReplReq {
if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

@@ -803,7 +803,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http
}
}
if !(sourceReplReq && crypto.SSEC.IsEncrypted(mi.UserDefined)) {
if !sourceReplReq || !crypto.SSEC.IsEncrypted(mi.UserDefined) {
// Calculating object encryption key
key, err = decryptObjectMeta(key, bucket, object, mi.UserDefined)
if err != nil {
@@ -39,7 +39,6 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/logger/log"
"github.com/minio/minio/internal/bucket/bandwidth"
b "github.com/minio/minio/internal/bucket/bandwidth"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/grid"
xioutil "github.com/minio/minio/internal/ioutil"

@@ -1035,7 +1034,7 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
// GetBandwidth gets the bandwidth for the buckets requested.
func (s *peerRESTServer) GetBandwidth(params *grid.URLValues) (*bandwidth.BucketBandwidthReport, *grid.RemoteErr) {
buckets := params.Values().Get("buckets")
selectBuckets := b.SelectBuckets(buckets)
selectBuckets := bandwidth.SelectBuckets(buckets)
return globalBucketMonitor.GetReport(selectBuckets), nil
}
@@ -338,10 +338,7 @@ func netperf(ctx context.Context, duration time.Duration) madmin.NetperfNodeResu
time.Sleep(duration)
xioutil.SafeClose(r.eof)
wg.Wait()
for {
if globalNetPerfRX.ActiveConnections() == 0 {
break
}
for globalNetPerfRX.ActiveConnections() != 0 {
time.Sleep(time.Second)
}
rx := float64(globalNetPerfRX.RXSample)

@@ -396,10 +393,7 @@ func siteNetperf(ctx context.Context, duration time.Duration) madmin.SiteNetPerf
time.Sleep(duration)
xioutil.SafeClose(r.eof)
wg.Wait()
for {
if globalSiteNetPerfRX.ActiveConnections() == 0 || contextCanceled(ctx) {
break
}
for globalSiteNetPerfRX.ActiveConnections() != 0 && !contextCanceled(ctx) {
time.Sleep(time.Second)
}
rx := float64(globalSiteNetPerfRX.RXSample)
@@ -37,7 +37,6 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
minioClient "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/replication"
"github.com/minio/minio-go/v7/pkg/set"

@@ -478,8 +477,8 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi
var secretKey string
var svcCred auth.Credentials
sa, _, err := globalIAMSys.getServiceAccount(ctx, siteReplicatorSvcAcc)
switch {
case err == errNoSuchServiceAccount:
switch err {
case errNoSuchServiceAccount:
_, secretKey, err = auth.GenerateCredentials()
if err != nil {
return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err))

@@ -492,7 +491,7 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi
if err != nil {
return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err))
}
case err == nil:
case nil:
svcCred = sa.Credentials
secretKey = svcCred.SecretKey
default:

@@ -738,7 +737,6 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration
resultsMu.Lock()
results.NodeResults = append(results.NodeResults, result)
resultsMu.Unlock()
return
}()
continue
}

@@ -756,7 +754,6 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration
resultsMu.Lock()
results.NodeResults = append(results.NodeResults, result)
resultsMu.Unlock()
return
}()
}
wg.Wait()
@@ -2625,7 +2622,7 @@ func getAdminClient(endpoint, accessKey, secretKey string) (*madmin.AdminClient,
return client, nil
}
func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) {
func getS3Client(pc madmin.PeerSite) (*minio.Client, error) {
ep, err := url.Parse(pc.Endpoint)
if err != nil {
return nil, err

@@ -2634,7 +2631,7 @@ func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) {
return nil, RemoteTargetConnectionErr{Endpoint: ep.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", ep.String())}
}
return minioClient.New(ep.Host, &minioClient.Options{
return minio.New(ep.Host, &minio.Options{
Creds: credentials.NewStaticV4(pc.AccessKey, pc.SecretKey, ""),
Secure: ep.Scheme == "https",
Transport: globalRemoteTargetTransport,

@@ -3106,7 +3103,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O
var policies []*policy.Policy
uPolicyCount := 0
for _, ps := range pslc {
plcy, err := policy.ParseConfig(bytes.NewReader([]byte(ps.SRIAMPolicy.Policy)))
plcy, err := policy.ParseConfig(bytes.NewReader([]byte(ps.Policy)))
if err != nil {
continue
}

@@ -3323,7 +3320,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O
uRuleCount := 0
for _, rl := range ilmExpRules {
var rule lifecycle.Rule
if err := xml.Unmarshal([]byte(rl.ILMExpiryRule.ILMRule), &rule); err != nil {
if err := xml.Unmarshal([]byte(rl.ILMRule), &rule); err != nil {
continue
}
rules = append(rules, &rule)

@@ -3600,7 +3597,7 @@ func isILMExpRuleReplicated(cntReplicated, total int, rules []*lifecycle.Rule) b
if err != nil {
return false
}
if !(string(prevRData) == string(rData)) {
if string(prevRData) != string(rData) {
return false
}
}

@@ -4416,7 +4413,7 @@ func (c *SiteReplicationSys) healILMExpiryConfig(ctx context.Context, objAPI Obj
// If latest peers ILM expiry flags are equal to current peer, no need to heal
flagEqual := true
for id, peer := range latestPeers {
if !(ps.Peers[id].ReplicateILMExpiry == peer.ReplicateILMExpiry) {
if ps.Peers[id].ReplicateILMExpiry != peer.ReplicateILMExpiry {
flagEqual = false
break
}

@@ -5478,12 +5475,12 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
)
for dID, ss := range us {
if lastUpdate.IsZero() {
lastUpdate = ss.userInfo.UserInfo.UpdatedAt
lastUpdate = ss.userInfo.UpdatedAt
latestID = dID
latestUserStat = ss
}
if !ss.userInfo.UserInfo.UpdatedAt.IsZero() && ss.userInfo.UserInfo.UpdatedAt.After(lastUpdate) {
lastUpdate = ss.userInfo.UserInfo.UpdatedAt
if !ss.userInfo.UpdatedAt.IsZero() && ss.userInfo.UpdatedAt.After(lastUpdate) {
lastUpdate = ss.userInfo.UpdatedAt
latestID = dID
latestUserStat = ss
}
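Several hunks above (the `healUsers`, `OpHash`, and `DeleteObjects` changes, among others) rely on Go's field promotion: when a struct embeds another without a field name, the embedded type's fields can be reached without repeating it. A hedged sketch with made-up types, not the actual MinIO structs:

package main

import (
	"fmt"
	"time"
)

type UserInfo struct{ UpdatedAt time.Time }

// srUserInfo embeds UserInfo, so UpdatedAt is promoted to the outer struct.
type srUserInfo struct {
	UserInfo
}

func main() {
	ss := srUserInfo{UserInfo{UpdatedAt: time.Now()}}
	// ss.UserInfo.UpdatedAt and ss.UpdatedAt name the same field;
	// the shorter form is what the linter-driven cleanup switches to.
	fmt.Println(ss.UpdatedAt.Equal(ss.UserInfo.UpdatedAt)) // true
}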
@@ -37,7 +37,6 @@ import (
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/grid"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/rest"
xnet "github.com/minio/pkg/v3/net"

@@ -928,7 +927,7 @@ func (client *storageRESTClient) ReadMultiple(ctx context.Context, req ReadMulti
pr, pw := io.Pipe()
go func() {
pw.CloseWithError(waitForHTTPStream(respBody, ioutil.NewDeadlineWriter(pw, globalDriveConfig.GetMaxTimeout())))
pw.CloseWithError(waitForHTTPStream(respBody, xioutil.NewDeadlineWriter(pw, globalDriveConfig.GetMaxTimeout())))
}()
mr := msgp.NewReader(pr)
defer readMsgpReaderPoolPut(mr)

@@ -592,9 +592,10 @@ func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) {
if err != nil {
// We always know when EOF is coming.
// If the caller asked for a line, there should be a line.
if err == io.EOF {
switch err {
case io.EOF:
err = io.ErrUnexpectedEOF
} else if err == bufio.ErrBufferFull {
case bufio.ErrBufferFull:
err = errLineTooLong
}
return nil, nil, err
@@ -642,7 +642,7 @@ func (s *TestSuiteIAM) TestSTSForRoot(c *check) {
gotBuckets := set.NewStringSet()
for _, b := range accInfo.Buckets {
gotBuckets.Add(b.Name)
if !(b.Access.Read && b.Access.Write) {
if !b.Access.Read || !b.Access.Write {
c.Fatalf("root user should have read and write access to bucket: %v", b.Name)
}
}

@@ -1443,7 +1443,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariationsLegacyAPI(c *check) {
idx := slices.IndexFunc(policyResult.PolicyMappings, func(e madmin.PolicyEntities) bool {
return e.Policy == policy && slices.Contains(e.Groups, actualGroupDN)
})
if !(idx >= 0) {
if idx < 0 {
c.Fatalf("expected groupDN (%s) to be present in mapping list: %#v", actualGroupDN, policyResult)
}
}

@@ -1606,7 +1606,7 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
idx := slices.IndexFunc(policyResult.PolicyMappings, func(e madmin.PolicyEntities) bool {
return e.Policy == policy && slices.Contains(e.Groups, actualGroupDN)
})
if !(idx >= 0) {
if idx < 0 {
c.Fatalf("expected groupDN (%s) to be present in mapping list: %#v", actualGroupDN, policyResult)
}
}
@@ -226,10 +226,7 @@ func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, err
for _, sets := range pool.erasureDisks {
for _, s := range sets {
if !s.IsLocal() {
for {
if s.IsOnline() {
break
}
for !s.IsOnline() {
time.Sleep(100 * time.Millisecond)
if time.Since(t) > 10*time.Second {
return nil, nil, errors.New("timeout waiting for disk to come online")
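The `prepareErasure` hunk above folds a `for { if cond { break } ... }` loop into `for !cond { ... }`; the net-perf, decommission, and endpoint-resolution hunks earlier make the same change. A small sketch of the pattern; the `isOnline` poll here is illustrative, not MinIO's actual implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

func waitOnline(isOnline func() bool, timeout time.Duration) error {
	start := time.Now()
	for !isOnline() { // replaces: for { if isOnline() { break } ... }
		time.Sleep(10 * time.Millisecond)
		if time.Since(start) > timeout {
			return errors.New("timeout waiting for disk to come online")
		}
	}
	return nil
}

func main() {
	fmt.Println(waitOnline(func() bool { return true }, time.Second))
}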
@@ -642,8 +639,8 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
switch k {
case "host":
buf.WriteString(req.URL.Host)
fallthrough
default:

@@ -996,8 +993,8 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error {
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
switch k {
case "host":
buf.WriteString(req.URL.Host)
fallthrough
default:

@@ -1089,8 +1086,8 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek
// Save for subsequent use
var hashedPayload string
var md5Base64 string
switch {
case body == nil:
switch body {
case nil:
hashedPayload = getSHA256Hash([]byte{})
default:
payloadBytes, err := io.ReadAll(body)

@@ -2393,7 +2390,7 @@ func unzipArchive(zipFilePath, targetDir string) error {
if err != nil {
return err
}
for _, file := range zipReader.Reader.File {
for _, file := range zipReader.File {
zippedFile, err := file.Open()
if err != nil {
return err
@@ -51,9 +51,9 @@ func (gcs *warmBackendGCS) PutWithMeta(ctx context.Context, key string, data io.
object := gcs.client.Bucket(gcs.Bucket).Object(gcs.getDest(key))
w := object.NewWriter(ctx)
if gcs.StorageClass != "" {
w.ObjectAttrs.StorageClass = gcs.StorageClass
w.StorageClass = gcs.StorageClass
}
w.ObjectAttrs.Metadata = meta
w.Metadata = meta
if _, err := xioutil.Copy(w, data); err != nil {
return "", gcsToObjectError(err, gcs.Bucket, key)
}

@@ -159,7 +159,7 @@ func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) {
a.Total = atomic.LoadInt64(&old.Total)
a.N = atomic.LoadInt64(&old.N)
e.mu.Lock()
e.lastMinuteLatency.addAll(t-1, a)
e.addAll(t-1, a)
e.mu.Unlock()
acc = newAcc
} else {

@@ -177,7 +177,7 @@ func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) {
func (e *lockedLastMinuteLatency) total() AccElem {
e.mu.Lock()
defer e.mu.Unlock()
return e.lastMinuteLatency.getTotal()
return e.getTotal()
}
func newXLStorageDiskIDCheck(storage *xlStorage, healthCheck bool) *xlStorageDiskIDCheck {
@ -1716,7 +1716,7 @@ func (s *xlStorage) ReadVersion(ctx context.Context, origvolume, volume, path, v
|
||||
// If written with header we are fine.
|
||||
return fi, nil
|
||||
}
|
||||
if fi.Size == 0 || !(fi.VersionID != "" && fi.VersionID != nullVersionID) {
|
||||
if fi.Size == 0 || (fi.VersionID == "" || fi.VersionID == nullVersionID) {
|
||||
// If versioned we have no conflicts.
|
||||
fi.SetInlineData()
|
||||
return fi, nil
|
||||
@ -3024,7 +3024,7 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum
|
||||
srcIsDir := HasSuffix(srcPath, SlashSeparator)
|
||||
dstIsDir := HasSuffix(dstPath, SlashSeparator)
|
||||
// Either src and dst have to be directories or files, else return error.
|
||||
if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
|
||||
if (!srcIsDir || !dstIsDir) && (srcIsDir || dstIsDir) {
|
||||
return errFileAccessDenied
|
||||
}
|
||||
srcFilePath := pathutil.Join(srcVolumeDir, srcPath)
|
||||

@ -1194,7 +1194,7 @@ func TestXLStorageReadFile(t *testing.T) {
expectErrno = uintptr(errno)
}
}
if !(expectErrno != 0 && resultErrno != 0 && expectErrno == resultErrno) {
if expectErrno == 0 || resultErrno == 0 || expectErrno != resultErrno {
t.Errorf("Case: %d %#v, expected: %s, got: %s", i+1, testCase, testCase.expectedErr, err)
}
}

@ -68,6 +68,7 @@ FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}
`
//nolint:staticcheck
isPart := regexp.MustCompile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/part\\.[0-9]+$")

app.HideHelpCommand = true
@ -1508,7 +1509,7 @@ func reconPartial(shards [][]byte, k int, parityOK []bool, splitData [][]byte, s
}
}
}
fmt.Println("Reconstructed", reconstructed, "bytes and verified", verified, "bytes of partial shard with config", shardConfig)
fmt.Println("Reconstructed", reconstructed, "bytes and verified", verified, "bytes of partial shard with config", string(shardConfig))
}

// bitrot returns a shard beginning at startOffset after doing bitrot checks.
@ -1555,6 +1556,7 @@ func shardSize(blockSize, dataBlocks int) (sz int) {
return
}

//nolint:staticcheck
var rePartNum = regexp.MustCompile("/part\\.([0-9]+)/")

func getPartNum(s string) int {

@ -82,7 +82,7 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start
hr, m, sec := expDate.Clock()
nsec := expDate.Nanosecond()
loc := expDate.Location()
if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) {
if hr != 0 || m != 0 || sec != 0 || nsec != 0 || loc.String() != time.UTC.String() {
return errLifecycleDateNotMidnight
}

@ -93,7 +93,7 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if eDate.Time.IsZero() {
if eDate.IsZero() {
return nil
}
return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
@ -202,7 +202,7 @@ func (e Expiration) IsDaysNull() bool {

// IsDateNull returns true if date field is null
func (e Expiration) IsDateNull() bool {
return e.Date.Time.IsZero()
return e.Date.IsZero()
}

// IsNull returns true if both date and days fields are null

@ -53,7 +53,7 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start
hr, m, sec := trnDate.Clock()
nsec := trnDate.Nanosecond()
loc := trnDate.Location()
if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) {
if hr != 0 || m != 0 || sec != 0 || nsec != 0 || loc.String() != time.UTC.String() {
return errTransitionDateNotMidnight
}

@ -64,7 +64,7 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (tDate TransitionDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if tDate.Time.IsZero() {
if tDate.IsZero() {
return nil
}
return e.EncodeElement(tDate.Format(time.RFC3339), startElement)
@ -151,7 +151,7 @@ func (t Transition) Validate() error {

// IsDateNull returns true if date field is null
func (t Transition) IsDateNull() bool {
return t.Date.Time.IsZero()
return t.Date.IsZero()
}

// IsNull returns true if both date and days fields are null

@ -176,7 +176,7 @@ func (c Config) HasExistingObjectReplication(arn string) (hasARN, isEnabled bool
// FilterActionableRules returns the rules actions that need to be executed
// after evaluating prefix/tag filtering
func (c Config) FilterActionableRules(obj ObjectOpts) []Rule {
if obj.Name == "" && !(obj.OpType == ResyncReplicationType || obj.OpType == AllReplicationType) {
if obj.Name == "" && (obj.OpType != ResyncReplicationType && obj.OpType != AllReplicationType) {
return nil
}
var rules []Rule

@ -29,7 +29,7 @@ type ARN struct {

// String - returns string representation.
func (arn ARN) String() string {
if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" {
if arn.ID == "" && arn.Name == "" && arn.region == "" {
return ""
}

@ -178,9 +178,7 @@ func (u UUIDHash) Sum(b []byte) []byte {
}

// Reset - implement hash.Hash Reset
func (u UUIDHash) Reset() {
return
}
func (u UUIDHash) Reset() {}

// Size - implement hash.Hash Size
func (u UUIDHash) Size() int {

@ -30,7 +30,7 @@ type ARN struct {

// String - returns string representation.
func (arn ARN) String() string {
if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" {
if arn.ID == "" && arn.Name == "" && arn.region == "" {
return ""
}

@ -55,9 +55,11 @@ type AMQPArgs struct {
QueueLimit uint64 `json:"queueLimit"`
}

//lint:file-ignore ST1003 We cannot change these exported names.

// AMQP input constants.
//
// ST1003 We cannot change these exported names.
//
//nolint:staticcheck
const (
AmqpQueueDir = "queue_dir"
AmqpQueueLimit = "queue_limit"

@ -62,11 +62,11 @@ type XDGSCRAMClient struct {
// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If
// SASLprep fails, the method returns an error.
func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
x.Client, err = x.NewClient(userName, password, authzID)
if err != nil {
return err
}
x.ClientConversation = x.Client.NewConversation()
x.ClientConversation = x.NewConversation()
return nil
}

@ -375,7 +375,7 @@ func (target *MySQLTarget) initMySQL() error {

err = target.db.Ping()
if err != nil {
if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) {
if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) {
target.loggerOnce(context.Background(), err, target.ID().String())
}
} else {

@ -243,7 +243,7 @@ func (target *NSQTarget) initNSQ() error {
err = target.producer.Ping()
if err != nil {
// To treat "connection refused" errors as errNotConnected.
if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) {
if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) {
target.loggerOnce(context.Background(), err, target.ID().String())
}
target.producer.Stop()

@ -376,7 +376,7 @@ func (target *PostgreSQLTarget) initPostgreSQL() error {

err = target.db.Ping()
if err != nil {
if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) {
if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) {
target.loggerOnce(context.Background(), err, target.ID().String())
}
} else {

@ -293,7 +293,7 @@ func (target *RedisTarget) initRedis() error {

_, pingErr := conn.Do("PING")
if pingErr != nil {
if !(xnet.IsConnRefusedErr(pingErr) || xnet.IsConnResetErr(pingErr)) {
if !xnet.IsConnRefusedErr(pingErr) && !xnet.IsConnResetErr(pingErr) {
target.loggerOnce(context.Background(), pingErr, target.ID().String())
}
return pingErr

@ -114,7 +114,6 @@ func (list *TargetList) incCurrentSendCalls(id TargetID) {

stats.currentSendCalls++
list.targetStats[id] = stats
return
}

func (list *TargetList) decCurrentSendCalls(id TargetID) {
@ -129,7 +128,6 @@ func (list *TargetList) decCurrentSendCalls(id TargetID) {

stats.currentSendCalls--
list.targetStats[id] = stats
return
}

func (list *TargetList) incFailedEvents(id TargetID) {
@ -143,7 +141,6 @@ func (list *TargetList) incFailedEvents(id TargetID) {

stats.failedEvents++
list.targetStats[id] = stats
return
}

func (list *TargetList) incTotalEvents(id TargetID) {
@ -157,7 +154,6 @@ func (list *TargetList) incTotalEvents(id TargetID) {

stats.totalEvents++
list.targetStats[id] = stats
return
}

type asyncEvent struct {

@ -466,19 +466,14 @@ func benchmarkGridStreamTwoway(b *testing.B, n int) {
// Send 10x requests.
Handle: func(ctx context.Context, payload []byte, in <-chan []byte, out chan<- []byte) *RemoteErr {
got := 0
for {
select {
case b, ok := <-in:
if !ok {
if got != messages {
return NewRemoteErrf("wrong number of requests. want %d, got %d", messages, got)
}
return nil
}
out <- b
got++
}
for b := range in {
out <- b
got++
}
if got != messages {
return NewRemoteErrf("wrong number of requests. want %d, got %d", messages, got)
}
return nil
},

Subroute: "some-subroute",

@ -1511,7 +1511,6 @@ func (c *Connection) handlePing(ctx context.Context, m message) {
pong := pongMsg{NotFound: true, T: ping.T}
gridLogIf(ctx, c.queueMsg(m, &pong))
}
return
}

func (c *Connection) handleDisconnectClientMux(m message) {

@ -192,7 +192,7 @@ func bytesOrLength(b []byte) string {
if len(b) > 100 {
return fmt.Sprintf("%d bytes", len(b))
}
return fmt.Sprint(b)
return fmt.Sprint(string(b))
}

// ConnDialer is a function that dials a connection to the given address.

@ -387,15 +387,14 @@ func (u *URLValues) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (u URLValues) Msgsize() (s int) {
s = msgp.MapHeaderSize
if u != nil {
for zb0006, zb0007 := range u {
_ = zb0007
s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize
for zb0008 := range zb0007 {
s += msgp.StringPrefixSize + len(zb0007[zb0008])
}
for zb0006, zb0007 := range u {
_ = zb0007
s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize
for zb0008 := range zb0007 {
s += msgp.StringPrefixSize + len(zb0007[zb0008])
}
}

return
}

@ -172,7 +172,7 @@ func (lrw *ResponseRecorder) WriteHeader(code int) {
if !lrw.headersLogged {
lrw.ttfbHeader = time.Now().UTC().Sub(lrw.StartTime)
lrw.StatusCode = code
lrw.writeHeaders(&lrw.headers, code, lrw.ResponseWriter.Header())
lrw.writeHeaders(&lrw.headers, code, lrw.Header())
lrw.headersLogged = true
lrw.ResponseWriter.WriteHeader(code)
}

@ -129,7 +129,7 @@ func (srv *Server) Init(listenCtx context.Context, listenErrCallback func(listen
}

serve = func() error {
return srv.Server.Serve(l)
return srv.Serve(l)
}

return

@ -147,8 +147,7 @@ func (w *Writer) compress() error {
}
defer gw.Close()

var wc io.WriteCloser
wc = gzip.NewWriter(gw)
var wc io.WriteCloser = gzip.NewWriter(gw)
if _, err = io.Copy(wc, r); err != nil {
return err
}

@ -147,7 +147,6 @@ func (r *ReqInfo) PopulateTagsMap(tagsMap map[string]string) {
for _, t := range r.tags {
tagsMap[t.Key] = t.Val
}
return
}

// SetReqInfo sets ReqInfo in the context.

@ -353,9 +353,9 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
if count < h.batchSize {
tickered := false
select {
case _ = <-ticker.C:
case <-ticker.C:
tickered = true
case entry, _ = <-globalBuffer:
case entry = <-globalBuffer:
case entry, ok = <-h.logCh:
if !ok {
return

@ -63,11 +63,11 @@ type XDGSCRAMClient struct {
// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If
// SASLprep fails, the method returns an error.
func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
x.Client, err = x.NewClient(userName, password, authzID)
if err != nil {
return err
}
x.ClientConversation = x.Client.NewConversation()
x.ClientConversation = x.NewConversation()
return nil
}

@ -284,7 +284,6 @@ func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) {
}

// Returns success.
return
}

// ErrClientClosed returned when *Client is closed.

@ -85,10 +85,8 @@ func (r *RingBuffer) SetBlocking(block bool) *RingBuffer {
// A goroutine will be started and run until the provided context is canceled.
func (r *RingBuffer) WithCancel(ctx context.Context) *RingBuffer {
go func() {
select {
case <-ctx.Done():
r.CloseWithError(ctx.Err())
}
<-ctx.Done()
r.CloseWithError(ctx.Err())
}()
return r
}