Run staticcheck on CI (#16170)

Klaus Post 2022-12-05 20:18:50 +01:00 committed by GitHub
parent a9f5b58a01
commit a713aee3d5
24 changed files with 140 additions and 162 deletions

View File

@@ -22,6 +22,7 @@ getdeps: ## fetch necessary dependencies
@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin
@echo "Installing msgp" && go install -v github.com/tinylib/msgp@f3635b96e4838a6c773babb65ef35297fe5fe2f9
@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest
+@echo "Installing staticcheck" && go install honnef.co/go/tools/cmd/staticcheck@latest
crosscompile: ## cross compile minio
@(env bash $(PWD)/buildscripts/cross-compile.sh)
@@ -35,6 +36,7 @@ check-gen: ## check for updated autogenerated files
lint: ## runs golangci-lint suite of linters
@echo "Running $@ check"
@${GOPATH}/bin/golangci-lint run --build-tags kqueue --timeout=10m --config ./.golangci.yml
+@${GOPATH}/bin/staticcheck --tests=false ./...
check: test
test: verifiers build ## builds minio, runs linters, tests
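The new lint step runs staticcheck across the whole tree, and most hunks below fix one of a handful of recurring diagnostics. As a hedged illustration (this file is invented, not part of the commit), ST1019 is the duplicate-import check behind the repeated alias cleanups in the files that follow:

// dupimport.go — hypothetical illustration only, not from this commit.
package main

import (
	"fmt"
	xfmt "fmt" // ST1019: package "fmt" is being imported more than once
)

func main() {
	fmt.Println("hello")
	xfmt.Println("world") // a second name for the same package
}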

View File

@@ -34,7 +34,6 @@ import (
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints from given list of endpoints
func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
-var localEndpoints Endpoints
addr := globalLocalNodeName
if r != nil {
addr = r.Host
@@ -52,7 +51,6 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = string(madmin.ItemOnline)
-localEndpoints = append(localEndpoints, endpoint)
continue
}
_, present := network[nodeName]
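The removed localEndpoints slice was appended to but never read, which staticcheck reports as dead code (SA4010). A minimal sketch of that pattern, with invented names:

// Hypothetical reduction of the dead append above; not from the commit.
package main

import "fmt"

func onlineCount(local []bool) int {
	var kept []int
	n := 0
	for i, isLocal := range local {
		if isLocal {
			n++
			kept = append(kept, i) // SA4010: this result of append is never used
		}
	}
	return n
}

func main() { fmt.Println(onlineCount([]bool{true, false, true})) }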

View File

@@ -2267,11 +2267,7 @@ func toAPIError(ctx context.Context, err error) APIError {
// their internal error types.
switch e := err.(type) {
case batchReplicationJobError:
-apiErr = APIError{
-Code: e.Code,
-Description: e.Description,
-HTTPStatusCode: e.HTTPStatusCode,
-}
+apiErr = APIError(e)
case InvalidArgument:
apiErr = APIError{
Code: "InvalidArgument",

View File

@@ -540,7 +540,7 @@ type RestoreObjectRequest struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ RestoreRequest" json:"-"`
Days int `xml:"Days,omitempty"`
Type RestoreRequestType `xml:"Type,omitempty"`
-Tier string `xml:"Tier,-"`
+Tier string `xml:"Tier"`
Description string `xml:"Description,omitempty"`
SelectParameters *SelectParameters `xml:"SelectParameters,omitempty"`
OutputLocation OutputLocation `xml:"OutputLocation,omitempty"`
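The one-character tag fix matters because "-" is only meaningful as the entire tag name (skip this field); "Tier,-" is an invalid option list that staticcheck rejects as a malformed struct tag. A short sketch of the valid forms, using an invented struct:

// Hypothetical struct demonstrating encoding/xml tag forms; not from the commit.
package main

import (
	"encoding/xml"
	"fmt"
)

type restoreRequest struct {
	Days int    `xml:"Days,omitempty"` // element with a valid option
	Tier string `xml:"Tier"`           // plain element name (the fix above)
	Note string `xml:"-"`              // correctly skipped entirely
}

func main() {
	out, _ := xml.Marshal(restoreRequest{Days: 1, Tier: "Standard", Note: "ignored"})
	fmt.Println(string(out))
}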

View File

@@ -36,7 +36,6 @@ import (
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7"
-miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/bucket/bandwidth"
@@ -568,9 +567,9 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
}
// early return if already replicated delete marker for existing object replication/ healing delete markers
if dobj.DeleteMarkerVersionID != "" {
-toi, err := tgt.StatObject(ctx, tgt.Bucket, dobj.ObjectName, miniogo.StatObjectOptions{
+toi, err := tgt.StatObject(ctx, tgt.Bucket, dobj.ObjectName, minio.StatObjectOptions{
VersionID: versionID,
-Internal: miniogo.AdvancedGetOptions{
+Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
IsReplicationReadyForDeleteMarker: true,
},
@@ -589,12 +588,12 @@
}
}
-rmErr := tgt.RemoveObject(ctx, tgt.Bucket, dobj.ObjectName, miniogo.RemoveObjectOptions{
+rmErr := tgt.RemoveObject(ctx, tgt.Bucket, dobj.ObjectName, minio.RemoveObjectOptions{
VersionID: versionID,
-Internal: miniogo.AdvancedRemoveOptions{
+Internal: minio.AdvancedRemoveOptions{
ReplicationDeleteMarker: dobj.DeleteMarkerVersionID != "",
ReplicationMTime: dobj.DeleteMarkerMTime.Time,
-ReplicationStatus: miniogo.ReplicationStatusReplica,
+ReplicationStatus: minio.ReplicationStatusReplica,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
})
@@ -680,7 +679,7 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
return "", false
}
-func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) {
+func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
@@ -695,14 +694,14 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) {
sc = objInfo.StorageClass
}
-putOpts = miniogo.PutObjectOptions{
+putOpts = minio.PutObjectOptions{
UserMetadata: meta,
ContentType: objInfo.ContentType,
ContentEncoding: objInfo.ContentEncoding,
StorageClass: sc,
-Internal: miniogo.AdvancedPutOptions{
+Internal: minio.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
-ReplicationStatus: miniogo.ReplicationStatusReplica,
+ReplicationStatus: minio.ReplicationStatusReplica,
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
@@ -735,7 +734,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
putOpts.CacheControl = cc
}
if mode, ok := lkMap.Lookup(xhttp.AmzObjectLockMode); ok {
-rmode := miniogo.RetentionMode(mode)
+rmode := minio.RetentionMode(mode)
putOpts.Mode = rmode
}
if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok {
@@ -758,7 +757,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put
putOpts.Internal.RetentionTimestamp = retTimestamp
}
if lhold, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok {
-putOpts.LegalHold = miniogo.LegalHoldStatus(lhold)
+putOpts.LegalHold = minio.LegalHoldStatus(lhold)
// set legalhold timestamp in opts
lholdTimestamp := objInfo.ModTime
if lholdTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok {
@@ -1126,7 +1125,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
rinfo.Size = size
rinfo.ReplicationAction = rAction
// use core client to avoid doing multipart on PUT
-c := &miniogo.Core{Client: tgt.Client}
+c := &minio.Core{Client: tgt.Client}
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil {
@@ -1271,9 +1270,9 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}()
rAction = replicateAll
-oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, miniogo.StatObjectOptions{
+oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, minio.StatObjectOptions{
VersionID: objInfo.VersionID,
-Internal: miniogo.AdvancedGetOptions{
+Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
})
@@ -1309,16 +1308,16 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
rinfo.Size = size
rinfo.ReplicationAction = rAction
// use core client to avoid doing multipart on PUT
-c := &miniogo.Core{Client: tgt.Client}
+c := &minio.Core{Client: tgt.Client}
if rAction != replicateAll {
// replicate metadata for object tagging/copy with metadata replacement
-srcOpts := miniogo.CopySrcOptions{
+srcOpts := minio.CopySrcOptions{
Bucket: tgt.Bucket,
Object: object,
VersionID: objInfo.VersionID,
}
-dstOpts := miniogo.PutObjectOptions{
-Internal: miniogo.AdvancedPutOptions{
+dstOpts := minio.PutObjectOptions{
+Internal: minio.AdvancedPutOptions{
SourceVersionID: objInfo.VersionID,
ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside
},
@@ -1372,8 +1371,8 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
return
}
-func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts miniogo.PutObjectOptions) (err error) {
-var uploadedParts []miniogo.CompletePart
+func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts minio.PutObjectOptions) (err error) {
+var uploadedParts []minio.CompletePart
uploadID, err := c.NewMultipartUpload(context.Background(), bucket, object, opts)
if err != nil {
return err
@@ -1399,7 +1398,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket,
var (
hr *hash.Reader
-pInfo miniogo.ObjectPart
+pInfo minio.ObjectPart
)
for _, partInfo := range objInfo.Parts {
@@ -1414,13 +1413,13 @@ func replicateObjectWithMultipart(ctx context.Context, c *miniogo.Core, bucket,
if pInfo.Size != partInfo.ActualSize {
return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
}
-uploadedParts = append(uploadedParts, miniogo.CompletePart{
+uploadedParts = append(uploadedParts, minio.CompletePart{
PartNumber: pInfo.PartNumber,
ETag: pInfo.ETag,
})
}
-_, err = c.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, miniogo.PutObjectOptions{
-Internal: miniogo.AdvancedPutOptions{
+_, err = c.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
+Internal: minio.AdvancedPutOptions{
SourceMTime: objInfo.ModTime,
// always set this to distinguish between `mc mirror` replication and serverside
ReplicationRequest: true,
@@ -1874,7 +1873,7 @@ type proxyResult struct {
// get Reader from replication target if active-active replication is in place and
// this node returns a 404
-func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
+func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, _ http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) {
tgt, oi, proxy := proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets)
if !proxy.Proxy {
return nil, proxy, nil
@@ -1883,10 +1882,10 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
if err != nil {
return nil, proxy, err
}
-gopts := miniogo.GetObjectOptions{
+gopts := minio.GetObjectOptions{
VersionID: opts.VersionID,
ServerSideEncryption: opts.ServerSideEncryption,
-Internal: miniogo.AdvancedGetOptions{
+Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "true",
},
PartNumber: opts.PartNumber,
@@ -1903,7 +1902,7 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
if err = gopts.SetMatchETag(oi.ETag); err != nil {
return nil, proxy, err
}
-c := miniogo.Core{Client: tgt.Client}
+c := minio.Core{Client: tgt.Client}
obj, _, h, err := c.GetObject(ctx, bucket, object, gopts)
if err != nil {
return nil, proxy, err
@@ -1963,10 +1962,10 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRa
continue
}
-gopts := miniogo.GetObjectOptions{
+gopts := minio.GetObjectOptions{
VersionID: opts.VersionID,
ServerSideEncryption: opts.ServerSideEncryption,
-Internal: miniogo.AdvancedGetOptions{
+Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "true",
},
PartNumber: opts.PartNumber,
@@ -2356,9 +2355,9 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
roi.EventType = ReplicateExisting
replicateObject(ctx, roi, objectAPI)
}
-_, err = tgt.StatObject(ctx, tgt.Bucket, roi.Name, miniogo.StatObjectOptions{
+_, err = tgt.StatObject(ctx, tgt.Bucket, roi.Name, minio.StatObjectOptions{
VersionID: roi.VersionID,
-Internal: miniogo.AdvancedGetOptions{
+Internal: minio.AdvancedGetOptions{
ReplicationProxyRequest: "false",
},
})
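Besides replacing the redundant miniogo alias with the package's canonical name throughout this file, the commit renames the unused h http.Header parameter of proxyGetToReplicationTarget to _. A minimal sketch of that convention, with invented names:

// Hypothetical function illustrating the blank-identifier parameter: the
// signature stays stable for callers while documenting that the header
// argument is intentionally ignored.
package main

import (
	"fmt"
	"net/http"
)

func fetch(bucket, object string, _ http.Header) string {
	return bucket + "/" + object
}

func main() {
	fmt.Println(fetch("photos", "cat.png", http.Header{}))
}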

View File

@@ -27,7 +27,6 @@ import (
jsoniter "github.com/json-iterator/go"
"github.com/minio/madmin-go"
"github.com/minio/minio-go/v7"
-miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/crypto"
@@ -442,7 +441,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T
config := tcfg.Credentials
creds := credentials.NewStaticV4(config.AccessKey, config.SecretKey, "")
-api, err := minio.New(tcfg.Endpoint, &miniogo.Options{
+api, err := minio.New(tcfg.Endpoint, &minio.Options{
Creds: creds,
Secure: tcfg.Secure,
Region: tcfg.Region,
@@ -549,7 +548,7 @@ func parseBucketTargetConfig(bucket string, cdata, cmetadata []byte) (*madmin.Bu
// TargetClient is the struct for remote target client.
type TargetClient struct {
-*miniogo.Client
+*minio.Client
healthCheckDuration time.Duration
Bucket string // remote bucket target
replicateSync bool

View File

@@ -61,6 +61,7 @@ const (
scannerMetricScanFolder // Scan a folder on disk, recursively.
scannerMetricScanCycle // Full cycle, cluster global
scannerMetricScanBucketDrive // Single bucket on one drive
+scannerMetricCompactFolder // Folder compacted.
// Must be last:
scannerMetricLast
@@ -194,7 +195,7 @@ func (p *scannerMetrics) activeDrives() int {
// lifetime returns the lifetime count of the specified metric.
func (p *scannerMetrics) lifetime(m scannerMetric) uint64 {
-if m < 0 || m >= scannerMetricLast {
+if m >= scannerMetricLast {
return 0
}
val := atomic.LoadUint64(&p.operations[m])
@@ -204,7 +205,7 @@ func (p *scannerMetrics) lifetime(m scannerMetric) uint64 {
// lastMinute returns the last minute statistics of a metric.
// m should be < scannerMetricLastRealtime
func (p *scannerMetrics) lastMinute(m scannerMetric) AccElem {
-if m < 0 || m >= scannerMetricLastRealtime {
+if m >= scannerMetricLastRealtime {
return AccElem{}
}
val := p.latency[m].total()
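Both guards drop the m < 0 clause: assuming scannerMetric is an unsigned type (which is what makes staticcheck flag the comparison as always false, SA4003), the clause can never be true. A compact reproduction with an invented type:

// Hypothetical unsigned metric type mirroring the always-false comparison.
package main

import "fmt"

type metric uint8

const metricLast metric = 19

func valid(m metric) bool {
	// Before: m < 0 || m >= metricLast — the first clause is always false
	// for an unsigned type, so only the upper bound remains.
	return m < metricLast
}

func main() { fmt.Println(valid(3), valid(200)) }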

View File

@@ -813,9 +813,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
flat.Compacted = true
var compact bool
if flat.Objects < dataScannerCompactLeastObject {
-if f.dataUsageScannerDebug && flat.Objects > 1 {
-// Disabled, rather chatty:
-// console.Debugf(scannerLogPrefix+" Only %d objects, compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+if flat.Objects > 1 {
+globalScannerMetrics.log(scannerMetricCompactFolder, folder.name)
}
compact = true
} else {
@@ -829,9 +828,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
}
}
}
-if f.dataUsageScannerDebug && compact {
-// Disabled, rather chatty:
-// console.Debugf(scannerLogPrefix+" Only objects (%d), compacting %s -> %+v\n", flat.Objects, folder.name, flat)
+if compact {
+globalScannerMetrics.log(scannerMetricCompactFolder, folder.name)
}
}
if compact {
@@ -945,10 +943,6 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
logger.LogIf(ctx, err)
}
if i.lifeCycle == nil {
-if i.debug {
-// disabled, very chatty:
-// console.Debugf(applyActionsLogPrefix+" no lifecycle rules to apply: %q\n", i.objectPath())
-}
return false, size
}

View File

@@ -207,7 +207,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
}
// Returns all the expanded endpoints, each argument is expanded separately.
-func (s endpointSet) getEndpoints() (endpoints []string) {
+func (s *endpointSet) getEndpoints() (endpoints []string) {
if len(s.endpoints) != 0 {
return s.endpoints
}
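The receiver switches from value to pointer. Assuming the elided body memoizes its result into s.endpoints, as the guard at the top suggests, a value receiver would write to a copy and silently lose the cache — the lost field write staticcheck reports as SA4005. A sketch under that assumption:

// Hypothetical memoizing method; with a value receiver the cached-field
// assignment below would not survive the call.
package main

import "fmt"

type set struct{ cached []string }

func (s *set) items() []string {
	if len(s.cached) != 0 {
		return s.cached
	}
	s.cached = []string{"a", "b"} // persists only via the pointer receiver
	return s.cached
}

func main() {
	var s set
	s.items()
	fmt.Println(len(s.cached)) // 2; a value receiver would leave it 0
}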

View File

@@ -636,10 +636,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
if errors.Is(reducedErr, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) {
-_, derr := er.deleteIfDangling(ctx, bucket, object, metaArr, errs, nil, opts)
-if derr != nil {
-err = derr
-}
+er.deleteIfDangling(ctx, bucket, object, metaArr, errs, nil, opts)
}
return fi, nil, nil, toObjectErr(reducedErr, bucket, object)
}

View File

@@ -811,7 +811,6 @@ func (z *erasureServerPools) StartRebalance() {
stopfn(err)
}(poolIdx)
}
-return
}
// StopRebalance signals the rebalance goroutine running on this node (if any)

View File

@@ -2191,9 +2191,7 @@ func (z *erasureServerPools) ReadHealth(ctx context.Context) bool {
b := z.BackendInfo()
poolReadQuorums := make([]int, len(b.StandardSCData))
-for i, data := range b.StandardSCData {
-poolReadQuorums[i] = data
-}
+copy(poolReadQuorums, b.StandardSCData)
for poolIdx := range erasureSetUpCount {
for setIdx := range erasureSetUpCount[poolIdx] {
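The element-by-element loop becomes the builtin copy (staticcheck S1001); the destination was already sized with make, so the two forms are equivalent:

// Minimal demonstration of the S1001 rewrite.
package main

import "fmt"

func main() {
	src := []int{3, 2, 4}
	dst := make([]int, len(src))
	copy(dst, src) // replaces: for i, v := range src { dst[i] = v }
	fmt.Println(dst)
}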

View File

@@ -24,7 +24,6 @@ import (
"net/url"
"github.com/minio/minio/internal/dsync"
-"github.com/minio/minio/internal/http"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/rest"
)
@@ -95,7 +94,7 @@ func (client *lockRESTClient) restCall(ctx context.Context, call string, args ds
defer metaDataPoolPut(argsBytes)
body := bytes.NewReader(argsBytes)
respBody, err := client.callWithContext(ctx, call, nil, body, body.Size())
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
switch err {
case nil:
return true, nil

View File

@@ -49,7 +49,7 @@ func (o *listPathOptions) parseMarker() {
switch kv[0] {
case "minio_cache":
if kv[1] != markerTagVersion {
-break
+continue
}
case "id":
o.ID = kv[1]
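The break-to-continue change fixes real control flow: inside a switch, break only exits the switch, so the old code fell through to the rest of the loop body on a version mismatch. staticcheck flags such ineffective breaks (SA4011). A compact reproduction with invented data:

// Hypothetical loop mirroring the bug: a break would only leave the switch.
package main

import "fmt"

func main() {
	for _, kv := range [][2]string{{"minio_cache", "v1"}, {"id", "abc"}} {
		switch kv[0] {
		case "minio_cache":
			if kv[1] != "v2" {
				continue // with break, execution would still reach the line below
			}
		case "id":
			fmt.Println("id =", kv[1])
		}
		fmt.Println("processed", kv[0])
	}
}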

View File

@@ -31,7 +31,6 @@ import (
"github.com/minio/madmin-go"
"github.com/minio/minio/internal/event"
-"github.com/minio/minio/internal/http"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/logger/message/log"
@@ -92,7 +91,7 @@ func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo
return
}
lockMap = map[string][]lockRequesterInfo{}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&lockMap)
return lockMap, err
}
@@ -103,7 +102,7 @@ func (client *peerRESTClient) LocalStorageInfo() (info StorageInfo, err error) {
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -114,7 +113,7 @@ func (client *peerRESTClient) ServerInfo() (info madmin.ServerProperties, err er
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -125,7 +124,7 @@ func (client *peerRESTClient) GetCPUs(ctx context.Context) (info madmin.CPUs, er
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -136,7 +135,7 @@ func (client *peerRESTClient) GetPartitions(ctx context.Context) (info madmin.Pa
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -147,7 +146,7 @@ func (client *peerRESTClient) GetOSInfo(ctx context.Context) (info madmin.OSInfo
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -158,7 +157,7 @@ func (client *peerRESTClient) GetSELinuxInfo(ctx context.Context) (info madmin.S
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -171,7 +170,7 @@ func (client *peerRESTClient) GetSysConfig(ctx context.Context) (info madmin.Sys
return
}
roundtrip := int32(time.Since(sent).Milliseconds())
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
cfg := info.Config["time-info"]
@@ -189,7 +188,7 @@ func (client *peerRESTClient) GetSysErrors(ctx context.Context) (info madmin.Sys
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -200,7 +199,7 @@ func (client *peerRESTClient) GetMemInfo(ctx context.Context) (info madmin.MemIn
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -219,7 +218,7 @@ func (client *peerRESTClient) GetMetrics(ctx context.Context, t madmin.MetricTyp
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -230,7 +229,7 @@ func (client *peerRESTClient) GetProcInfo(ctx context.Context) (info madmin.Proc
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
}
@@ -243,7 +242,7 @@ func (client *peerRESTClient) StartProfiling(profiler string) error {
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -253,7 +252,7 @@ func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&data)
return data, err
}
@@ -268,7 +267,7 @@ func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error)
}
var bs BucketStats
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return bs, msgp.Decode(respBody, &bs)
}
@@ -281,7 +280,7 @@ func (client *peerRESTClient) GetAllBucketStats() (BucketStatsMap, error) {
}
bsMap := BucketStatsMap{}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return bsMap, msgp.Decode(respBody, &bsMap)
}
@@ -293,7 +292,7 @@ func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -305,7 +304,7 @@ func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -324,7 +323,7 @@ func (client *peerRESTClient) cycleServerBloomFilter(ctx context.Context, req bl
return nil, err
}
var resp bloomFilterResponse
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return &resp, gob.NewDecoder(respBody).Decode(&resp)
}
@@ -337,7 +336,7 @@ func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -350,7 +349,7 @@ func (client *peerRESTClient) LoadPolicy(policyName string) (err error) {
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -367,7 +366,7 @@ func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, userType IAM
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -380,7 +379,7 @@ func (client *peerRESTClient) DeleteUser(accessKey string) (err error) {
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -393,7 +392,7 @@ func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error)
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -407,7 +406,7 @@ func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error)
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -420,7 +419,7 @@ func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) {
if err != nil {
return
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -432,7 +431,7 @@ func (client *peerRESTClient) LoadGroup(group string) error {
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -459,7 +458,7 @@ func (client *peerRESTClient) VerifyBinary(ctx context.Context, u *url.URL, sha2
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -469,7 +468,7 @@ func (client *peerRESTClient) CommitBinary(ctx context.Context) error {
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -482,7 +481,7 @@ func (client *peerRESTClient) SignalService(sig serviceSignal, subSys string) er
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -491,7 +490,7 @@ func (client *peerRESTClient) BackgroundHealStatus() (madmin.BgHealState, error)
if err != nil {
return madmin.BgHealState{}, err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
state := madmin.BgHealState{}
err = gob.NewDecoder(respBody).Decode(&state)
@@ -505,7 +504,7 @@ func (client *peerRESTClient) GetLocalDiskIDs(ctx context.Context) (diskIDs []st
logger.LogIf(ctx, err)
return nil
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
if err = gob.NewDecoder(respBody).Decode(&diskIDs); err != nil {
logger.LogIf(ctx, err)
return nil
@@ -531,7 +530,7 @@ func (client *peerRESTClient) GetMetacacheListing(ctx context.Context, o listPat
return nil, err
}
var resp metacache
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return &resp, msgp.Decode(respBody, &resp)
}
@@ -549,7 +548,7 @@ func (client *peerRESTClient) UpdateMetacacheListing(ctx context.Context, m meta
logger.LogIf(ctx, err)
return m, err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
var resp metacache
return resp, msgp.Decode(respBody, &resp)
}
@@ -560,7 +559,7 @@ func (client *peerRESTClient) ReloadPoolMeta(ctx context.Context) error {
logger.LogIf(ctx, err)
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -570,7 +569,7 @@ func (client *peerRESTClient) StopRebalance(ctx context.Context) error {
logger.LogIf(ctx, err)
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -582,7 +581,7 @@ func (client *peerRESTClient) LoadRebalanceMeta(ctx context.Context, startRebala
logger.LogIf(ctx, err)
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -592,7 +591,7 @@ func (client *peerRESTClient) LoadTransitionTierConfig(ctx context.Context) erro
logger.LogIf(ctx, err)
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -615,7 +614,7 @@ func (client *peerRESTClient) doTrace(traceCh chan<- madmin.TraceInfo, doneCh <-
}()
respBody, err := client.callWithContext(ctx, peerRESTMethodTrace, values, nil, -1)
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
if err != nil {
return
@@ -653,7 +652,7 @@ func (client *peerRESTClient) doListen(listenCh chan<- event.Event, doneCh <-cha
}()
respBody, err := client.callWithContext(ctx, peerRESTMethodListen, v, nil, -1)
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
if err != nil {
return
@@ -723,7 +722,7 @@ func (client *peerRESTClient) doConsoleLog(logCh chan log.Info, doneCh <-chan st
}()
respBody, err := client.callWithContext(ctx, peerRESTMethodLog, nil, nil, -1)
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
if err != nil {
return
}
@@ -822,7 +821,7 @@ func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []st
if err != nil {
return nil, err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
dec := gob.NewDecoder(respBody)
var bandwidthReport madmin.BucketBandwidthReport
@@ -841,7 +840,7 @@ func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric
for {
var metric Metric
if err := dec.Decode(&metric); err != nil {
-http.DrainBody(respBody)
+xhttp.DrainBody(respBody)
close(ch)
return
}
@@ -863,7 +862,7 @@ func (client *peerRESTClient) SpeedTest(ctx context.Context, opts speedTestOpts)
if err != nil {
return SpeedTestResult{}, err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
waitReader, err := waitForHTTPResponse(respBody)
if err != nil {
return SpeedTestResult{}, err
@@ -892,7 +891,7 @@ func (client *peerRESTClient) DriveSpeedTest(ctx context.Context, opts madmin.Dr
if err != nil {
return madmin.DriveSpeedTestResult{}, err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
waitReader, err := waitForHTTPResponse(respBody)
if err != nil {
return madmin.DriveSpeedTestResult{}, err
@@ -914,7 +913,7 @@ func (client *peerRESTClient) ReloadSiteReplicationConfig(ctx context.Context) e
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return nil
}
@@ -924,7 +923,7 @@ func (client *peerRESTClient) GetLastDayTierStats(ctx context.Context) (DailyAll
if err != nil {
return result, err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&result)
if err != nil {
@@ -939,7 +938,7 @@ func (client *peerRESTClient) DevNull(ctx context.Context, r io.Reader) error {
if err != nil {
return err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
return err
}
@@ -952,7 +951,7 @@ func (client *peerRESTClient) Netperf(ctx context.Context, duration time.Duratio
if err != nil {
return result, err
}
-defer http.DrainBody(respBody)
+defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&result)
return result, err
}

View File

@@ -26,12 +26,13 @@ func _() {
_ = x[scannerMetricScanFolder-15]
_ = x[scannerMetricScanCycle-16]
_ = x[scannerMetricScanBucketDrive-17]
-_ = x[scannerMetricLast-18]
+_ = x[scannerMetricCompactFolder-18]
+_ = x[scannerMetricLast-19]
}
-const _scannerMetric_name = "ReadMetadataCheckMissingSaveUsageApplyAllApplyVersionTierObjSweepHealCheckILMCheckReplicationYieldCleanAbandonedApplyNonCurrentStartTraceScanObjectLastRealtimeScanFolderScanCycleScanBucketDriveLast"
+const _scannerMetric_name = "ReadMetadataCheckMissingSaveUsageApplyAllApplyVersionTierObjSweepHealCheckILMCheckReplicationYieldCleanAbandonedApplyNonCurrentStartTraceScanObjectLastRealtimeScanFolderScanCycleScanBucketDriveCompactFolderLast"
-var _scannerMetric_index = [...]uint8{0, 12, 24, 33, 41, 53, 65, 74, 77, 93, 98, 112, 127, 137, 147, 159, 169, 178, 193, 197}
+var _scannerMetric_index = [...]uint8{0, 12, 24, 33, 41, 53, 65, 74, 77, 93, 98, 112, 127, 137, 147, 159, 169, 178, 193, 206, 210}
func (i scannerMetric) String() string {
if i >= scannerMetric(len(_scannerMetric_index)-1) {

View File

@@ -40,6 +40,7 @@ func (ld sharedLock) backgroundRoutine(ctx context.Context, objAPI ObjectLayer,
continue
}
+keepLock:
for {
select {
case <-ctx.Done():
@@ -48,7 +49,7 @@
// The context of the lock is canceled, this can happen
// if one lock lost quorum due to cluster instability
// in that case, try to lock again.
-break
+break keepLock
case ld.lockContext <- lkctx:
// Send the lock context to anyone asking for it
}
@@ -70,10 +71,8 @@ func mergeContext(ctx1, ctx2 context.Context) (context.Context, context.CancelFu
}
func (ld sharedLock) GetLock(ctx context.Context) (context.Context, context.CancelFunc) {
-select {
-case l := <-ld.lockContext:
+l := <-ld.lockContext
return mergeContext(l.Context(), ctx)
-}
}
func newSharedLock(ctx context.Context, objAPI ObjectLayer, lockName string) *sharedLock {
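Two idioms meet in this file: leaving a for loop from inside a select requires a labeled break, because a bare break only exits the select (the ineffective break staticcheck flags as SA4011), and a select with a single case, as in GetLock's old body, reduces to a plain channel receive (S1000). A self-contained sketch of both:

// Hypothetical channels demonstrating the labeled break and the S1000
// single-case-select simplification.
package main

import "fmt"

func main() {
	done := make(chan struct{})
	work := make(chan int, 3)
	work <- 1
	work <- 2
	close(done)

loop:
	for {
		select {
		case <-done:
			break loop // a bare break here would keep the loop running
		case v := <-work:
			fmt.Println("got", v)
		}
	}

	work <- 9   // refill so the receive below cannot block
	v := <-work // S1000: equivalent to select { case v := <-work: }
	fmt.Println("direct receive", v)
}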

View File

@@ -42,7 +42,6 @@ import (
"github.com/minio/minio/internal/auth"
sreplication "github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/logger"
-"github.com/minio/pkg/bucket/policy"
bktpolicy "github.com/minio/pkg/bucket/policy"
iampolicy "github.com/minio/pkg/iam/policy"
)
@@ -1355,7 +1354,7 @@ func (c *SiteReplicationSys) PeerBucketVersioningHandler(ctx context.Context, bu
}
// PeerBucketPolicyHandler - copies/deletes policy to local cluster.
-func (c *SiteReplicationSys) PeerBucketPolicyHandler(ctx context.Context, bucket string, policy *policy.Policy, updatedAt time.Time) error {
+func (c *SiteReplicationSys) PeerBucketPolicyHandler(ctx context.Context, bucket string, policy *bktpolicy.Policy, updatedAt time.Time) error {
// skip overwrite if local update is newer than peer update.
if !updatedAt.IsZero() {
if _, updateTm, err := globalBucketMetadataSys.GetPolicyConfig(bucket); err == nil && updateTm.After(updatedAt) {

View File

@@ -320,7 +320,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) {
return n, cr.err
}
b, err = cr.reader.ReadByte()
-if b != '\r' {
+if b != '\r' || err != nil {
cr.err = errMalformedEncoding
return n, cr.err
}

View File

@@ -241,6 +241,7 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in
rc.Close()
<-asyncWriters
wg.Done()
+//lint:ignore SA6002 we are fine with the tiny alloc
poolBuf128k.Put(b)
}()
if err := putObject(&rc, fi, name); err != nil {
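The lint:ignore directive addresses SA6002: putting a non-pointer value such as a []byte into a sync.Pool boxes the slice header into an interface{}, costing one small allocation per Put. Storing a *[]byte would avoid that, but the commit accepts the tiny allocation and silences the check instead. A sketch of the pattern with an invented pool:

// Hypothetical buffer pool mirroring the directive's use above.
package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 128<<10) },
}

func main() {
	b := bufPool.Get().([]byte)
	fmt.Println("buffer size:", len(b))
	//lint:ignore SA6002 we are fine with the tiny alloc
	bufPool.Put(b)
}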

View File

@@ -28,8 +28,7 @@ import (
"time"
"github.com/minio/madmin-go"
-minio "github.com/minio/minio-go/v7"
-miniogo "github.com/minio/minio-go/v7"
+"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
@@ -80,7 +79,7 @@ func (s3 *warmBackendS3) Get(ctx context.Context, object string, rv remoteVersio
return nil, s3.ToObjectError(err, object)
}
}
-c := &miniogo.Core{Client: s3.client}
+c := &minio.Core{Client: s3.client}
// Important to use core primitives here to pass range get options as is.
r, _, _, err := c.GetObject(ctx, s3.Bucket, s3.getDest(object), gopts)
if err != nil {

View File

@@ -678,6 +678,7 @@ func metaDataPoolGet() []byte {
// metaDataPoolPut will put an unused small buffer back into the pool.
func metaDataPoolPut(buf []byte) {
if cap(buf) >= metaDataReadDefault && cap(buf) < metaDataReadDefault*4 {
+//lint:ignore SA6002 we are fine with the tiny alloc
metaDataPool.Put(buf)
}
}
@@ -1845,11 +1846,9 @@
}
var latest xlMetaV2ShallowVersion
-var latestCount int
if consistent {
// All had the same signature, easy.
latest = tops[0]
-latestCount = len(tops)
merged = append(merged, latest)
// Calculate latest 'n' non-free versions.
@@ -1859,6 +1858,7 @@
} else {
// Find latest.
+var latestCount int
for i, ver := range tops {
if ver.header == latest.header {
latestCount++

View File

@@ -264,7 +264,7 @@ func (o *AuthNPlugin) Authenticate(roleArn arn.ARN, token string) (AuthNResponse
return AuthNResponse{}, fmt.Errorf("Invalid role ARN value: %s", roleArn.String())
}
-var u url.URL = url.URL(*o.args.URL)
+u := url.URL(*o.args.URL)
q := u.Query()
q.Set("token", token)
u.RawQuery = q.Encode()
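The declaration shrinks because the explicit type repeats what the conversion already states; staticcheck's declaration-style checks (ST1023 and friends) suggest the short form. A sketch with an invented named type standing in for the plugin's URL wrapper:

// Hypothetical named type; the conversion makes the variable's type obvious,
// so spelling it out again in the declaration is redundant.
package main

import (
	"fmt"
	"net/url"
)

type xURL url.URL

func build(args xURL) string {
	// Before: var u url.URL = url.URL(args)
	u := url.URL(args)
	q := u.Query()
	q.Set("token", "t123")
	u.RawQuery = q.Encode()
	return u.String()
}

func main() {
	base, _ := url.Parse("https://example.com/authn")
	fmt.Println(build(xURL(*base)))
}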

View File

@@ -103,8 +103,7 @@ func NewWithConfig(config Config) (KMS, error) {
go func() {
for {
var prevCertificate tls.Certificate
-select {
-case certificate, ok := <-config.ReloadCertEvents:
+certificate, ok := <-config.ReloadCertEvents
if !ok {
return
}
@@ -133,7 +132,6 @@ func NewWithConfig(config Config) (KMS, error) {
prevCertificate = certificate
}
}
-}
}()
return c, nil
}