From 49727355071cfb39bc38b2ce0a4020d30bc0f74b Mon Sep 17 00:00:00 2001 From: Klaus Post Date: Mon, 11 Nov 2024 06:51:43 -0800 Subject: [PATCH] Fix lint issues from v1.62.0 upgrade (#20633) * Fix lint issues from v1.62.0 upgrade * Fix xlMetaV2TrimData version checks. --- cmd/bucket-stats.go | 14 +++++----- cmd/dynamic-timeouts.go | 12 ++++----- cmd/erasure-metadata-utils.go | 26 +++++++++--------- cmd/erasure-server-pool.go | 6 ++--- cmd/metacache-set.go | 16 +++++------ cmd/object-handlers-common.go | 8 +++--- cmd/os_windows.go | 36 ++++++++++++------------- cmd/postpolicyform.go | 8 +++--- cmd/test-utils_test.go | 8 +++--- cmd/xl-storage-meta-inline.go | 6 ++--- internal/bucket/lifecycle/expiration.go | 4 +-- internal/bucket/lifecycle/transition.go | 4 +-- internal/config/dns/operator_dns.go | 4 +-- internal/config/drive/drive.go | 4 +-- internal/dsync/utils.go | 8 +++--- internal/http/transports.go | 7 +++-- internal/rest/client.go | 6 ++--- 17 files changed, 89 insertions(+), 88 deletions(-) diff --git a/cmd/bucket-stats.go b/cmd/bucket-stats.go index afb885089..20b4ebebf 100644 --- a/cmd/bucket-stats.go +++ b/cmd/bucket-stats.go @@ -108,18 +108,18 @@ func (l ReplicationLastHour) merge(o ReplicationLastHour) (merged ReplicationLas // Add a new duration data func (l *ReplicationLastHour) addsize(sz int64) { - min := time.Now().Unix() / 60 - l.forwardTo(min) - winIdx := min % 60 - l.Totals[winIdx].merge(AccElem{Total: min, Size: sz, N: 1}) - l.LastMin = min + minutes := time.Now().Unix() / 60 + l.forwardTo(minutes) + winIdx := minutes % 60 + l.Totals[winIdx].merge(AccElem{Total: minutes, Size: sz, N: 1}) + l.LastMin = minutes } // Merge all recorded counts of last hour into one func (l *ReplicationLastHour) getTotal() AccElem { var res AccElem - min := time.Now().Unix() / 60 - l.forwardTo(min) + minutes := time.Now().Unix() / 60 + l.forwardTo(minutes) for _, elem := range l.Totals[:] { res.merge(elem) } diff --git a/cmd/dynamic-timeouts.go b/cmd/dynamic-timeouts.go index 23d3b1266..04c78c63a 100644 --- a/cmd/dynamic-timeouts.go +++ b/cmd/dynamic-timeouts.go @@ -117,12 +117,12 @@ func (dt *dynamicTimeout) logEntry(duration time.Duration) { // adjust changes the value of the dynamic timeout based on the // previous results func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) { - failures, max := 0, time.Duration(0) + failures, maxDur := 0, time.Duration(0) for _, dur := range entries[:] { if dur == maxDuration { failures++ - } else if dur > max { - max = dur + } else if dur > maxDur { + maxDur = dur } } @@ -144,12 +144,12 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) { } else if failPct < dynamicTimeoutDecreaseThresholdPct { // We are hitting the timeout relatively few times, // so decrease the timeout towards 25 % of maximum time spent. - max = max * 125 / 100 + maxDur = maxDur * 125 / 100 timeout := atomic.LoadInt64(&dt.timeout) - if max < time.Duration(timeout) { + if maxDur < time.Duration(timeout) { // Move 50% toward the max. 
- timeout = (int64(max) + timeout) / 2 + timeout = (int64(maxDur) + timeout) / 2 } if timeout < dt.minimum { timeout = dt.minimum diff --git a/cmd/erasure-metadata-utils.go b/cmd/erasure-metadata-utils.go index d742425cc..56bfd8137 100644 --- a/cmd/erasure-metadata-utils.go +++ b/cmd/erasure-metadata-utils.go @@ -52,15 +52,15 @@ func reduceCommonVersions(diskVersions [][]byte, writeQuorum int) (versions []by } var commonVersions uint64 - max := 0 + maxCnt := 0 for versions, count := range diskVersionsCount { - if max < count { - max = count + if maxCnt < count { + maxCnt = count commonVersions = versions } } - if max >= writeQuorum { + if maxCnt >= writeQuorum { for _, versions := range diskVersions { if binary.BigEndian.Uint64(versions) == commonVersions { return versions @@ -80,15 +80,15 @@ func reduceCommonDataDir(dataDirs []string, writeQuorum int) (dataDir string) { dataDirsCount[ddir]++ } - max := 0 + maxCnt := 0 for ddir, count := range dataDirsCount { - if max < count { - max = count + if maxCnt < count { + maxCnt = count dataDir = ddir } } - if max >= writeQuorum { + if maxCnt >= writeQuorum { return dataDir } @@ -115,20 +115,20 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) errorCounts[err]++ } - max := 0 + maxCnt := 0 for err, count := range errorCounts { switch { - case max < count: - max = count + case maxCnt < count: + maxCnt = count maxErr = err // Prefer `nil` over other error values with the same // number of occurrences. - case max == count && err == nil: + case maxCnt == count && err == nil: maxErr = err } } - return max, maxErr + return maxCnt, maxErr } // reduceQuorumErrs behaves like reduceErrs by only for returning diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index 1ebafad0f..32f5c03c5 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -357,7 +357,7 @@ func (p serverPoolsAvailableSpace) TotalAvailable() uint64 { // FilterMaxUsed will filter out any pools that has used percent bigger than max, // unless all have that, in which case all are preserved. -func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) { +func (p serverPoolsAvailableSpace) FilterMaxUsed(maxUsed int) { // We aren't modifying p, only entries in it, so we don't need to receive a pointer. if len(p) <= 1 { // Nothing to do. @@ -365,7 +365,7 @@ func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) { } var ok bool for _, z := range p { - if z.Available > 0 && z.MaxUsedPct < max { + if z.Available > 0 && z.MaxUsedPct < maxUsed { ok = true break } @@ -378,7 +378,7 @@ func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) { // Remove entries that are above. 
for i, z := range p { - if z.Available > 0 && z.MaxUsedPct < max { + if z.Available > 0 && z.MaxUsedPct < maxUsed { continue } p[i].Available = 0 diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go index d4eda4dd6..509d451da 100644 --- a/cmd/metacache-set.go +++ b/cmd/metacache-set.go @@ -626,18 +626,18 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, com } filter := func(list []uint64) (commonCount uint64) { - max := 0 + maxCnt := 0 signatureMap := map[uint64]int{} for _, v := range list { signatureMap[v]++ } for ops, count := range signatureMap { - if max < count && commonCount < ops { - max = count + if maxCnt < count && commonCount < ops { + maxCnt = count commonCount = ops } } - if max < readQuorum { + if maxCnt < readQuorum { return 0 } return commonCount @@ -650,7 +650,7 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, com func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) { filter := func() (commonCount uint64) { - max := 0 + maxCnt := 0 signatureMap := map[uint64]int{} for _, info := range infos { if info.Error != "" { @@ -660,12 +660,12 @@ func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) { signatureMap[mutations]++ } for ops, count := range signatureMap { - if max < count && commonCount < ops { - max = count + if maxCnt < count && commonCount < ops { + maxCnt = count commonCount = ops } } - if max < readQuorum { + if maxCnt < readQuorum { return 0 } return commonCount diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 9325d8ecf..7645b46a4 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -352,11 +352,11 @@ func isETagEqual(left, right string) bool { // setPutObjHeaders sets all the necessary headers returned back // upon a success Put/Copy/CompleteMultipart/Delete requests // to activate delete only headers set delete as true -func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool, h http.Header) { +func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, del bool, h http.Header) { // We must not use the http.Header().Set method here because some (broken) // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). // Therefore, we have to set the ETag directly as map entry. 
- if objInfo.ETag != "" && !delete { + if objInfo.ETag != "" && !del { w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`} } @@ -364,13 +364,13 @@ func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool, h if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID { w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID} // If version is a deleted marker, set this header as well - if objInfo.DeleteMarker && delete { // only returned during delete object + if objInfo.DeleteMarker && del { // only returned during delete object w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)} } } if objInfo.Bucket != "" && objInfo.Name != "" { - if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !delete { + if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !del { lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts()) } } diff --git a/cmd/os_windows.go b/cmd/os_windows.go index 758f3856a..97230e221 100644 --- a/cmd/os_windows.go +++ b/cmd/os_windows.go @@ -60,20 +60,19 @@ func readDirFn(dirPath string, filter func(name string, typ os.FileMode) error) if err != nil { if err == syscall.ERROR_NO_MORE_FILES { break - } else { - if isSysErrPathNotFound(err) { - return nil - } - err = osErrToFileErr(&os.PathError{ - Op: "FindNextFile", - Path: dirPath, - Err: err, - }) - if err == errFileNotFound { - return nil - } - return err } + if isSysErrPathNotFound(err) { + return nil + } + err = osErrToFileErr(&os.PathError{ + Op: "FindNextFile", + Path: dirPath, + Err: err, + }) + if err == errFileNotFound { + return nil + } + return err } name := syscall.UTF16ToString(data.FileName[0:]) if name == "" || name == "." || name == ".." { // Useless names @@ -136,13 +135,12 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er if err != nil { if err == syscall.ERROR_NO_MORE_FILES { break - } else { - return nil, osErrToFileErr(&os.PathError{ - Op: "FindNextFile", - Path: dirPath, - Err: err, - }) } + return nil, osErrToFileErr(&os.PathError{ + Op: "FindNextFile", + Path: dirPath, + Err: err, + }) } name := syscall.UTF16ToString(data.FileName[0:]) diff --git a/cmd/postpolicyform.go b/cmd/postpolicyform.go index 2ced4fbd0..ad8aadc6a 100644 --- a/cmd/postpolicyform.go +++ b/cmd/postpolicyform.go @@ -232,19 +232,19 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { operator, matchType, value, }) case policyCondContentLength: - min, err := toInteger(condt[1]) + minLen, err := toInteger(condt[1]) if err != nil { return parsedPolicy, err } - max, err := toInteger(condt[2]) + maxLen, err := toInteger(condt[2]) if err != nil { return parsedPolicy, err } parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{ - Min: min, - Max: max, + Min: minLen, + Max: maxLen, Valid: true, } default: diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index cf8165189..89ebda28c 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -2247,12 +2247,12 @@ func getEndpointsLocalAddr(endpointServerPools EndpointServerPools) string { } // fetches a random number between range min-max. -func getRandomRange(min, max int, seed int64) int { +func getRandomRange(minN, maxN int, seed int64) int { // special value -1 means no explicit seeding. 
- if seed != -1 { - rand.Seed(seed) + if seed == -1 { + return rand.New(rand.NewSource(time.Now().UnixNano())).Intn(maxN-minN) + minN } - return rand.Intn(max-min) + min + return rand.New(rand.NewSource(seed)).Intn(maxN-minN) + minN } // Randomizes the order of bytes in the byte array diff --git a/cmd/xl-storage-meta-inline.go b/cmd/xl-storage-meta-inline.go index 08e3878ea..76b1f8a79 100644 --- a/cmd/xl-storage-meta-inline.go +++ b/cmd/xl-storage-meta-inline.go @@ -378,11 +378,11 @@ func (x *xlMetaInlineData) remove(keys ...string) bool { // xlMetaV2TrimData will trim any data from the metadata without unmarshalling it. // If any error occurs the unmodified data is returned. func xlMetaV2TrimData(buf []byte) []byte { - metaBuf, min, maj, err := checkXL2V1(buf) + metaBuf, maj, minor, err := checkXL2V1(buf) if err != nil { return buf } - if maj == 1 && min < 1 { + if maj == 1 && minor < 1 { // First version to carry data. return buf } @@ -393,7 +393,7 @@ func xlMetaV2TrimData(buf []byte) []byte { return buf } // Skip CRC - if maj > 1 || min >= 2 { + if maj > 1 || minor >= 2 { _, metaBuf, err = msgp.ReadUint32Bytes(metaBuf) storageLogIf(GlobalContext, err) } diff --git a/internal/bucket/lifecycle/expiration.go b/internal/bucket/lifecycle/expiration.go index 16a5fd14b..d33a4301d 100644 --- a/internal/bucket/lifecycle/expiration.go +++ b/internal/bucket/lifecycle/expiration.go @@ -79,10 +79,10 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start return errLifecycleInvalidDate } // Allow only date timestamp specifying midnight GMT - hr, min, sec := expDate.Clock() + hr, m, sec := expDate.Clock() nsec := expDate.Nanosecond() loc := expDate.Location() - if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { + if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { return errLifecycleDateNotMidnight } diff --git a/internal/bucket/lifecycle/transition.go b/internal/bucket/lifecycle/transition.go index 948510d01..96673d5d2 100644 --- a/internal/bucket/lifecycle/transition.go +++ b/internal/bucket/lifecycle/transition.go @@ -50,10 +50,10 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start return errTransitionInvalidDate } // Allow only date timestamp specifying midnight GMT - hr, min, sec := trnDate.Clock() + hr, m, sec := trnDate.Clock() nsec := trnDate.Nanosecond() loc := trnDate.Location() - if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { + if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { return errTransitionDateNotMidnight } diff --git a/internal/config/dns/operator_dns.go b/internal/config/dns/operator_dns.go index 3f720267f..e703103c3 100644 --- a/internal/config/dns/operator_dns.go +++ b/internal/config/dns/operator_dns.go @@ -63,14 +63,14 @@ func (c *OperatorDNS) addAuthHeader(r *http.Request) error { return nil } -func (c *OperatorDNS) endpoint(bucket string, delete bool) (string, error) { +func (c *OperatorDNS) endpoint(bucket string, del bool) (string, error) { u, err := url.Parse(c.Endpoint) if err != nil { return "", err } q := u.Query() q.Add("bucket", bucket) - q.Add("delete", strconv.FormatBool(delete)) + q.Add("delete", strconv.FormatBool(del)) u.RawQuery = q.Encode() return u.String(), nil } diff --git a/internal/config/drive/drive.go b/internal/config/drive/drive.go index abd95b414..862c62ab7 100644 --- a/internal/config/drive/drive.go +++ b/internal/config/drive/drive.go @@ 
-49,10 +49,10 @@ type Config struct {
 }
 
 // Update - updates the config with latest values
-func (c *Config) Update(new Config) error {
+func (c *Config) Update(updated Config) error {
 	configLk.Lock()
 	defer configLk.Unlock()
 
-	c.MaxTimeout = getMaxTimeout(new.MaxTimeout)
+	c.MaxTimeout = getMaxTimeout(updated.MaxTimeout)
 	return nil
 }
diff --git a/internal/dsync/utils.go b/internal/dsync/utils.go
index 9debd558f..6a6d29148 100644
--- a/internal/dsync/utils.go
+++ b/internal/dsync/utils.go
@@ -22,16 +22,16 @@ import (
 	"time"
 )
 
-func backoffWait(min, unit, cap time.Duration) func(*rand.Rand, uint) time.Duration {
+func backoffWait(minSleep, unit, maxSleep time.Duration) func(*rand.Rand, uint) time.Duration {
 	if unit > time.Hour {
 		// Protect against integer overflow
 		panic("unit cannot exceed one hour")
 	}
 	return func(r *rand.Rand, attempt uint) time.Duration {
-		sleep := min
+		sleep := minSleep
 		sleep += unit * time.Duration(attempt)
-		if sleep > cap {
-			sleep = cap
+		if sleep > maxSleep {
+			sleep = maxSleep
 		}
 		sleep -= time.Duration(r.Float64() * float64(sleep))
 		return sleep
diff --git a/internal/http/transports.go b/internal/http/transports.go
index f44df16ba..55fa9b380 100644
--- a/internal/http/transports.go
+++ b/internal/http/transports.go
@@ -32,8 +32,11 @@ import (
 var tlsClientSessionCacheSize = 100
 
 const (
-	WriteBufferSize = 64 << 10 // WriteBufferSize 64KiB moving up from 4KiB default
-	ReadBufferSize  = 64 << 10 // ReadBufferSize 64KiB moving up from 4KiB default
+	// WriteBufferSize 64KiB moving up from 4KiB default
+	WriteBufferSize = 64 << 10
+
+	// ReadBufferSize 64KiB moving up from 4KiB default
+	ReadBufferSize = 64 << 10
 )
 
 // ConnSettings - contains connection settings.
diff --git a/internal/rest/client.go b/internal/rest/client.go
index c07de174f..d8e0901bc 100644
--- a/internal/rest/client.go
+++ b/internal/rest/client.go
@@ -448,7 +448,7 @@ func (c *Client) LastError() error {
 // computes the exponential backoff duration according to
 // https://www.awsarchitectureblog.com/2015/03/backoff.html
-func exponentialBackoffWait(r *rand.Rand, unit, cap time.Duration) func(uint) time.Duration {
+func exponentialBackoffWait(r *rand.Rand, unit, maxSleep time.Duration) func(uint) time.Duration {
 	if unit > time.Hour {
 		// Protect against integer overflow
 		panic("unit cannot exceed one hour")
 	}
@@ -460,8 +460,8 @@ func exponentialBackoffWait(r *rand.Rand, unit, cap time.Duration) func(uint) ti
 		}
 		// sleep = random_between(unit, min(cap, base * 2 ** attempt))
 		sleep := unit * time.Duration(1<<attempt)
-		if sleep > cap {
-			sleep = cap
+		if sleep > maxSleep {
+			sleep = maxSleep
 		}
 		sleep -= time.Duration(r.Float64() * float64(sleep-unit))
 		return sleep
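
Not part of the patch above: a minimal, self-contained Go sketch of the two lint patterns the renames in this PR appear to address — local variables shadowing Go's predeclared identifiers (min, max, cap, new, delete), which newer golangci-lint checks such as predeclared/gocritic typically flag, and use of math/rand.Seed, deprecated since Go 1.20. The function names mostCommon and randomRange and the sample values below are hypothetical illustrations, not MinIO code.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// mostCommon returns the key with the highest count. The counter is named
// maxCnt rather than max so it does not shadow Go's predeclared max().
func mostCommon(counts map[string]int) (winner string) {
	maxCnt := 0
	for k, c := range counts {
		if c > maxCnt {
			maxCnt = c
			winner = k
		}
	}
	return winner
}

// randomRange returns a value in [minN, maxN) without calling the deprecated
// rand.Seed; it builds a local *rand.Rand from an explicit source instead.
func randomRange(minN, maxN int, seed int64) int {
	if seed == -1 {
		return rand.New(rand.NewSource(time.Now().UnixNano())).Intn(maxN-minN) + minN
	}
	return rand.New(rand.NewSource(seed)).Intn(maxN-minN) + minN
}

func main() {
	fmt.Println(mostCommon(map[string]int{"a": 2, "b": 5})) // prints "b"
	fmt.Println(randomRange(10, 20, 42))                    // deterministic, seeded value
}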