Fix lint issues from v1.62.0 upgrade (#20633)

* Fix lint issues from v1.62.0 upgrade

* Fix xlMetaV2TrimData version checks.
Author: Klaus Post, 2024-11-11 06:51:43 -08:00 (committed by GitHub)
Commit: 4972735507 (parent: e6ca6de194)
17 changed files with 89 additions and 88 deletions
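
Nearly every hunk below is the same class of fix: golangci-lint v1.62.0 flags local variables that shadow Go's predeclared identifiers (`min` and `max` became builtins in Go 1.21; `cap`, `delete`, and `new` have always been predeclared). Shadowing still compiles, but it hides the builtin for the rest of the scope, so the fix is a rename. A minimal sketch of the warning and the rename, assuming a `predeclared`-style check is enabled in the repo's lint config (the functions here are illustrative, not from the MinIO codebase):

package main

import "fmt"

// highest shadows the Go 1.21 builtin max(); the code compiles, but the
// builtin is unreachable inside the function and the linter reports it.
func highest(xs []int) int {
	max := 0
	for _, x := range xs {
		if x > max {
			max = x
		}
	}
	return max
}

// highestFixed applies the rename fix used throughout this commit:
// a descriptive identifier that leaves the builtin visible.
func highestFixed(xs []int) int {
	maxVal := 0
	for _, x := range xs {
		if x > maxVal {
			maxVal = x
		}
	}
	return maxVal
}

func main() {
	fmt.Println(highest([]int{3, 9, 4}))      // 9
	fmt.Println(highestFixed([]int{3, 9, 4})) // 9
}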


@@ -108,18 +108,18 @@ func (l ReplicationLastHour) merge(o ReplicationLastHour) (merged ReplicationLastHour) {
 
 // Add a new duration data
 func (l *ReplicationLastHour) addsize(sz int64) {
-	min := time.Now().Unix() / 60
-	l.forwardTo(min)
-	winIdx := min % 60
-	l.Totals[winIdx].merge(AccElem{Total: min, Size: sz, N: 1})
-	l.LastMin = min
+	minutes := time.Now().Unix() / 60
+	l.forwardTo(minutes)
+	winIdx := minutes % 60
+	l.Totals[winIdx].merge(AccElem{Total: minutes, Size: sz, N: 1})
+	l.LastMin = minutes
 }
 
 // Merge all recorded counts of last hour into one
 func (l *ReplicationLastHour) getTotal() AccElem {
 	var res AccElem
-	min := time.Now().Unix() / 60
-	l.forwardTo(min)
+	minutes := time.Now().Unix() / 60
+	l.forwardTo(minutes)
 	for _, elem := range l.Totals[:] {
 		res.merge(elem)
 	}


@@ -117,12 +117,12 @@ func (dt *dynamicTimeout) logEntry(duration time.Duration) {
 
 // adjust changes the value of the dynamic timeout based on the
 // previous results
 func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
-	failures, max := 0, time.Duration(0)
+	failures, maxDur := 0, time.Duration(0)
 	for _, dur := range entries[:] {
 		if dur == maxDuration {
 			failures++
-		} else if dur > max {
-			max = dur
+		} else if dur > maxDur {
+			maxDur = dur
 		}
 	}
@@ -144,12 +144,12 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
 	} else if failPct < dynamicTimeoutDecreaseThresholdPct {
 		// We are hitting the timeout relatively few times,
 		// so decrease the timeout towards 25 % of maximum time spent.
-		max = max * 125 / 100
+		maxDur = maxDur * 125 / 100
 		timeout := atomic.LoadInt64(&dt.timeout)
-		if max < time.Duration(timeout) {
+		if maxDur < time.Duration(timeout) {
 			// Move 50% toward the max.
-			timeout = (int64(max) + timeout) / 2
+			timeout = (int64(maxDur) + timeout) / 2
 		}
 		if timeout < dt.minimum {
 			timeout = dt.minimum


@@ -52,15 +52,15 @@ func reduceCommonVersions(diskVersions [][]byte, writeQuorum int) (versions []byte) {
 	}
 
 	var commonVersions uint64
-	max := 0
+	maxCnt := 0
 	for versions, count := range diskVersionsCount {
-		if max < count {
-			max = count
+		if maxCnt < count {
+			maxCnt = count
 			commonVersions = versions
 		}
 	}
 
-	if max >= writeQuorum {
+	if maxCnt >= writeQuorum {
 		for _, versions := range diskVersions {
 			if binary.BigEndian.Uint64(versions) == commonVersions {
 				return versions
@@ -80,15 +80,15 @@ func reduceCommonDataDir(dataDirs []string, writeQuorum int) (dataDir string) {
 		dataDirsCount[ddir]++
 	}
 
-	max := 0
+	maxCnt := 0
 	for ddir, count := range dataDirsCount {
-		if max < count {
-			max = count
+		if maxCnt < count {
+			maxCnt = count
 			dataDir = ddir
 		}
 	}
 
-	if max >= writeQuorum {
+	if maxCnt >= writeQuorum {
 		return dataDir
 	}
@@ -115,20 +115,20 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) {
 		errorCounts[err]++
 	}
 
-	max := 0
+	maxCnt := 0
 	for err, count := range errorCounts {
 		switch {
-		case max < count:
-			max = count
+		case maxCnt < count:
+			maxCnt = count
 			maxErr = err
 
 		// Prefer `nil` over other error values with the same
 		// number of occurrences.
-		case max == count && err == nil:
+		case maxCnt == count && err == nil:
 			maxErr = err
 		}
 	}
-	return max, maxErr
+	return maxCnt, maxErr
 }
 
 // reduceQuorumErrs behaves like reduceErrs by only for returning


@@ -357,7 +357,7 @@ func (p serverPoolsAvailableSpace) TotalAvailable() uint64 {
 
 // FilterMaxUsed will filter out any pools that has used percent bigger than max,
 // unless all have that, in which case all are preserved.
-func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) {
+func (p serverPoolsAvailableSpace) FilterMaxUsed(maxUsed int) {
 	// We aren't modifying p, only entries in it, so we don't need to receive a pointer.
 	if len(p) <= 1 {
 		// Nothing to do.
@@ -365,7 +365,7 @@ func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) {
 	}
 	var ok bool
 	for _, z := range p {
-		if z.Available > 0 && z.MaxUsedPct < max {
+		if z.Available > 0 && z.MaxUsedPct < maxUsed {
 			ok = true
 			break
 		}
@@ -378,7 +378,7 @@ func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) {
 	// Remove entries that are above.
 	for i, z := range p {
-		if z.Available > 0 && z.MaxUsedPct < max {
+		if z.Available > 0 && z.MaxUsedPct < maxUsed {
 			continue
 		}
 		p[i].Available = 0


@@ -626,18 +626,18 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, commonDelete uint64) {
 	}
 
 	filter := func(list []uint64) (commonCount uint64) {
-		max := 0
+		maxCnt := 0
 		signatureMap := map[uint64]int{}
 		for _, v := range list {
 			signatureMap[v]++
 		}
 		for ops, count := range signatureMap {
-			if max < count && commonCount < ops {
-				max = count
+			if maxCnt < count && commonCount < ops {
+				maxCnt = count
 				commonCount = ops
 			}
 		}
-		if max < readQuorum {
+		if maxCnt < readQuorum {
 			return 0
 		}
 		return commonCount
@@ -650,7 +650,7 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, commonDelete uint64) {
 
 func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) {
 	filter := func() (commonCount uint64) {
-		max := 0
+		maxCnt := 0
 		signatureMap := map[uint64]int{}
 		for _, info := range infos {
 			if info.Error != "" {
@@ -660,12 +660,12 @@ func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) {
 			signatureMap[mutations]++
 		}
 		for ops, count := range signatureMap {
-			if max < count && commonCount < ops {
-				max = count
+			if maxCnt < count && commonCount < ops {
+				maxCnt = count
 				commonCount = ops
 			}
 		}
-		if max < readQuorum {
+		if maxCnt < readQuorum {
 			return 0
 		}
 		return commonCount


@@ -352,11 +352,11 @@ func isETagEqual(left, right string) bool {
 
 // setPutObjHeaders sets all the necessary headers returned back
 // upon a success Put/Copy/CompleteMultipart/Delete requests
 // to activate delete only headers set delete as true
-func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool, h http.Header) {
+func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, del bool, h http.Header) {
 	// We must not use the http.Header().Set method here because some (broken)
 	// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
 	// Therefore, we have to set the ETag directly as map entry.
-	if objInfo.ETag != "" && !delete {
+	if objInfo.ETag != "" && !del {
 		w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
 	}
@@ -364,13 +364,13 @@ func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool, h http.Header) {
 	if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID {
 		w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
 		// If version is a deleted marker, set this header as well
-		if objInfo.DeleteMarker && delete { // only returned during delete object
+		if objInfo.DeleteMarker && del { // only returned during delete object
 			w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)}
 		}
 	}
 
 	if objInfo.Bucket != "" && objInfo.Name != "" {
-		if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !delete {
+		if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !del {
 			lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts())
 		}
 	}


@@ -60,20 +60,19 @@ func readDirFn(dirPath string, filter func(name string, typ os.FileMode) error) error {
 		if err != nil {
 			if err == syscall.ERROR_NO_MORE_FILES {
 				break
-			} else {
-				if isSysErrPathNotFound(err) {
-					return nil
-				}
-				err = osErrToFileErr(&os.PathError{
-					Op:   "FindNextFile",
-					Path: dirPath,
-					Err:  err,
-				})
-				if err == errFileNotFound {
-					return nil
-				}
-				return err
 			}
+			if isSysErrPathNotFound(err) {
+				return nil
+			}
+			err = osErrToFileErr(&os.PathError{
+				Op:   "FindNextFile",
+				Path: dirPath,
+				Err:  err,
+			})
+			if err == errFileNotFound {
+				return nil
+			}
+			return err
 		}
 		name := syscall.UTF16ToString(data.FileName[0:])
 		if name == "" || name == "." || name == ".." { // Useless names
@@ -136,13 +135,12 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err error) {
 		if err != nil {
 			if err == syscall.ERROR_NO_MORE_FILES {
 				break
-			} else {
-				return nil, osErrToFileErr(&os.PathError{
-					Op:   "FindNextFile",
-					Path: dirPath,
-					Err:  err,
-				})
 			}
+			return nil, osErrToFileErr(&os.PathError{
+				Op:   "FindNextFile",
+				Path: dirPath,
+				Err:  err,
+			})
 		}
 		name := syscall.UTF16ToString(data.FileName[0:])


@@ -232,19 +232,19 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) {
 				operator, matchType, value,
 			})
 		case policyCondContentLength:
-			min, err := toInteger(condt[1])
+			minLen, err := toInteger(condt[1])
 			if err != nil {
 				return parsedPolicy, err
 			}
 
-			max, err := toInteger(condt[2])
+			maxLen, err := toInteger(condt[2])
 			if err != nil {
 				return parsedPolicy, err
 			}
 
 			parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{
-				Min:   min,
-				Max:   max,
+				Min:   minLen,
+				Max:   maxLen,
 				Valid: true,
 			}
 		default:


@@ -2247,12 +2247,12 @@ func getEndpointsLocalAddr(endpointServerPools EndpointServerPools) string {
 }
 
 // fetches a random number between range min-max.
-func getRandomRange(min, max int, seed int64) int {
+func getRandomRange(minN, maxN int, seed int64) int {
 	// special value -1 means no explicit seeding.
-	if seed != -1 {
-		rand.Seed(seed)
+	if seed == -1 {
+		return rand.New(rand.NewSource(time.Now().UnixNano())).Intn(maxN-minN) + minN
 	}
-	return rand.Intn(max-min) + min
+	return rand.New(rand.NewSource(seed)).Intn(maxN-minN) + minN
 }
 
 // Randomizes the order of bytes in the byte array
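
Beyond the rename from `min`/`max` to `minN`/`maxN`, this hunk also drops `rand.Seed`, which has been deprecated since Go 1.20 because it mutates the shared global source; the new code builds a locally seeded `*rand.Rand` instead. A self-contained sketch of the same pattern, with `randomInRange` as a hypothetical stand-in for `getRandomRange`:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomInRange returns a value in [minN, maxN); seed == -1 asks for a
// time-based seed, mirroring the special value used by getRandomRange.
func randomInRange(minN, maxN int, seed int64) int {
	if seed == -1 {
		return rand.New(rand.NewSource(time.Now().UnixNano())).Intn(maxN-minN) + minN
	}
	return rand.New(rand.NewSource(seed)).Intn(maxN-minN) + minN
}

func main() {
	fmt.Println(randomInRange(10, 20, 42)) // deterministic for a fixed seed
	fmt.Println(randomInRange(10, 20, -1)) // varies run to run
}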


@@ -378,11 +378,11 @@ func (x *xlMetaInlineData) remove(keys ...string) bool {
 // xlMetaV2TrimData will trim any data from the metadata without unmarshalling it.
 // If any error occurs the unmodified data is returned.
 func xlMetaV2TrimData(buf []byte) []byte {
-	metaBuf, min, maj, err := checkXL2V1(buf)
+	metaBuf, maj, minor, err := checkXL2V1(buf)
 	if err != nil {
 		return buf
 	}
-	if maj == 1 && min < 1 {
+	if maj == 1 && minor < 1 {
 		// First version to carry data.
 		return buf
 	}
@@ -393,7 +393,7 @@ func xlMetaV2TrimData(buf []byte) []byte {
 		return buf
 	}
 	// Skip CRC
-	if maj > 1 || min >= 2 {
+	if maj > 1 || minor >= 2 {
 		_, metaBuf, err = msgp.ReadUint32Bytes(metaBuf)
 		storageLogIf(GlobalContext, err)
 	}


@@ -79,10 +79,10 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
 		return errLifecycleInvalidDate
 	}
 	// Allow only date timestamp specifying midnight GMT
-	hr, min, sec := expDate.Clock()
+	hr, m, sec := expDate.Clock()
 	nsec := expDate.Nanosecond()
 	loc := expDate.Location()
-	if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) {
+	if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) {
 		return errLifecycleDateNotMidnight
 	}


@@ -50,10 +50,10 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
 		return errTransitionInvalidDate
 	}
 	// Allow only date timestamp specifying midnight GMT
-	hr, min, sec := trnDate.Clock()
+	hr, m, sec := trnDate.Clock()
 	nsec := trnDate.Nanosecond()
 	loc := trnDate.Location()
-	if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) {
+	if !(hr == 0 && m == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) {
 		return errTransitionDateNotMidnight
 	}


@@ -63,14 +63,14 @@ func (c *OperatorDNS) addAuthHeader(r *http.Request) error {
 	return nil
 }
 
-func (c *OperatorDNS) endpoint(bucket string, delete bool) (string, error) {
+func (c *OperatorDNS) endpoint(bucket string, del bool) (string, error) {
 	u, err := url.Parse(c.Endpoint)
 	if err != nil {
 		return "", err
 	}
 	q := u.Query()
 	q.Add("bucket", bucket)
-	q.Add("delete", strconv.FormatBool(delete))
+	q.Add("delete", strconv.FormatBool(del))
 	u.RawQuery = q.Encode()
 	return u.String(), nil
 }


@@ -49,10 +49,10 @@ type Config struct {
 }
 
 // Update - updates the config with latest values
-func (c *Config) Update(new Config) error {
+func (c *Config) Update(updated Config) error {
 	configLk.Lock()
 	defer configLk.Unlock()
 
-	c.MaxTimeout = getMaxTimeout(new.MaxTimeout)
+	c.MaxTimeout = getMaxTimeout(updated.MaxTimeout)
 	return nil
 }


@@ -22,16 +22,16 @@ import (
 	"time"
 )
 
-func backoffWait(min, unit, cap time.Duration) func(*rand.Rand, uint) time.Duration {
+func backoffWait(minSleep, unit, maxSleep time.Duration) func(*rand.Rand, uint) time.Duration {
 	if unit > time.Hour {
 		// Protect against integer overflow
 		panic("unit cannot exceed one hour")
 	}
 	return func(r *rand.Rand, attempt uint) time.Duration {
-		sleep := min
+		sleep := minSleep
 		sleep += unit * time.Duration(attempt)
-		if sleep > cap {
-			sleep = cap
+		if sleep > maxSleep {
+			sleep = maxSleep
 		}
 		sleep -= time.Duration(r.Float64() * float64(sleep))
 		return sleep


@@ -32,8 +32,11 @@ import (
 var tlsClientSessionCacheSize = 100
 
 const (
-	WriteBufferSize = 64 << 10 // WriteBufferSize 64KiB moving up from 4KiB default
-	ReadBufferSize  = 64 << 10 // ReadBufferSize 64KiB moving up from 4KiB default
+	// WriteBufferSize 64KiB moving up from 4KiB default
+	WriteBufferSize = 64 << 10
+
+	// ReadBufferSize 64KiB moving up from 4KiB default
+	ReadBufferSize = 64 << 10
 )
 
 // ConnSettings - contains connection settings.


@@ -448,7 +448,7 @@ func (c *Client) LastError() error {
 
 // computes the exponential backoff duration according to
 // https://www.awsarchitectureblog.com/2015/03/backoff.html
-func exponentialBackoffWait(r *rand.Rand, unit, cap time.Duration) func(uint) time.Duration {
+func exponentialBackoffWait(r *rand.Rand, unit, maxSleep time.Duration) func(uint) time.Duration {
 	if unit > time.Hour {
 		// Protect against integer overflow
 		panic("unit cannot exceed one hour")
@@ -460,8 +460,8 @@ func exponentialBackoffWait(r *rand.Rand, unit, cap time.Duration) func(uint) time.Duration {
 		}
 		// sleep = random_between(unit, min(cap, base * 2 ** attempt))
 		sleep := unit * time.Duration(1<<attempt)
-		if sleep > cap {
-			sleep = cap
+		if sleep > maxSleep {
+			sleep = maxSleep
 		}
 		sleep -= time.Duration(r.Float64() * float64(sleep-unit))
 		return sleep
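
For context, these two hunks edit a capped, full-jitter exponential backoff modeled on the AWS article cited in the comment; `cap` had to go because cap() is a Go builtin. A minimal self-contained sketch under the signature shown in the diff; the clamp of `attempt` at 16 stands in for the elided lines between the hunks and is an assumption, not quoted from the repo:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// exponentialBackoffWait sketches the capped, jittered backoff the hunks
// edit: double the sleep each attempt, cap it at maxSleep, then subtract
// a random fraction so concurrent retries spread out.
func exponentialBackoffWait(r *rand.Rand, unit, maxSleep time.Duration) func(uint) time.Duration {
	if unit > time.Hour {
		// Protect against integer overflow
		panic("unit cannot exceed one hour")
	}
	return func(attempt uint) time.Duration {
		if attempt > 16 { // assumption: clamp so 1<<attempt stays in range
			attempt = 16
		}
		// sleep = random_between(unit, min(maxSleep, unit * 2**attempt))
		sleep := unit * time.Duration(1<<attempt)
		if sleep > maxSleep {
			sleep = maxSleep
		}
		sleep -= time.Duration(r.Float64() * float64(sleep-unit))
		return sleep
	}
}

func main() {
	wait := exponentialBackoffWait(rand.New(rand.NewSource(1)), 50*time.Millisecond, 5*time.Second)
	for attempt := uint(0); attempt < 5; attempt++ {
		fmt.Println(wait(attempt))
	}
}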