modernize for loops in cmd/, internal/ (#21309)

ILIYA 2025-05-28 00:19:03 +09:00 committed by GitHub
parent ea77bcfc98
commit 0a36d41dcd
27 changed files with 44 additions and 44 deletions
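For context: since Go 1.22, a for statement can range directly over an integer. for i := range n counts i = 0, 1, ..., n-1, and when the index is unused the variable can be dropped entirely (for range n). One nuance: the range expression is evaluated once before the loop begins, while the classic three-clause form re-evaluates its condition (such as i < len(s)) on every iteration; none of the loops rewritten in this commit change their bound mid-loop, so the two spellings behave identically. A minimal illustration of both forms (not taken from this commit):

package main

import "fmt"

func main() {
	s := "abc"

	// Indexed form: i takes the values 0, 1, ..., len(s)-1.
	for i := range len(s) {
		fmt.Printf("byte %d = %q\n", i, s[i])
	}

	// Indexless form: simply repeat the body n times.
	for range 3 {
		fmt.Println("tick")
	}
}

Rewrites like these are mechanical; they are the kind of fix suggested by the modernize analyzer that ships with gopls.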

View File

@@ -43,7 +43,7 @@ func shouldEscape(c byte) bool {
 // - Force encoding of '~'
 func s3URLEncode(s string) string {
 	spaceCount, hexCount := 0, 0
-	for i := 0; i < len(s); i++ {
+	for i := range len(s) {
 		c := s[i]
 		if shouldEscape(c) {
 			if c == ' ' {
@@ -70,7 +70,7 @@ func s3URLEncode(s string) string {
 	if hexCount == 0 {
 		copy(t, s)
-		for i := 0; i < len(s); i++ {
+		for i := range len(s) {
 			if s[i] == ' ' {
 				t[i] = '+'
 			}
@@ -79,7 +79,7 @@ func s3URLEncode(s string) string {
 	}
 	j := 0
-	for i := 0; i < len(s); i++ {
+	for i := range len(s) {
 		switch c := s[i]; {
 		case c == ' ':
			t[j] = '+'

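A subtlety in the s3URLEncode hunks above: for i := range len(s) walks byte indices, which is what an escaper needs; it is not the same as ranging over the string itself, which decodes UTF-8 runes. A small illustration (assumed example string, not from the commit):

package main

import "fmt"

func main() {
	s := "é" // a single rune encoded as two UTF-8 bytes (0xc3, 0xa9)

	for i := range len(s) { // byte-wise: two iterations
		fmt.Printf("byte %d: %#x\n", i, s[i])
	}

	for i, r := range s { // rune-wise: one iteration at byte offset 0
		fmt.Printf("rune at offset %d: %c\n", i, r)
	}
}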
View File

@@ -102,7 +102,7 @@ func waitForLowHTTPReq() {
 func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
 	bgSeq := newBgHealSequence()
 	// Run the background healer
-	for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
+	for range globalBackgroundHealRoutine.workers {
 		go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
 	}

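The healing hunk above uses the indexless form to start a fixed pool of identical workers. A generic sketch of that pattern with a hypothetical jobs channel (not the MinIO healer):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4 // hypothetical pool size
	jobs := make(chan int)

	var wg sync.WaitGroup
	for range workers { // identical goroutines; no index needed
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobs {
				fmt.Println("processed job", j)
			}
		}()
	}

	for j := range 8 { // enqueue a few jobs
		jobs <- j
	}
	close(jobs)
	wg.Wait()
}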
View File

@@ -248,7 +248,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
 		pInfo PartInfo
 	)
-	for i := 0; i < partsCount; i++ {
+	for i := range partsCount {
 		gopts := minio.GetObjectOptions{
 			VersionID:  srcObjInfo.VersionID,
 			PartNumber: i + 1,

View File

@@ -113,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st
 	sys.RUnlock()
 	// send last n console log messages in order filtered by node
 	if cnt > 0 {
-		for i := 0; i < last; i++ {
+		for i := range last {
 			entry := lastN[(cnt+i)%last]
 			if (entry == log.Info{}) {
 				continue

View File

@@ -1481,7 +1481,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 		}
 	}
-	for i := 0; i < len(onlineDisks); i++ {
+	for i := range len(onlineDisks) {
 		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
 			// Object info is the same in all disks, so we can pick
 			// the first meta from online disk

View File

@@ -504,7 +504,7 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
 	// count the number of offline disks
 	offline := 0
-	for i := 0; i < len(errs); i++ {
+	for i := range len(errs) {
 		var found bool
 		switch {
 		case errors.Is(errs[i], errDiskNotFound):
@@ -1221,7 +1221,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 		partsMetadata[index].SetInlineData()
 	}
-	for i := 0; i < len(onlineDisks); i++ {
+	for i := range len(onlineDisks) {
 		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
 			// Object info is the same in all disks, so we can pick
 			// the first meta from online disk
@@ -1557,7 +1557,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 		return ObjectInfo{}, toObjectErr(err, bucket, object)
 	}
-	for i := 0; i < len(onlineDisks); i++ {
+	for i := range len(onlineDisks) {
 		if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
 			// Object info is the same in all disks, so we can pick
 			// the first meta from online disk
@@ -1574,7 +1574,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	if len(versions) == 0 {
 		// Whether a disk was initially or becomes offline
 		// during this upload, send it to the MRF list.
-		for i := 0; i < len(onlineDisks); i++ {
+		for i := range len(onlineDisks) {
 			if onlineDisks[i] != nil && onlineDisks[i].IsOnline() {
 				continue
 			}

View File

@@ -149,7 +149,7 @@ func (z *erasureServerPools) findIndex(index int) int {
 	if z.rebalMeta == nil {
 		return 0
 	}
-	for i := 0; i < len(z.rebalMeta.PoolStats); i++ {
+	for i := range len(z.rebalMeta.PoolStats) {
 		if i == index {
 			return index
 		}

View File

@@ -95,7 +95,7 @@ func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI {
 	s.erasureDisksMu.RLock()
 	defer s.erasureDisksMu.RUnlock()
-	for i := 0; i < s.setCount; i++ {
+	for i := range s.setCount {
 		for j := 0; j < s.setDriveCount; j++ {
 			disk := s.erasureDisks[i][j]
 			if disk == OfflineDisk {
@@ -150,7 +150,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int,
 	if diskID == offlineDiskUUID {
 		return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID)
 	}
-	for i := 0; i < len(refFormat.Erasure.Sets); i++ {
+	for i := range len(refFormat.Erasure.Sets) {
 		for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
 			if refFormat.Erasure.Sets[i][j] == diskID {
 				return i, j, nil
@@ -174,7 +174,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
 		return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This)
 	}
-	for i := 0; i < len(refFormat.Erasure.Sets); i++ {
+	for i := range len(refFormat.Erasure.Sets) {
 		for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
 			if refFormat.Erasure.Sets[i][j] == format.Erasure.This {
 				return i, j, nil
@@ -377,7 +377,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 	mutex := newNSLock(globalIsDistErasure)
-	for i := 0; i < setCount; i++ {
+	for i := range setCount {
 		s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
 	}
@@ -390,7 +390,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 	var wg sync.WaitGroup
 	var lk sync.Mutex
-	for i := 0; i < setCount; i++ {
+	for i := range setCount {
 		lockerEpSet := set.NewStringSet()
 		for j := 0; j < setDriveCount; j++ {
 			wg.Add(1)
@@ -409,7 +409,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 	}
 	wg.Wait()
-	for i := 0; i < setCount; i++ {
+	for i := range setCount {
 		wg.Add(1)
 		go func(i int) {
 			defer wg.Done()

View File

@@ -98,7 +98,7 @@ func fmtGenMain(ctxt *cli.Context) {
 	setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet
 	format := newFormatErasureV3(setCount, setDriveCount)
 	format.ID = deploymentID
-	for i := 0; i < setCount; i++ { // for each erasure set
+	for i := range setCount { // for each erasure set
 		for j := 0; j < setDriveCount; j++ {
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]

View File

@@ -157,7 +157,7 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
 	format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgoV3
 	format.Erasure.Sets = make([][]string, numSets)
-	for i := 0; i < numSets; i++ {
+	for i := range numSets {
 		format.Erasure.Sets[i] = make([]string, setLen)
 		for j := 0; j < setLen; j++ {
 			format.Erasure.Sets[i][j] = mustGetUUID()
@@ -514,7 +514,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
 	}
 	// Make sure that the diskID is found in the set.
-	for i := 0; i < len(tmpFormat.Erasure.Sets); i++ {
+	for i := range len(tmpFormat.Erasure.Sets) {
 		for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ {
 			if this == tmpFormat.Erasure.Sets[i][j] {
 				return nil
@@ -639,7 +639,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
 		return nil, err
 	}
-	for i := 0; i < setCount; i++ {
+	for i := range setCount {
 		hostCount := make(map[string]int, setDriveCount)
 		for j := 0; j < setDriveCount; j++ {
 			disk := storageDisks[i*setDriveCount+j]

View File

@@ -266,7 +266,7 @@ func (m *mrfState) healRoutine(z *erasureServerPools) {
 		if len(u.Versions) > 0 {
 			vers := len(u.Versions) / 16
 			if vers > 0 {
-				for i := 0; i < vers; i++ {
+				for i := range vers {
 					healObject(u.Bucket, u.Object, uuid.UUID(u.Versions[16*i:]).String(), scan)
 				}
 			}

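The loop above steps through u.Versions as packed 16-byte version IDs, using the index only to compute each record's offset. The same stride pattern in isolation (made-up data, not the MRF code):

package main

import "fmt"

func main() {
	data := []byte("0123456789abcdefFEDCBA9876543210") // two 16-byte records
	records := len(data) / 16

	for i := range records {
		rec := data[16*i : 16*(i+1)] // i selects the record's offset
		fmt.Printf("record %d: %s\n", i, rec)
	}
}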
View File

@@ -123,7 +123,7 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a
 		}
 		retryCount := g.retryCount
-		for i := 0; i < retryCount; i++ {
+		for i := range retryCount {
 			g.errs[index].Err = nil
 			if err := f(); err != nil {
 				g.errs[index].Err = err

View File

@@ -128,7 +128,7 @@ func IsValidBucketName(bucket string) bool {
 		// 'label' in AWS terminology and if the bucket looks
 		// like an IP address.
 		isNotNumber := false
-		for i := 0; i < len(piece); i++ {
+		for i := range len(piece) {
 			switch {
 			case (piece[i] >= 'a' && piece[i] <= 'z' ||
				piece[i] == '-'):
@@ -254,11 +254,11 @@ func concat(ss ...string) string {
 	}
 	// create & allocate the memory in advance.
 	n := 0
-	for i := 0; i < length; i++ {
+	for i := range length {
 		n += len(ss[i])
 	}
 	b := make([]byte, 0, n)
-	for i := 0; i < length; i++ {
+	for i := range length {
 		b = append(b, ss[i]...)
 	}
 	return unsafe.String(unsafe.SliceData(b), n)

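The concat hunk sizes the result in a first pass, fills it in a second, and converts without copying via unsafe.String. For comparison, a safe equivalent of the same two-pass shape using strings.Builder (a sketch, not the committed code):

package main

import (
	"fmt"
	"strings"
)

func concat(ss ...string) string {
	n := 0
	for i := range len(ss) { // first pass: compute the total size
		n += len(ss[i])
	}

	var b strings.Builder
	b.Grow(n) // one up-front allocation, like make([]byte, 0, n)
	for i := range len(ss) { // second pass: copy the pieces
		b.WriteString(ss[i])
	}
	return b.String()
}

func main() {
	fmt.Println(concat("min", "io"))
}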
View File

@@ -77,7 +77,7 @@ func setupTestReadDirEmpty(t *testing.T) (testResults []result) {
 func setupTestReadDirFiles(t *testing.T) (testResults []result) {
 	dir := t.TempDir()
 	entries := []string{}
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		name := fmt.Sprintf("file-%d", i)
 		if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
 			// For cleanup, its required to add these entries into test results.
@@ -102,7 +102,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) {
 		t.Fatalf("Unable to create prefix directory \"mydir\", %s", err)
 	}
 	entries := []string{"mydir/"}
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		name := fmt.Sprintf("file-%d", i)
 		if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
 			// For cleanup, its required to add these entries into test results.
@@ -126,7 +126,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) {
 	}
 	dir := t.TempDir()
 	entries := []string{}
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		name1 := fmt.Sprintf("file-%d", i)
 		name2 := fmt.Sprintf("file-%d", i+10)
 		if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {

View File

@@ -102,7 +102,7 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT
 		var totalUploadTimes madmin.TimeDurations
 		var totalDownloadTimes madmin.TimeDurations
 		var totalDownloadTTFB madmin.TimeDurations
-		for i := 0; i < len(throughputHighestResults); i++ {
+		for i := range len(throughputHighestResults) {
 			errStr := ""
 			if throughputHighestResults[i].Error != "" {
 				errStr = throughputHighestResults[i].Error

View File

@@ -675,7 +675,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
 	versions := make([]FileInfoVersions, totalVersions)
 	decoder := msgpNewReader(r.Body)
 	defer readMsgpReaderPoolPut(decoder)
-	for i := 0; i < totalVersions; i++ {
+	for i := range totalVersions {
 		dst := &versions[i]
 		if err := dst.DecodeMsg(decoder); err != nil {
 			s.writeErrorResponse(w, err)

View File

@@ -851,7 +851,7 @@ func lcp(strs []string, pre bool) string {
 		// compare letters
 		if pre {
 			// prefix, iterate left to right
-			for i := 0; i < maxl; i++ {
+			for i := range maxl {
 				if xfix[i] != str[i] {
 					xfix = xfix[:i]
 					break
@@ -859,7 +859,7 @@ func lcp(strs []string, pre bool) string {
 			}
 		} else {
 			// suffix, iterate right to left
-			for i := 0; i < maxl; i++ {
+			for i := range maxl {
 				xi := xfixl - i - 1
 				si := strl - i - 1
 				if xfix[xi] != str[si] {

View File

@@ -846,7 +846,7 @@ func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte,
 // Any non-nil error is returned.
 func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) {
 	var tHdr, tMeta []byte // Zero copy bytes
-	for i := 0; i < versions; i++ {
+	for i := range versions {
 		tHdr, buf, err = msgp.ReadBytesZC(buf)
 		if err != nil {
 			return err

View File

@@ -381,7 +381,7 @@ func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int)
 	lockNotFound, lockRefreshed := 0, 0
 	done := false
-	for i := 0; i < len(restClnts); i++ {
+	for range len(restClnts) {
 		select {
 		case refreshResult := <-ch:
 			if refreshResult.offline {

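In refreshLock the index was never used: the loop just collects one reply per lock client from the channel, so for range len(restClnts) states the intent directly. The fan-out/fan-in shape in miniature (assumed names, not the dsync code):

package main

import "fmt"

func main() {
	const peers = 5 // assumed number of remote clients
	ch := make(chan int, peers)

	for i := range peers { // fan out one request per peer
		go func() { ch <- i * i }() // i is per-iteration under Go 1.22 semantics
	}

	sum := 0
	for range peers { // fan in exactly as many replies; no index needed
		sum += <-ch
	}
	fmt.Println("sum of squares:", sum)
}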
View File

@@ -357,7 +357,7 @@ func (list *TargetList) startSendWorkers(workerCount int) {
 	if err != nil {
 		panic(err)
 	}
-	for i := 0; i < workerCount; i++ {
+	for range workerCount {
 		wk.Take()
 		go func() {
 			defer wk.Give()

View File

@@ -1041,7 +1041,7 @@ func (c *Connection) readStream(ctx context.Context, conn net.Conn, cancel conte
 			// Handle merged messages.
 			messages := int(m.Seq)
 			c.inMessages.Add(int64(messages))
-			for i := 0; i < messages; i++ {
+			for range messages {
 				if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected {
 					return
 				}

View File

@@ -143,7 +143,7 @@ func (t *TestGrid) WaitAllConnect(ctx context.Context) {
 }

 func getHosts(n int) (hosts []string, listeners []net.Listener, err error) {
-	for i := 0; i < n; i++ {
+	for range n {
 		l, err := net.Listen("tcp", "127.0.0.1:0")
 		if err != nil {
 			if l, err = net.Listen("tcp6", "[::1]:0"); err != nil {

View File

@@ -574,7 +574,7 @@ func (m *muxClient) ack(seq uint32) {
 		return
 	}
 	available := cap(m.outBlock)
-	for i := 0; i < available; i++ {
+	for range available {
 		m.outBlock <- struct{}{}
 	}
 	m.acked = true

View File

@@ -130,7 +130,7 @@ func newMuxStream(ctx context.Context, msg message, c *Connection, handler Strea
 	// Fill outbound block.
 	// Each token represents a message that can be sent to the client without blocking.
 	// The client will refill the tokens as they confirm delivery of the messages.
-	for i := 0; i < outboundCap; i++ {
+	for range outboundCap {
 		m.outBlock <- struct{}{}
 	}

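Both grid hunks pre-fill a buffered channel with one token per message that may be in flight, which is a counting semaphore. The same pattern in miniature (hypothetical limit, not the grid code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const limit = 3 // hypothetical concurrency budget
	tokens := make(chan struct{}, limit)

	for range limit { // pre-fill: each token is one permit
		tokens <- struct{}{}
	}

	var wg sync.WaitGroup
	for i := range 9 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-tokens                                // acquire a permit
			defer func() { tokens <- struct{}{} }() // release it when done
			fmt.Println("task", i, "holds a token")
		}()
	}
	wg.Wait()
}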
View File

@@ -230,7 +230,7 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error {
 	}()

 	// Start parsers
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+	for range runtime.GOMAXPROCS(0) {
 		go func() {
 			for in := range r.input {
 				if len(in.input) == 0 {

View File

@@ -173,7 +173,7 @@ func (r *PReader) startReaders() {
 	}()

 	// Start parsers
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+	for range runtime.GOMAXPROCS(0) {
 		go func() {
 			for in := range r.input {
 				if len(in.input) == 0 {

View File

@@ -332,7 +332,7 @@ func (d *Decoder) u4() rune {
 	// logic taken from:
 	// github.com/buger/jsonparser/blob/master/escape.go#L20
 	var h [4]int
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		c := d.next()
 		switch {
 		case c >= '0' && c <= '9':
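The last hunk keeps the index but uses a constant bound (for i := range 4) while decoding a four-digit escape. A self-contained sketch of that shape (hypothetical helper, not the s3select decoder's actual code):

package main

import "fmt"

// parseHex4 decodes exactly four hex digits, as in a \uXXXX escape.
func parseHex4(s string) (rune, error) {
	if len(s) < 4 {
		return 0, fmt.Errorf("need 4 hex digits, have %d", len(s))
	}
	var r rune
	for i := range 4 { // constant trip count reads naturally
		c := s[i]
		var v rune
		switch {
		case c >= '0' && c <= '9':
			v = rune(c - '0')
		case c >= 'a' && c <= 'f':
			v = rune(c-'a') + 10
		case c >= 'A' && c <= 'F':
			v = rune(c-'A') + 10
		default:
			return 0, fmt.Errorf("invalid hex digit %q", c)
		}
		r = r<<4 | v
	}
	return r, nil
}

func main() {
	r, err := parseHex4("00e9")
	fmt.Printf("%c %v\n", r, err) // é <nil>
}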