Mirror of https://github.com/minio/minio.git (synced 2025-11-07 12:52:58 -05:00)
Run modernize (#21546)
Ran `go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...`, then `go generate ./...` afterwards to keep generated code in sync.
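For context: the gopls `modernize` analyzer mechanically rewrites older Go idioms into their current equivalents. The first hunks below add a `slices` import and switch the variadic `log` helper from `...interface{}` to `...any`; since Go 1.18, `any` is a predeclared alias for `interface{}`, so that rewrite changes spelling only, not behavior. A minimal sketch (the `logf` name is hypothetical, not from this diff):

```go
package main

import "fmt"

// Since Go 1.18, `any` is a predeclared alias for `interface{}`,
// so these two signatures denote the same type:
//
//	func logf(format string, args ...interface{})
//	func logf(format string, args ...any)
func logf(format string, args ...any) {
	fmt.Printf(format, args...)
}

func main() {
	logf("acquired %d locks\n", 3)
}
```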
@@ -21,6 +21,7 @@ import (
 	"context"
 	"errors"
 	"math/rand"
+	"slices"
 	"sort"
 	"strconv"
 	"sync"
@@ -60,7 +61,7 @@ func init() {
 	)
 }
 
-func log(format string, data ...interface{}) {
+func log(format string, data ...any) {
 	if dsyncLog {
 		console.Printf(format, data...)
 	}
@@ -621,13 +622,7 @@ func (dm *DRWMutex) Unlock(ctx context.Context) {
 	defer dm.m.Unlock()
 
 	// Check if minimally a single bool is set in the writeLocks array
-	lockFound := false
-	for _, uid := range dm.writeLocks {
-		if isLocked(uid) {
-			lockFound = true
-			break
-		}
-	}
+	lockFound := slices.ContainsFunc(dm.writeLocks, isLocked)
 	if !lockFound {
 		panic("Trying to Unlock() while no Lock() is active")
 	}
@@ -672,13 +667,7 @@ func (dm *DRWMutex) RUnlock(ctx context.Context) {
 	defer dm.m.Unlock()
 
 	// Check if minimally a single bool is set in the writeLocks array
-	lockFound := false
-	for _, uid := range dm.readLocks {
-		if isLocked(uid) {
-			lockFound = true
-			break
-		}
-	}
+	lockFound := slices.ContainsFunc(dm.readLocks, isLocked)
 	if !lockFound {
 		panic("Trying to RUnlock() while no RLock() is active")
 	}
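Both hunks above apply the same rewrite: a boolean flag set inside a manual scan loop becomes one call to `slices.ContainsFunc` (new in Go 1.21), which reports whether any element satisfies the predicate and stops at the first match, exactly like the removed `break`. A minimal sketch, assuming lock slots are owner-ID strings as the surrounding code suggests:

```go
package main

import (
	"fmt"
	"slices"
)

// isLocked is a stand-in predicate: a non-empty owner ID means
// the slot currently holds a lock.
func isLocked(uid string) bool { return uid != "" }

func main() {
	writeLocks := []string{"", "owner-7", ""}

	// One call replaces the flag-and-break loop: it scans left to right
	// and returns true at the first element where isLocked is true.
	lockFound := slices.ContainsFunc(writeLocks, isLocked)
	fmt.Println(lockFound) // true
}
```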
@@ -157,18 +157,18 @@ func doTestParallelReaders(numReaders, gomaxprocs int) {
 	clocked := make(chan bool)
 	cunlock := make(chan bool)
 	cdone := make(chan bool)
-	for i := 0; i < numReaders; i++ {
+	for range numReaders {
 		go parallelReader(context.Background(), m, clocked, cunlock, cdone)
 	}
 	// Wait for all parallel RLock()s to succeed.
-	for i := 0; i < numReaders; i++ {
+	for range numReaders {
 		<-clocked
 	}
-	for i := 0; i < numReaders; i++ {
+	for range numReaders {
 		cunlock <- true
 	}
 	// Wait for the goroutines to finish.
-	for i := 0; i < numReaders; i++ {
+	for range numReaders {
 		<-cdone
 	}
 }
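This and the remaining test hunks modernize counting loops. Since Go 1.22 a `for` statement can range directly over an integer: `for range n` executes the body `n` times without declaring an index that nothing reads. A small sketch of the fan-out/fan-in shape that `doTestParallelReaders` uses, with placeholder goroutine work:

```go
package main

import "fmt"

func main() {
	const numReaders = 4
	cdone := make(chan bool)

	// Go 1.22+: repeat numReaders times with no unused index variable.
	for range numReaders {
		go func() { cdone <- true }()
	}
	// Collect one completion signal per goroutine started above.
	for range numReaders {
		<-cdone
	}
	fmt.Println("all readers done")
}
```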
@@ -184,13 +184,13 @@ func TestParallelReaders(t *testing.T) {
 // Borrowed from rwmutex_test.go
 func reader(resource string, numIterations int, activity *int32, cdone chan bool) {
 	rwm := NewDRWMutex(ds, resource)
-	for i := 0; i < numIterations; i++ {
+	for range numIterations {
 		if rwm.GetRLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) {
 			n := atomic.AddInt32(activity, 1)
 			if n < 1 || n >= 10000 {
 				panic(fmt.Sprintf("wlock(%d)\n", n))
 			}
-			for i := 0; i < 100; i++ {
+			for range 100 {
 			}
 			atomic.AddInt32(activity, -1)
 			rwm.RUnlock(context.Background())
@@ -202,13 +202,13 @@ func reader(resource string, numIterations int, activity *int32, cdone chan bool
 // Borrowed from rwmutex_test.go
 func writer(resource string, numIterations int, activity *int32, cdone chan bool) {
 	rwm := NewDRWMutex(ds, resource)
-	for i := 0; i < numIterations; i++ {
+	for range numIterations {
 		if rwm.GetLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) {
 			n := atomic.AddInt32(activity, 10000)
 			if n != 10000 {
 				panic(fmt.Sprintf("wlock(%d)\n", n))
 			}
-			for i := 0; i < 100; i++ {
+			for range 100 {
 			}
 			atomic.AddInt32(activity, -10000)
 			rwm.Unlock(context.Background())
@@ -149,13 +149,13 @@ func (lh *lockServerHandler) RLockHandler(w http.ResponseWriter, r *http.Request
 }
 
 func stopLockServers() {
-	for i := 0; i < numberOfNodes; i++ {
+	for i := range numberOfNodes {
 		nodes[i].Close()
 	}
 }
 
 func startLockServers() {
-	for i := 0; i < numberOfNodes; i++ {
+	for i := range numberOfNodes {
 		lsrv := &lockServer{
 			mutex:   sync.Mutex{},
 			lockMap: make(map[string]int64),
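When the loop body still needs the counter, as in `stopLockServers` and `startLockServers` above, modernize keeps the index with `for i := range n`: ranging over an int yields `i = 0, 1, …, n-1`, equivalent to the classic three-clause loop. A minimal sketch with a hypothetical `nodes` slice:

```go
package main

import "fmt"

func main() {
	nodes := []string{"node-0", "node-1", "node-2"}
	numberOfNodes := len(nodes)

	// Go 1.22+: i ranges over 0 .. numberOfNodes-1, same as
	// `for i := 0; i < numberOfNodes; i++`.
	for i := range numberOfNodes {
		fmt.Printf("closing %s\n", nodes[i])
	}
}
```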
@@ -42,7 +42,7 @@ func TestMain(m *testing.M) {
 
 	// Initialize locker clients for dsync.
 	var clnts []NetLocker
-	for i := 0; i < len(nodes); i++ {
+	for i := range nodes {
 		clnts = append(clnts, newClient(nodes[i].URL))
 	}
 
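One subtlety in this hunk: `nodes` is a slice, so `for i := range nodes` is the long-standing range-over-slice form (valid in every Go version), not the Go 1.22 range-over-int form; modernize prefers it to `for i := 0; i < len(nodes); i++` whenever only the index is used. A sketch with hypothetical URLs:

```go
package main

import "fmt"

func main() {
	nodes := []string{"http://127.0.0.1:9001", "http://127.0.0.1:9002"} // hypothetical
	var clnts []string

	// Ranging over the slice and using only the index.
	for i := range nodes {
		clnts = append(clnts, nodes[i])
	}
	fmt.Println(clnts)
}
```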
@@ -310,7 +310,7 @@ func TestUnlockShouldNotTimeout(t *testing.T) {
 
 // Borrowed from mutex_test.go
 func HammerMutex(m *DRWMutex, loops int, cdone chan bool) {
-	for i := 0; i < loops; i++ {
+	for range loops {
 		m.Lock(id, source)
 		m.Unlock(context.Background())
 	}
@@ -325,10 +325,10 @@ func TestMutex(t *testing.T) {
 	}
 	c := make(chan bool)
 	m := NewDRWMutex(ds, "test")
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		go HammerMutex(m, loops, c)
 	}
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		<-c
 	}
 }
@@ -363,7 +363,7 @@ func benchmarkMutex(b *testing.B, slack, work bool) {
 		mu.Lock(id, source)
 		mu.Unlock(b.Context())
 		if work {
-			for i := 0; i < 100; i++ {
+			for range 100 {
 				foo *= 2
 				foo /= 2
 			}