Run modernize (#21546)

Executed `go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...`.

Ran `go generate ./...` afterwards to keep generated code in sync.
Author: Klaus Post
Date: 2025-08-29 04:39:48 +02:00
Committed by: GitHub
Parent: 3b7cb6512c
Commit: f0b91e5504
238 changed files with 913 additions and 1257 deletions

View File

@@ -231,7 +231,7 @@ func benchmarkGridStreamRespOnly(b *testing.B, n int) {
errFatal(remote.RegisterStreamingHandler(handlerTest, StreamHandler{
// Send 10x response.
Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, out chan<- []byte) *RemoteErr {
for i := 0; i < responses; i++ {
for i := range responses {
toSend := GetByteBuffer()[:0]
toSend = append(toSend, byte(i))
toSend = append(toSend, payload...)
@@ -407,7 +407,7 @@ func benchmarkGridStreamReqOnly(b *testing.B, n int) {
b.Fatal(err.Error())
}
got := 0
for i := 0; i < requests; i++ {
for range requests {
got++
st.Requests <- append(GetByteBuffer()[:0], payload...)
}
@@ -525,7 +525,7 @@ func benchmarkGridStreamTwoway(b *testing.B, n int) {
got := 0
sent := 0
go func() {
for i := 0; i < messages; i++ {
for range messages {
st.Requests <- append(GetByteBuffer()[:0], payload...)
if sent++; sent == messages {
close(st.Requests)
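The change repeated through this benchmark file is Go 1.22's range-over-int loop: `for i := range n` replaces the three-clause counter, and the loop variable is dropped entirely when the body never uses it. A minimal standalone sketch of both forms, using an illustrative constant name rather than anything from the diff:

```go
package main

import "fmt"

func main() {
	const responses = 3

	// Index needed: i takes the values 0, 1, 2, just like the old
	// for i := 0; i < responses; i++ form.
	for i := range responses {
		fmt.Println("response", i)
	}

	// Index unused: same number of iterations, no loop variable.
	for range responses {
		fmt.Println("tick")
	}
}
```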

View File

@@ -47,7 +47,7 @@ import (
"github.com/zeebo/xxh3"
)
func gridLogIf(ctx context.Context, err error, errKind ...interface{}) {
func gridLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "grid", err, errKind...)
}
@@ -55,7 +55,7 @@ func gridLogIfNot(ctx context.Context, err error, ignored ...error) {
logger.LogIfNot(ctx, "grid", err, ignored...)
}
func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "grid", err, id, errKind...)
}
@@ -659,10 +659,7 @@ func (c *Connection) connect() {
}
sleep := defaultDialTimeout + time.Duration(rng.Int63n(int64(defaultDialTimeout)))
next := dialStarted.Add(sleep / 2)
sleep = time.Until(next).Round(time.Millisecond)
if sleep < 0 {
sleep = 0
}
sleep = max(time.Until(next).Round(time.Millisecond), 0)
gotState := c.State()
if gotState == StateShutdown {
return
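Two other modernize rewrites appear in this file: the long-form `interface{}` becomes its `any` alias (Go 1.18) in the variadic log helpers, and the clamp-to-zero block around the reconnect sleep collapses into the `max` builtin (Go 1.21). A small self-contained sketch of the `max` rewrite, with a made-up deadline:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A deadline that has already passed, so the remaining time is negative.
	next := time.Now().Add(-50 * time.Millisecond)

	// Old form:
	//   sleep := time.Until(next).Round(time.Millisecond)
	//   if sleep < 0 {
	//       sleep = 0
	//   }

	// New form: the negative duration is clamped to zero in one expression.
	sleep := max(time.Until(next).Round(time.Millisecond), 0)
	fmt.Println(sleep) // prints 0s
}
```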

View File

@@ -22,6 +22,7 @@ import (
"context"
"errors"
"fmt"
"maps"
"os"
"runtime"
"strconv"
@@ -266,9 +267,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) {
// Handles incoming requests, returns a response
handler1 := func(req *MSS) (resp *MSS, err *RemoteErr) {
resp = h1.NewResponse()
for k, v := range *req {
(*resp)[k] = v
}
maps.Copy((*resp), *req)
return resp, nil
}
// Return error
@@ -708,7 +707,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) {
Handle: func(ctx context.Context, payload []byte, request <-chan []byte, resp chan<- []byte) *RemoteErr {
// Send many responses.
// Test that this doesn't block.
for i := byte(0); i < 100; i++ {
for i := range byte(100) {
select {
case resp <- []byte{i}:
// ok
@@ -744,7 +743,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) {
<-serverSent
// Now do 100 other requests to ensure that the server doesn't block.
for i := 0; i < 100; i++ {
for range 100 {
_, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload))
errFatal(err)
}
@@ -820,13 +819,13 @@ func testServerInCongestion(t *testing.T, local, remote *Manager) {
// Start sending requests.
go func() {
for i := byte(0); i < 100; i++ {
for i := range byte(100) {
st.Requests <- []byte{i}
}
close(st.Requests)
}()
// Now do 100 other requests to ensure that the server doesn't block.
for i := 0; i < 100; i++ {
for range 100 {
_, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload))
errFatal(err)
}
@@ -897,7 +896,7 @@ func testGenericsStreamRoundtrip(t *testing.T, local, remote *Manager) {
errFatal(err)
go func() {
defer close(stream.Requests)
for i := 0; i < payloads; i++ {
for i := range payloads {
// t.Log("sending new client request")
stream.Requests <- &testRequest{Num: i, String: testPayload}
}
@@ -974,7 +973,7 @@ func testGenericsStreamRoundtripSubroute(t *testing.T, local, remote *Manager) {
errFatal(err)
go func() {
defer close(stream.Requests)
for i := 0; i < payloads; i++ {
for i := range payloads {
// t.Log("sending new client request")
stream.Requests <- &testRequest{Num: i, String: testPayload}
}
@@ -1019,7 +1018,7 @@ func testServerStreamResponseBlocked(t *testing.T, local, remote *Manager) {
Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, resp chan<- []byte) *RemoteErr {
// Send many responses.
// Test that this doesn't block.
for i := byte(0); i < 100; i++ {
for i := range byte(100) {
select {
case resp <- []byte{i}:
// ok
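Besides more range-over-int loops, this test file picks up `maps.Copy` from the standard-library `maps` package (Go 1.21), which is why the import block gains a `"maps"` entry. A minimal sketch of the copy rewrite, using throwaway map names:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"abc": "def", "ghi": "jkl"}
	dst := make(map[string]string, len(src))

	// Old form:
	//   for k, v := range src {
	//       dst[k] = v
	//   }

	// New form: copies every key/value pair from src into dst.
	maps.Copy(dst, src)
	fmt.Println(dst)
}
```

The `for i := range byte(100)` loops in the same hunks work because ranging over an expression of an integer type gives the loop variable that same type, so `resp <- []byte{i}` keeps compiling without a conversion.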

View File

@@ -411,7 +411,7 @@ func NewJSONPool[T any]() *JSONPool[T] {
}
return &JSONPool[T]{
pool: sync.Pool{
New: func() interface{} {
New: func() any {
var t T
return &t
},
@@ -700,7 +700,7 @@ func (j *Array[T]) UnmarshalMsg(bytes []byte) ([]byte, error) {
} else {
j.val = j.val[:0]
}
for i := uint32(0); i < l; i++ {
for range l {
v := j.p.newE()
bytes, err = v.UnmarshalMsg(bytes)
if err != nil {
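The `any` rewrite also lands inside the generic pool's `New` callback; since `sync.Pool.New` is declared as `func() any`, the alias is a drop-in replacement for `interface{}`. A self-contained sketch of that pattern with a hypothetical pool type, not the `JSONPool` from the diff:

```go
package main

import (
	"fmt"
	"sync"
)

// typedPool is a hypothetical generic wrapper used only for illustration.
type typedPool[T any] struct {
	pool sync.Pool
}

func newTypedPool[T any]() *typedPool[T] {
	return &typedPool[T]{
		pool: sync.Pool{
			// sync.Pool.New has type func() any, so `any` replaces
			// `interface{}` without any other change.
			New: func() any {
				var t T
				return &t
			},
		},
	}
}

func main() {
	p := newTypedPool[int]()
	v := p.pool.Get().(*int)
	*v = 42
	fmt.Println(*v)
	p.pool.Put(v)
}
```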

View File

@@ -81,8 +81,8 @@ func TestMarshalUnmarshalMSSNil(t *testing.T) {
func BenchmarkMarshalMsgMSS(b *testing.B) {
v := MSS{"abc": "def", "ghi": "jkl"}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
v.MarshalMsg(nil)
}
}
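These benchmark hunks replace `b.ResetTimer()` plus the classic `for i := 0; i < b.N; i++` loop with Go 1.24's `testing.B.Loop`, which resets and manages the timer itself, so both the explicit reset and the counter disappear. A minimal sketch with an illustrative benchmark, not one from the diff:

```go
package example

import "testing"

// BenchmarkAppendByte is illustrative only; run it with `go test -bench=.`.
func BenchmarkAppendByte(b *testing.B) {
	buf := make([]byte, 0, 1024)
	b.ReportAllocs()

	// b.Loop reports whether the benchmark should keep iterating and
	// handles timing, replacing ResetTimer plus the b.N loop.
	for b.Loop() {
		buf = append(buf[:0], 'x')
	}
}
```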
@@ -93,8 +93,8 @@ func BenchmarkAppendMsgMSS(b *testing.B) {
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
@@ -104,8 +104,8 @@ func BenchmarkUnmarshalMSS(b *testing.B) {
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)