Mirror of https://github.com/minio/minio.git (synced 2025-11-07 12:52:58 -05:00)
Run modernize (#21546)
`go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...` was executed. `go generate ./...` was run afterwards to keep generated code up to date.
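The hunks below are mechanical and fall into a few recurring patterns: `interface{}` becomes `any`, counted loops become `for range n`, hand-rolled lookup and copy loops become `slices.Contains`/`slices.ContainsFunc`/`maps.Copy`, `strings.Split` inside range loops becomes `strings.SplitSeq`, benchmark loops move to `b.Loop()`, and now-redundant `x := x` loop-variable copies are dropped (Go 1.22+ gives each iteration a fresh loop variable). As a quick orientation, here is a minimal, self-contained sketch of those rewrites; it is illustrative only and not code from this repository, and the post-modernize forms assume Go 1.24+ for `strings.SplitSeq`.

// modernize_sketch.go - illustrative only; shows the post-modernize forms,
// with the pre-modernize equivalents noted in comments. Assumes Go 1.24+.
package main

import (
	"fmt"
	"maps"
	"slices"
	"strings"
)

// Before: func printAll(vals ...interface{})
func printAll(vals ...any) {
	// Before: for i := 0; i < len(vals); i++ { ... }
	for i := range vals {
		fmt.Println(i, vals[i])
	}
}

func main() {
	src := map[string]string{"a": "1", "b": "2"}
	dst := make(map[string]string, len(src))
	// Before: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)

	// Before: found := false; for _, k := range keys { if k == "b" { found = true; break } }
	found := slices.Contains([]string{"a", "b"}, "b")

	// Before: for _, part := range strings.Split("x,y,z", ",") { ... }
	for part := range strings.SplitSeq("x,y,z", ",") {
		fmt.Println(part)
	}

	printAll(dst, found)
}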
@@ -46,7 +46,6 @@ func TestISO8601Format(t *testing.T) {
}

for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.expectedOutput, func(t *testing.T) {
gotOutput := ISO8601Format(testCase.date)
t.Log("Go", testCase.date.Format(iso8601TimeFormat))

@@ -44,7 +44,6 @@ func TestParse(t *testing.T) {
}

for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.timeStr, func(t *testing.T) {
gott, goterr := Parse(testCase.timeStr)
if !errors.Is(goterr, testCase.expectedErr) {
@@ -111,16 +111,16 @@ const (

// Credentials holds access and secret keys.
type Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty" yaml:"-"`
Status string `xml:"-" json:"status,omitempty"`
ParentUser string `xml:"-" json:"parentUser,omitempty"`
Groups []string `xml:"-" json:"groups,omitempty"`
Claims map[string]interface{} `xml:"-" json:"claims,omitempty"`
Name string `xml:"-" json:"name,omitempty"`
Description string `xml:"-" json:"description,omitempty"`
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"`
Expiration time.Time `xml:"Expiration" json:"expiration" yaml:"-"`
Status string `xml:"-" json:"status,omitempty"`
ParentUser string `xml:"-" json:"parentUser,omitempty"`
Groups []string `xml:"-" json:"groups,omitempty"`
Claims map[string]any `xml:"-" json:"claims,omitempty"`
Name string `xml:"-" json:"name,omitempty"`
Description string `xml:"-" json:"description,omitempty"`

// Deprecated: In favor of Description - when reading credentials from
// storage the value of this field is placed in the Description field above
@@ -196,7 +196,7 @@ var timeSentinel = time.Unix(0, 0).UTC()
var ErrInvalidDuration = errors.New("invalid token expiry")

// ExpToInt64 - convert input interface value to int64.
func ExpToInt64(expI interface{}) (expAt int64, err error) {
func ExpToInt64(expI any) (expAt int64, err error) {
switch exp := expI.(type) {
case string:
expAt, err = strconv.ParseInt(exp, 10, 64)

@@ -293,7 +293,7 @@ func GenerateSecretKey(length int, random io.Reader) (string, error) {
}

// GetNewCredentialsWithMetadata generates and returns new credential with expiry.
func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string) (Credentials, error) {
func GetNewCredentialsWithMetadata(m map[string]any, tokenSecret string) (Credentials, error) {
accessKey, secretKey, err := GenerateCredentials()
if err != nil {
return Credentials{}, err

@@ -303,7 +303,7 @@ func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string)

// CreateNewCredentialsWithMetadata - creates new credentials using the specified access & secret keys
// and generate a session token if a secret token is provided.
func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]interface{}, tokenSecret string) (cred Credentials, err error) {
func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]any, tokenSecret string) (cred Credentials, err error) {
if len(accessKey) < accessKeyMinLen || len(accessKey) > accessKeyMaxLen {
return Credentials{}, ErrInvalidAccessKeyLength
}

@@ -336,7 +336,7 @@ func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]
}

// JWTSignWithAccessKey - generates a session token.
func JWTSignWithAccessKey(accessKey string, m map[string]interface{}, tokenSecret string) (string, error) {
func JWTSignWithAccessKey(accessKey string, m map[string]any, tokenSecret string) (string, error) {
m["accessKey"] = accessKey
jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m))
return jwt.SignedString([]byte(tokenSecret))

@@ -362,7 +362,7 @@ func ExtractClaims(token, secretKey string) (*jwt.MapClaims, error) {

// GetNewCredentials generates and returns new credential.
func GetNewCredentials() (cred Credentials, err error) {
return GetNewCredentialsWithMetadata(map[string]interface{}{}, "")
return GetNewCredentialsWithMetadata(map[string]any{}, "")
}

// CreateCredentials returns new credential with the given access key and secret key.
@@ -25,7 +25,7 @@ import (

func TestExpToInt64(t *testing.T) {
testCases := []struct {
exp interface{}
exp any
expectedFailure bool
}{
{"", true},

@@ -42,7 +42,6 @@ func TestExpToInt64(t *testing.T) {
}

for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
_, err := ExpToInt64(testCase.exp)
if err != nil && !testCase.expectedFailure {

@@ -71,7 +71,7 @@ func TestBytePool(t *testing.T) {
}

// lets drain the buf channel first before we validate invalid buffers.
for i := uint64(0); i < size; i++ {
for range size {
bp.Get() // discard
}
@@ -21,6 +21,7 @@ package bandwidth

import (
"context"
"slices"
"sync"
"time"

@@ -83,12 +84,7 @@ func SelectBuckets(buckets ...string) SelectionFunction {
}
}
return func(bucket string) bool {
for _, bkt := range buckets {
if bkt == bucket {
return true
}
}
return false
return slices.Contains(buckets, bucket)
}
}

@@ -99,7 +99,6 @@ func TestMonitor_GetReport(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
thr := bucketThrottle{

@@ -29,7 +29,7 @@ type Error struct {

// Errorf - formats according to a format specifier and returns
// the string as a value that satisfies error of type tagging.Error
func Errorf(format string, a ...interface{}) error {
func Errorf(format string, a ...any) error {
return Error{err: fmt.Errorf(format, a...)}
}
@@ -738,7 +738,6 @@ func TestEval(t *testing.T) {
}

for _, tc := range testCases {
tc := tc
t.Run("", func(t *testing.T) {
lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig)))
if err != nil {

@@ -823,7 +822,6 @@ func TestHasActiveRules(t *testing.T) {
}

for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) {
lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig)))
if err != nil {
@@ -24,6 +24,7 @@ import (
"errors"
"fmt"
"io"
"maps"
"net/http"
"net/textproto"
"strings"

@@ -601,9 +602,7 @@ func FilterObjectLockMetadata(metadata map[string]string, filterRetention, filte
}
if !copied {
dst = make(map[string]string, len(metadata))
for k, v := range metadata {
dst[k] = v
}
maps.Copy(dst, metadata)
copied = true
}
delete(dst, key)

@@ -174,7 +174,6 @@ func TestParseObjectLockConfig(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run("", func(t *testing.T) {
_, err := ParseObjectLockConfig(strings.NewReader(tt.value))
//nolint:gocritic

@@ -219,7 +218,6 @@ func TestParseObjectRetention(t *testing.T) {
},
}
for _, tt := range tests {
tt := tt
t.Run("", func(t *testing.T) {
_, err := ParseObjectRetention(strings.NewReader(tt.value))
//nolint:gocritic
@@ -29,7 +29,7 @@ type Error struct {

// Errorf - formats according to a format specifier and returns
// the string as a value that satisfies error of type tagging.Error
func Errorf(format string, a ...interface{}) error {
func Errorf(format string, a ...any) error {
return Error{err: fmt.Errorf(format, a...)}
}

@@ -296,7 +296,6 @@ func TestReplicate(t *testing.T) {
}

for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.opts.Name, func(t *testing.T) {
result := testCase.c.Replicate(testCase.opts)
if result != testCase.expectedResult {

@@ -352,7 +351,6 @@ func TestHasActiveRules(t *testing.T) {
}

for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) {
cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig)))
if err != nil {

@@ -402,7 +400,6 @@ func TestFilterActionableRules(t *testing.T) {
},
}
for _, tc := range testCases {
tc := tc
cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig)))
if err != nil {
t.Fatalf("Got unexpected error: %v", err)

@@ -139,7 +139,7 @@ type Rule struct {
Destination Destination `xml:"Destination" json:"Destination"`
SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"`
Filter Filter `xml:"Filter" json:"Filter"`
ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"`
ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication"`
}

var (

@@ -57,7 +57,6 @@ func TestMetadataReplicate(t *testing.T) {
}

for i, tc := range testCases {
tc := tc
t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) {
cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig)))
if err != nil {

@@ -29,7 +29,7 @@ type Error struct {

// Errorf - formats according to a format specifier and returns
// the string as a value that satisfies error of type tagging.Error
func Errorf(format string, a ...interface{}) error {
func Errorf(format string, a ...any) error {
return Error{err: fmt.Errorf(format, a...)}
}
@@ -31,119 +31,119 @@ var (
return !color.NoColor
}

Bold = func() func(format string, a ...interface{}) string {
Bold = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.Bold).SprintfFunc()
}
return fmt.Sprintf
}()

RedBold = func() func(a ...interface{}) string {
RedBold = func() func(a ...any) string {
if IsTerminal() {
return color.New(color.FgRed, color.Bold).SprintFunc()
}
return fmt.Sprint
}()

RedBoldf = func() func(format string, a ...interface{}) string {
RedBoldf = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgRed, color.Bold).SprintfFunc()
}
return fmt.Sprintf
}()

Red = func() func(format string, a ...interface{}) string {
Red = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgRed).SprintfFunc()
}
return fmt.Sprintf
}()

Blue = func() func(format string, a ...interface{}) string {
Blue = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgBlue).SprintfFunc()
}
return fmt.Sprintf
}()

Yellow = func() func(format string, a ...interface{}) string {
Yellow = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgYellow).SprintfFunc()
}
return fmt.Sprintf
}()

Green = func() func(a ...interface{}) string {
Green = func() func(a ...any) string {
if IsTerminal() {
return color.New(color.FgGreen).SprintFunc()
}
return fmt.Sprint
}()

Greenf = func() func(format string, a ...interface{}) string {
Greenf = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgGreen).SprintfFunc()
}
return fmt.Sprintf
}()

GreenBold = func() func(a ...interface{}) string {
GreenBold = func() func(a ...any) string {
if IsTerminal() {
return color.New(color.FgGreen, color.Bold).SprintFunc()
}
return fmt.Sprint
}()

CyanBold = func() func(a ...interface{}) string {
CyanBold = func() func(a ...any) string {
if IsTerminal() {
return color.New(color.FgCyan, color.Bold).SprintFunc()
}
return fmt.Sprint
}()

YellowBold = func() func(format string, a ...interface{}) string {
YellowBold = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgYellow, color.Bold).SprintfFunc()
}
return fmt.Sprintf
}()

BlueBold = func() func(format string, a ...interface{}) string {
BlueBold = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgBlue, color.Bold).SprintfFunc()
}
return fmt.Sprintf
}()

BgYellow = func() func(format string, a ...interface{}) string {
BgYellow = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.BgYellow).SprintfFunc()
}
return fmt.Sprintf
}()

Black = func() func(format string, a ...interface{}) string {
Black = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgBlack).SprintfFunc()
}
return fmt.Sprintf
}()

FgRed = func() func(a ...interface{}) string {
FgRed = func() func(a ...any) string {
if IsTerminal() {
return color.New(color.FgRed).SprintFunc()
}
return fmt.Sprint
}()

BgRed = func() func(format string, a ...interface{}) string {
BgRed = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.BgRed).SprintfFunc()
}
return fmt.Sprintf
}()

FgWhite = func() func(format string, a ...interface{}) string {
FgWhite = func() func(format string, a ...any) string {
if IsTerminal() {
return color.New(color.FgWhite).SprintfFunc()
}
@@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"math"
"slices"
"strconv"
"strings"
"time"

@@ -224,10 +225,8 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
corsAllowOrigin = []string{"*"} // defaults to '*'
} else {
corsAllowOrigin = strings.Split(corsList, ",")
for _, cors := range corsAllowOrigin {
if cors == "" {
return cfg, errors.New("invalid cors value")
}
if slices.Contains(corsAllowOrigin, "") {
return cfg, errors.New("invalid cors value")
}
}
cfg.CorsAllowOrigin = corsAllowOrigin

@@ -41,7 +41,6 @@ func TestParseCompressIncludes(t *testing.T) {
}

for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.str, func(t *testing.T) {
gotPatterns, err := parseCompressIncludes(testCase.str)
if !testCase.success && err == nil {
@@ -21,7 +21,9 @@ import (
"bufio"
"fmt"
"io"
"maps"
"regexp"
"slices"
"sort"
"strings"
"sync"

@@ -61,7 +63,7 @@ type ErrConfigNotFound struct {
func Error[T ErrorConfig, PT interface {
*T
setMsg(string)
}](format string, vals ...interface{},
}](format string, vals ...any,
) T {
pt := PT(new(T))
pt.setMsg(fmt.Sprintf(format, vals...))

@@ -69,7 +71,7 @@ func Error[T ErrorConfig, PT interface {
}

// Errorf formats an error and returns it as a generic config error
func Errorf(format string, vals ...interface{}) ErrConfigGeneric {
func Errorf(format string, vals ...any) ErrConfigGeneric {
return Error[ErrConfigGeneric](format, vals...)
}

@@ -238,9 +240,7 @@ var DefaultKVS = map[string]KVS{}
// globally, this should be called only once preferably
// during `init()`.
func RegisterDefaultKVS(kvsMap map[string]KVS) {
for subSys, kvs := range kvsMap {
DefaultKVS[subSys] = kvs
}
maps.Copy(DefaultKVS, kvsMap)
}

// HelpSubSysMap - help for all individual KVS for each sub-systems

@@ -253,9 +253,7 @@ var HelpSubSysMap = map[string]HelpKVS{}
// this function should be called only once
// preferably in during `init()`.
func RegisterHelpSubSys(helpKVSMap map[string]HelpKVS) {
for subSys, hkvs := range helpKVSMap {
HelpSubSysMap[subSys] = hkvs
}
maps.Copy(HelpSubSysMap, helpKVSMap)
}

// HelpDeprecatedSubSysMap - help for all deprecated sub-systems, that may be

@@ -265,9 +263,7 @@ var HelpDeprecatedSubSysMap = map[string]HelpKV{}
// RegisterHelpDeprecatedSubSys - saves input help KVS for deprecated
// sub-systems globally. Should be called only once at init.
func RegisterHelpDeprecatedSubSys(helpDeprecatedKVMap map[string]HelpKV) {
for k, v := range helpDeprecatedKVMap {
HelpDeprecatedSubSysMap[k] = v
}
maps.Copy(HelpDeprecatedSubSysMap, helpDeprecatedKVMap)
}

// KV - is a shorthand of each key value.

@@ -353,9 +349,7 @@ func Merge(cfgKVS map[string]KVS, envname string, defaultKVS KVS) map[string]KVS
}
newCfgKVS[tgt] = defaultKVS
}
for tgt, kv := range cfgKVS {
newCfgKVS[tgt] = kv
}
maps.Copy(newCfgKVS, cfgKVS)
return newCfgKVS
}

@@ -642,11 +636,8 @@ func CheckValidKeys(subSys string, kv KVS, validKVS KVS, deprecatedKeys ...strin
continue
}
var skip bool
for _, deprecatedKey := range deprecatedKeys {
if kv.Key == deprecatedKey {
skip = true
break
}
if slices.Contains(deprecatedKeys, kv.Key) {
skip = true
}
if skip {
continue

@@ -852,7 +843,7 @@ func (c Config) DelKVS(s string) error {
if len(inputs) == 2 {
currKVS := ck.Clone()
defKVS := DefaultKVS[subSys]
for _, delKey := range strings.Fields(inputs[1]) {
for delKey := range strings.FieldsSeq(inputs[1]) {
_, ok := currKVS.Lookup(delKey)
if !ok {
return Error[ErrConfigNotFound]("key %s doesn't exist", delKey)

@@ -1407,13 +1398,7 @@ func (c Config) GetSubsysInfo(subSys, target string, redactSecrets bool) ([]Subs
}

if target != "" {
found := false
for _, t := range targets {
if t == target {
found = true
break
}
}
found := slices.Contains(targets, target)
if !found {
return nil, Errorf("there is no target `%s` for subsystem `%s`", target, subSys)
}
@@ -88,7 +88,6 @@ func TestKVFields(t *testing.T) {
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
gotFields := kvFields(test.input, test.keys)
if len(gotFields) != len(test.expectedFields) {

@@ -100,7 +100,7 @@ func BenchmarkEncrypt(b *testing.B) {
context = kms.Context{"key": "value"}
)
b.SetBytes(int64(size))
for i := 0; i < b.N; i++ {
for b.Loop() {
ciphertext, err := Encrypt(KMS, plaintext, context)
if err != nil {
b.Fatal(err)
@@ -65,7 +65,7 @@ func (u Err) Msg(m string) Err {
}

// Msgf - Replace the current error's message
func (u Err) Msgf(m string, args ...interface{}) Err {
func (u Err) Msgf(m string, args ...any) Err {
e := u.Clone()
if len(args) == 0 {
e.msg = m

@@ -76,7 +76,7 @@ func (u Err) Msgf(m string, args ...interface{}) Err {
}

// Hint - Replace the current error's message
func (u Err) Hint(m string, args ...interface{}) Err {
func (u Err) Hint(m string, args ...any) Err {
e := u.Clone()
e.hint = fmt.Sprintf(m, args...)
return e

@@ -49,7 +49,6 @@ func TestParseEndpoints(t *testing.T) {
}

for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.s, func(t *testing.T) {
endpoints, secure, err := parseEndpoints(testCase.s)
if err != nil && testCase.success {
@@ -38,7 +38,7 @@ type publicKeys struct {
*sync.RWMutex

// map of kid to public key
pkMap map[string]interface{}
pkMap map[string]any
}

func (pk *publicKeys) parseAndAdd(b io.Reader) error {

@@ -59,14 +59,14 @@ func (pk *publicKeys) parseAndAdd(b io.Reader) error {
return nil
}

func (pk *publicKeys) add(keyID string, key interface{}) {
func (pk *publicKeys) add(keyID string, key any) {
pk.Lock()
defer pk.Unlock()

pk.pkMap[keyID] = key
}

func (pk *publicKeys) get(kid string) interface{} {
func (pk *publicKeys) get(kid string) any {
pk.RLock()
defer pk.RUnlock()
return pk.pkMap[kid]

@@ -103,7 +103,7 @@ var (
ErrTokenExpired = errors.New("token expired")
)

func updateClaimsExpiry(dsecs string, claims map[string]interface{}) error {
func updateClaimsExpiry(dsecs string, claims map[string]any) error {
expStr := claims["exp"]
if expStr == "" {
return ErrTokenExpired

@@ -133,7 +133,7 @@ const (
)

// Validate - validates the id_token.
func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, dsecs string, claims map[string]interface{}) error {
func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, dsecs string, claims map[string]any) error {
jp := new(jwtgo.Parser)
jp.ValidMethods = []string{
"RS256", "RS384", "RS512",

@@ -143,7 +143,7 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken,
"ES3256", "ES3384", "ES3512",
}

keyFuncCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
keyFuncCallback := func(jwtToken *jwtgo.Token) (any, error) {
kid, ok := jwtToken.Header["kid"].(string)
if !ok {
return nil, fmt.Errorf("Invalid kid value %v", jwtToken.Header["kid"])

@@ -221,7 +221,7 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken,
return nil
}

func (r *Config) updateUserinfoClaims(ctx context.Context, arn arn.ARN, accessToken string, claims map[string]interface{}) error {
func (r *Config) updateUserinfoClaims(ctx context.Context, arn arn.ARN, accessToken string, claims map[string]any) error {
pCfg, ok := r.arnProviderCfgsMap[arn]
// If claim user info is enabled, get claims from userInfo
// and overwrite them with the claims from JWT.
@@ -39,7 +39,7 @@ import (

func TestUpdateClaimsExpiry(t *testing.T) {
testCases := []struct {
exp interface{}
exp any
dsecs string
expectedFailure bool
}{

@@ -58,9 +58,8 @@ func TestUpdateClaimsExpiry(t *testing.T) {
}

for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
claims := map[string]interface{}{}
claims := map[string]any{}
claims["exp"] = testCase.exp
err := updateClaimsExpiry(testCase.dsecs, claims)
if err != nil && !testCase.expectedFailure {

@@ -99,7 +98,7 @@ func TestJWTHMACType(t *testing.T) {
ExpiresAt: 253428928061,
Audience: "76b95ae5-33ef-4283-97b7-d2a85dc2d8f4",
},
Header: map[string]interface{}{
Header: map[string]any{
"typ": "JWT",
"alg": jwtgo.SigningMethodHS256.Alg(),
"kid": "76b95ae5-33ef-4283-97b7-d2a85dc2d8f4",

@@ -119,7 +118,7 @@ func TestJWTHMACType(t *testing.T) {

pubKeys := publicKeys{
RWMutex: &sync.RWMutex{},
pkMap: map[string]interface{}{},
pkMap: map[string]any{},
}
pubKeys.add("76b95ae5-33ef-4283-97b7-d2a85dc2d8f4", []byte("WNGvKVyyNmXq0TraSvjaDN9CtpFgx35IXtGEffMCPR0"))

@@ -165,7 +164,7 @@ func TestJWT(t *testing.T) {

pubKeys := publicKeys{
RWMutex: &sync.RWMutex{},
pkMap: map[string]interface{}{},
pkMap: map[string]any{},
}
err := pubKeys.parseAndAdd(bytes.NewBuffer([]byte(jsonkey)))
if err != nil {
@@ -22,7 +22,9 @@ import (
"encoding/base64"
"errors"
"io"
"maps"
"net/http"
"slices"
"sort"
"strconv"
"strings"

@@ -186,15 +188,9 @@ func (r *Config) Clone() Config {
transport: r.transport,
closeRespFn: r.closeRespFn,
}
for k, v := range r.arnProviderCfgsMap {
cfg.arnProviderCfgsMap[k] = v
}
for k, v := range r.ProviderCfgs {
cfg.ProviderCfgs[k] = v
}
for k, v := range r.roleArnPolicyMap {
cfg.roleArnPolicyMap[k] = v
}
maps.Copy(cfg.arnProviderCfgsMap, r.arnProviderCfgsMap)
maps.Copy(cfg.ProviderCfgs, r.ProviderCfgs)
maps.Copy(cfg.roleArnPolicyMap, r.roleArnPolicyMap)
return cfg
}

@@ -210,7 +206,7 @@ func LookupConfig(s config.Config, transport http.RoundTripper, closeRespFn func
ProviderCfgs: map[string]*providerCfg{},
pubKeys: publicKeys{
RWMutex: &sync.RWMutex{},
pkMap: map[string]interface{}{},
pkMap: map[string]any{},
},
roleArnPolicyMap: map[arn.ARN]string{},
transport: openIDClientTransport,

@@ -308,7 +304,7 @@ func LookupConfig(s config.Config, transport http.RoundTripper, closeRespFn func

if scopeList := getCfgVal(Scopes); scopeList != "" {
var scopes []string
for _, scope := range strings.Split(scopeList, ",") {
for scope := range strings.SplitSeq(scopeList, ",") {
scope = strings.TrimSpace(scope)
if scope == "" {
return c, config.Errorf("empty scope value is not allowed '%s', please refer to our documentation", scopeList)

@@ -414,13 +410,7 @@ func (r *Config) GetConfigInfo(s config.Config, cfgName string) ([]madmin.IDPCfg
return nil, err
}

present := false
for _, cfg := range openIDConfigs {
if cfg == cfgName {
present = true
break
}
}
present := slices.Contains(openIDConfigs, cfgName)

if !present {
return nil, ErrProviderConfigNotFound

@@ -113,7 +113,7 @@ func (p *providerCfg) GetRoleArn() string {
// claims as part of the normal oauth2 flow, instead rely
// on service providers making calls to IDP to fetch additional
// claims available from the UserInfo endpoint
func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transport http.RoundTripper) (map[string]interface{}, error) {
func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transport http.RoundTripper) (map[string]any, error) {
if p.JWKS.URL == nil || p.JWKS.URL.String() == "" {
return nil, errors.New("openid not configured")
}

@@ -147,7 +147,7 @@ func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transpor
return nil, errors.New(resp.Status)
}

claims := map[string]interface{}{}
claims := map[string]any{}
if err = json.NewDecoder(resp.Body).Decode(&claims); err != nil {
// uncomment this for debugging when needed.
// reqBytes, _ := httputil.DumpRequest(req, false)
@@ -333,9 +333,9 @@ func New(shutdownCtx context.Context, args Args) *AuthNPlugin {
// AuthNSuccessResponse - represents the response from the authentication plugin
// service.
type AuthNSuccessResponse struct {
User string `json:"user"`
MaxValiditySeconds int `json:"maxValiditySeconds"`
Claims map[string]interface{} `json:"claims"`
User string `json:"user"`
MaxValiditySeconds int `json:"maxValiditySeconds"`
Claims map[string]any `json:"claims"`
}

// AuthNErrorResponse - represents an error response from the authN plugin.

@@ -17,6 +17,8 @@

package event

import "maps"

// TargetIDSet - Set representation of TargetIDs.
type TargetIDSet map[TargetID]struct{}

@@ -28,9 +30,7 @@ func (set TargetIDSet) IsEmpty() bool {
// Clone - returns copy of this set.
func (set TargetIDSet) Clone() TargetIDSet {
setCopy := NewTargetIDSet()
for k, v := range set {
setCopy[k] = v
}
maps.Copy(setCopy, set)
return setCopy
}

@@ -19,6 +19,7 @@ package event

import (
"fmt"
"maps"
"net/http"
"strings"
"sync"

@@ -151,9 +152,7 @@ func (list *TargetList) TargetMap() map[TargetID]Target {
defer list.RUnlock()

ntargets := make(map[TargetID]Target, len(list.targets))
for k, v := range list.targets {
ntargets[k] = v
}
maps.Copy(ntargets, list.targets)
return ntargets
}
@@ -35,7 +35,7 @@ const (
logSubsys = "notify"
)

func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func logOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, logSubsys, err, id, errKind...)
}

@@ -45,7 +45,7 @@ const (
logSubsys = "notify"
)

func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func logOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, logSubsys, err, id, errKind...)
}

@@ -412,7 +412,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
if len(kafkaBrokers) == 0 {
return nil, config.Errorf("kafka 'brokers' cannot be empty")
}
for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) {
for s := range strings.SplitSeq(kafkaBrokers, config.ValueSeparator) {
var host *xnet.Host
host, err = xnet.ParseHost(s)
if err != nil {

@@ -170,7 +170,7 @@ func (o *Opa) IsAllowed(args policy.Args) (bool, error) {
}

// OPA input
body := make(map[string]interface{})
body := make(map[string]any)
body["input"] = args

inputBytes, err := json.Marshal(body)

@@ -185,7 +185,7 @@ func (o *AuthZPlugin) IsAllowed(args policy.Args) (bool, error) {
}

// Access Management Plugin Input
body := make(map[string]interface{})
body := make(map[string]any)
body["input"] = args

inputBytes, err := json.Marshal(body)
@@ -147,7 +147,7 @@ func (sc *StorageClass) UnmarshalText(b []byte) error {
// MarshalText - marshals storage class string.
func (sc *StorageClass) MarshalText() ([]byte, error) {
if sc.Parity != 0 {
return []byte(fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)), nil
return fmt.Appendf(nil, "%s:%d", schemePrefix, sc.Parity), nil
}
return []byte{}, nil
}

@@ -430,6 +430,6 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
return cfg, nil
}

func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "config", err, id, errKind...)
}

@@ -95,7 +95,7 @@ func (c Config) submitPost(r *http.Request) (string, error) {
}

// Post submit 'payload' to specified URL
func (c Config) Post(reqURL string, payload interface{}) (string, error) {
func (c Config) Post(reqURL string, payload any) (string, error) {
if !c.Registered() {
return "", errors.New("Deployment is not registered with SUBNET. Please register the deployment via 'mc license register ALIAS'")
}

@@ -32,7 +32,7 @@ type Error struct {

// Errorf - formats according to a format specifier and returns
// the string as a value that satisfies error of type crypto.Error
func Errorf(format string, a ...interface{}) error {
func Errorf(format string, a ...any) error {
e := fmt.Errorf(format, a...)
ee := Error{}
ee.msg = e.Error()
@@ -21,6 +21,7 @@ import (
"context"
"errors"
"math/rand"
"slices"
"sort"
"strconv"
"sync"

@@ -60,7 +61,7 @@ func init() {
)
}

func log(format string, data ...interface{}) {
func log(format string, data ...any) {
if dsyncLog {
console.Printf(format, data...)
}

@@ -621,13 +622,7 @@ func (dm *DRWMutex) Unlock(ctx context.Context) {
defer dm.m.Unlock()

// Check if minimally a single bool is set in the writeLocks array
lockFound := false
for _, uid := range dm.writeLocks {
if isLocked(uid) {
lockFound = true
break
}
}
lockFound := slices.ContainsFunc(dm.writeLocks, isLocked)
if !lockFound {
panic("Trying to Unlock() while no Lock() is active")
}

@@ -672,13 +667,7 @@ func (dm *DRWMutex) RUnlock(ctx context.Context) {
defer dm.m.Unlock()

// Check if minimally a single bool is set in the writeLocks array
lockFound := false
for _, uid := range dm.readLocks {
if isLocked(uid) {
lockFound = true
break
}
}
lockFound := slices.ContainsFunc(dm.readLocks, isLocked)
if !lockFound {
panic("Trying to RUnlock() while no RLock() is active")
}
@@ -157,18 +157,18 @@ func doTestParallelReaders(numReaders, gomaxprocs int) {
clocked := make(chan bool)
cunlock := make(chan bool)
cdone := make(chan bool)
for i := 0; i < numReaders; i++ {
for range numReaders {
go parallelReader(context.Background(), m, clocked, cunlock, cdone)
}
// Wait for all parallel RLock()s to succeed.
for i := 0; i < numReaders; i++ {
for range numReaders {
<-clocked
}
for i := 0; i < numReaders; i++ {
for range numReaders {
cunlock <- true
}
// Wait for the goroutines to finish.
for i := 0; i < numReaders; i++ {
for range numReaders {
<-cdone
}
}

@@ -184,13 +184,13 @@ func TestParallelReaders(t *testing.T) {
// Borrowed from rwmutex_test.go
func reader(resource string, numIterations int, activity *int32, cdone chan bool) {
rwm := NewDRWMutex(ds, resource)
for i := 0; i < numIterations; i++ {
for range numIterations {
if rwm.GetRLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) {
n := atomic.AddInt32(activity, 1)
if n < 1 || n >= 10000 {
panic(fmt.Sprintf("wlock(%d)\n", n))
}
for i := 0; i < 100; i++ {
for range 100 {
}
atomic.AddInt32(activity, -1)
rwm.RUnlock(context.Background())

@@ -202,13 +202,13 @@ func reader(resource string, numIterations int, activity *int32, cdone chan bool
// Borrowed from rwmutex_test.go
func writer(resource string, numIterations int, activity *int32, cdone chan bool) {
rwm := NewDRWMutex(ds, resource)
for i := 0; i < numIterations; i++ {
for range numIterations {
if rwm.GetLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) {
n := atomic.AddInt32(activity, 10000)
if n != 10000 {
panic(fmt.Sprintf("wlock(%d)\n", n))
}
for i := 0; i < 100; i++ {
for range 100 {
}
atomic.AddInt32(activity, -10000)
rwm.Unlock(context.Background())
@@ -149,13 +149,13 @@ func (lh *lockServerHandler) RLockHandler(w http.ResponseWriter, r *http.Request
}

func stopLockServers() {
for i := 0; i < numberOfNodes; i++ {
for i := range numberOfNodes {
nodes[i].Close()
}
}

func startLockServers() {
for i := 0; i < numberOfNodes; i++ {
for i := range numberOfNodes {
lsrv := &lockServer{
mutex: sync.Mutex{},
lockMap: make(map[string]int64),

@@ -42,7 +42,7 @@ func TestMain(m *testing.M) {

// Initialize locker clients for dsync.
var clnts []NetLocker
for i := 0; i < len(nodes); i++ {
for i := range nodes {
clnts = append(clnts, newClient(nodes[i].URL))
}

@@ -310,7 +310,7 @@ func TestUnlockShouldNotTimeout(t *testing.T) {

// Borrowed from mutex_test.go
func HammerMutex(m *DRWMutex, loops int, cdone chan bool) {
for i := 0; i < loops; i++ {
for range loops {
m.Lock(id, source)
m.Unlock(context.Background())
}

@@ -325,10 +325,10 @@ func TestMutex(t *testing.T) {
}
c := make(chan bool)
m := NewDRWMutex(ds, "test")
for i := 0; i < 10; i++ {
for range 10 {
go HammerMutex(m, loops, c)
}
for i := 0; i < 10; i++ {
for range 10 {
<-c
}
}

@@ -363,7 +363,7 @@ func benchmarkMutex(b *testing.B, slack, work bool) {
mu.Lock(id, source)
mu.Unlock(b.Context())
if work {
for i := 0; i < 100; i++ {
for range 100 {
foo *= 2
foo /= 2
}
@@ -30,7 +30,7 @@ import (

// ValidateFilterRuleValue - checks if given value is filter rule value or not.
func ValidateFilterRuleValue(value string) error {
for _, segment := range strings.Split(value, "/") {
for segment := range strings.SplitSeq(value, "/") {
if segment == "." || segment == ".." {
return &ErrInvalidFilterValue{value}
}

@@ -139,7 +139,7 @@ func (ruleList FilterRuleList) Pattern() string {

// S3Key - represents elements inside <S3Key>...</S3Key>
type S3Key struct {
RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"`
RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key"`
}

// MarshalXML implements a custom marshaller to support `omitempty` feature.

@@ -427,13 +427,13 @@ func (c *esClientV7) getServerSupportStatus(ctx context.Context) (ESSupportStatu

defer resp.Body.Close()

m := make(map[string]interface{})
m := make(map[string]any)
err = json.NewDecoder(resp.Body).Decode(&m)
if err != nil {
return ESSUnknown, "", fmt.Errorf("unable to get ES Server version - json parse error: %v", err)
}

if v, ok := m["version"].(map[string]interface{}); ok {
if v, ok := m["version"].(map[string]any); ok {
if ver, ok := v["number"].(string); ok {
status, err := getESVersionSupportStatus(ver)
return status, ver, err

@@ -454,16 +454,16 @@ func (c *esClientV7) createIndex(args ElasticsearchArgs) error {
}
defer res.Body.Close()

var v map[string]interface{}
var v map[string]any
found := false
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return fmt.Errorf("Error parsing response body: %v", err)
}

indices, ok := v["indices"].([]interface{})
indices, ok := v["indices"].([]any)
if ok {
for _, index := range indices {
if name, ok := index.(map[string]interface{}); ok && name["name"] == args.Index {
if name, ok := index.(map[string]any); ok && name["name"] == args.Index {
found = true
break
}

@@ -529,7 +529,7 @@ func (c *esClientV7) removeEntry(ctx context.Context, index string, key string)
}

func (c *esClientV7) updateEntry(ctx context.Context, index string, key string, eventData event.Event) error {
doc := map[string]interface{}{
doc := map[string]any{
"Records": []event.Event{eventData},
}
var buf bytes.Buffer

@@ -556,7 +556,7 @@ func (c *esClientV7) updateEntry(ctx context.Context, index string, key string,
}

func (c *esClientV7) addEntry(ctx context.Context, index string, eventData event.Event) error {
doc := map[string]interface{}{
doc := map[string]any{
"Records": []event.Event{eventData},
}
var buf bytes.Buffer
@@ -19,6 +19,7 @@ package target

import (
"database/sql"
"slices"
"testing"
)

@@ -26,11 +27,8 @@ import (
// is registered and fails otherwise.
func TestMySQLRegistration(t *testing.T) {
var found bool
for _, drv := range sql.Drivers() {
if drv == "mysql" {
found = true
break
}
if slices.Contains(sql.Drivers(), "mysql") {
found = true
}
if !found {
t.Fatal("mysql driver not registered")

@@ -19,6 +19,7 @@ package target

import (
"database/sql"
"slices"
"testing"
)

@@ -26,11 +27,8 @@ import (
// is registered and fails otherwise.
func TestPostgreSQLRegistration(t *testing.T) {
var found bool
for _, drv := range sql.Drivers() {
if drv == "postgres" {
found = true
break
}
if slices.Contains(sql.Drivers(), "postgres") {
found = true
}
if !found {
t.Fatal("postgres driver not registered")
@@ -17,15 +17,15 @@

package event

import "maps"

// TargetIDSet - Set representation of TargetIDs.
type TargetIDSet map[TargetID]struct{}

// Clone - returns copy of this set.
func (set TargetIDSet) Clone() TargetIDSet {
setCopy := NewTargetIDSet()
for k, v := range set {
setCopy[k] = v
}
maps.Copy(setCopy, set)
return setCopy
}

@@ -20,6 +20,7 @@ package event
import (
"context"
"fmt"
"maps"
"runtime"
"sync"
"sync/atomic"

@@ -252,9 +253,7 @@ func (list *TargetList) TargetMap() map[TargetID]Target {
defer list.RUnlock()

ntargets := make(map[TargetID]Target, len(list.targets))
for k, v := range list.targets {
ntargets[k] = v
}
maps.Copy(ntargets, list.targets)
return ntargets
}
@@ -231,7 +231,7 @@ func benchmarkGridStreamRespOnly(b *testing.B, n int) {
errFatal(remote.RegisterStreamingHandler(handlerTest, StreamHandler{
// Send 10x response.
Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, out chan<- []byte) *RemoteErr {
for i := 0; i < responses; i++ {
for i := range responses {
toSend := GetByteBuffer()[:0]
toSend = append(toSend, byte(i))
toSend = append(toSend, payload...)

@@ -407,7 +407,7 @@ func benchmarkGridStreamReqOnly(b *testing.B, n int) {
b.Fatal(err.Error())
}
got := 0
for i := 0; i < requests; i++ {
for range requests {
got++
st.Requests <- append(GetByteBuffer()[:0], payload...)
}

@@ -525,7 +525,7 @@ func benchmarkGridStreamTwoway(b *testing.B, n int) {
got := 0
sent := 0
go func() {
for i := 0; i < messages; i++ {
for range messages {
st.Requests <- append(GetByteBuffer()[:0], payload...)
if sent++; sent == messages {
close(st.Requests)

@@ -47,7 +47,7 @@ import (
"github.com/zeebo/xxh3"
)

func gridLogIf(ctx context.Context, err error, errKind ...interface{}) {
func gridLogIf(ctx context.Context, err error, errKind ...any) {
logger.LogIf(ctx, "grid", err, errKind...)
}

@@ -55,7 +55,7 @@ func gridLogIfNot(ctx context.Context, err error, ignored ...error) {
logger.LogIfNot(ctx, "grid", err, ignored...)
}

func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...any) {
logger.LogOnceIf(ctx, "grid", err, id, errKind...)
}

@@ -659,10 +659,7 @@ func (c *Connection) connect() {
}
sleep := defaultDialTimeout + time.Duration(rng.Int63n(int64(defaultDialTimeout)))
next := dialStarted.Add(sleep / 2)
sleep = time.Until(next).Round(time.Millisecond)
if sleep < 0 {
sleep = 0
}
sleep = max(time.Until(next).Round(time.Millisecond), 0)
gotState := c.State()
if gotState == StateShutdown {
return
@@ -22,6 +22,7 @@ import (
"context"
"errors"
"fmt"
"maps"
"os"
"runtime"
"strconv"

@@ -266,9 +267,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) {
// Handles incoming requests, returns a response
handler1 := func(req *MSS) (resp *MSS, err *RemoteErr) {
resp = h1.NewResponse()
for k, v := range *req {
(*resp)[k] = v
}
maps.Copy((*resp), *req)
return resp, nil
}
// Return error

@@ -708,7 +707,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) {
Handle: func(ctx context.Context, payload []byte, request <-chan []byte, resp chan<- []byte) *RemoteErr {
// Send many responses.
// Test that this doesn't block.
for i := byte(0); i < 100; i++ {
for i := range byte(100) {
select {
case resp <- []byte{i}:
// ok

@@ -744,7 +743,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) {
<-serverSent

// Now do 100 other requests to ensure that the server doesn't block.
for i := 0; i < 100; i++ {
for range 100 {
_, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload))
errFatal(err)
}

@@ -820,13 +819,13 @@ func testServerInCongestion(t *testing.T, local, remote *Manager) {

// Start sending requests.
go func() {
for i := byte(0); i < 100; i++ {
for i := range byte(100) {
st.Requests <- []byte{i}
}
close(st.Requests)
}()
// Now do 100 other requests to ensure that the server doesn't block.
for i := 0; i < 100; i++ {
for range 100 {
_, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload))
errFatal(err)
}

@@ -897,7 +896,7 @@ func testGenericsStreamRoundtrip(t *testing.T, local, remote *Manager) {
errFatal(err)
go func() {
defer close(stream.Requests)
for i := 0; i < payloads; i++ {
for i := range payloads {
// t.Log("sending new client request")
stream.Requests <- &testRequest{Num: i, String: testPayload}
}

@@ -974,7 +973,7 @@ func testGenericsStreamRoundtripSubroute(t *testing.T, local, remote *Manager) {
errFatal(err)
go func() {
defer close(stream.Requests)
for i := 0; i < payloads; i++ {
for i := range payloads {
// t.Log("sending new client request")
stream.Requests <- &testRequest{Num: i, String: testPayload}
}

@@ -1019,7 +1018,7 @@ func testServerStreamResponseBlocked(t *testing.T, local, remote *Manager) {
Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, resp chan<- []byte) *RemoteErr {
// Send many responses.
// Test that this doesn't block.
for i := byte(0); i < 100; i++ {
for i := range byte(100) {
select {
case resp <- []byte{i}:
// ok
@@ -411,7 +411,7 @@ func NewJSONPool[T any]() *JSONPool[T] {
}
return &JSONPool[T]{
pool: sync.Pool{
New: func() interface{} {
New: func() any {
var t T
return &t
},

@@ -700,7 +700,7 @@ func (j *Array[T]) UnmarshalMsg(bytes []byte) ([]byte, error) {
} else {
j.val = j.val[:0]
}
for i := uint32(0); i < l; i++ {
for range l {
v := j.p.newE()
bytes, err = v.UnmarshalMsg(bytes)
if err != nil {

@@ -81,8 +81,8 @@ func TestMarshalUnmarshalMSSNil(t *testing.T) {
func BenchmarkMarshalMsgMSS(b *testing.B) {
v := MSS{"abc": "def", "ghi": "jkl"}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {

for b.Loop() {
v.MarshalMsg(nil)
}
}

@@ -93,8 +93,8 @@ func BenchmarkAppendMsgMSS(b *testing.B) {
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {

for b.Loop() {
bts, _ = v.MarshalMsg(bts[0:0])
}
}

@@ -104,8 +104,8 @@ func BenchmarkUnmarshalMSS(b *testing.B) {
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {

for b.Loop() {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
@@ -227,7 +227,7 @@ func TestHTTPListenerAddr(t *testing.T) {

nonLoopBackIP := getNonLoopBackIP(t)
var casePorts []string
for i := 0; i < 6; i++ {
for range 6 {
casePorts = append(casePorts, getNextPort())
}

@@ -278,7 +278,7 @@ func TestHTTPListenerAddrs(t *testing.T) {

nonLoopBackIP := getNonLoopBackIP(t)
var casePorts []string
for i := 0; i < 6; i++ {
for range 6 {
casePorts = append(casePorts, getNextPort())
}

@@ -246,7 +246,7 @@ func NewMapClaims() *MapClaims {
}

// Set Adds new arbitrary claim keys and values.
func (c *MapClaims) Set(key string, val interface{}) {
func (c *MapClaims) Set(key string, val any) {
if c == nil {
return
}

@@ -266,7 +266,7 @@ func (c *MapClaims) Lookup(key string) (value string, ok bool) {
if c == nil {
return "", false
}
var vinterface interface{}
var vinterface any
vinterface, ok = c.MapClaims[key]
if ok {
value, ok = vinterface.(string)

@@ -302,7 +302,7 @@ func (c *MapClaims) Valid() error {
}

// Map returns underlying low-level map claims.
func (c *MapClaims) Map() map[string]interface{} {
func (c *MapClaims) Map() map[string]any {
if c == nil {
return nil
}

@@ -176,7 +176,6 @@ func standardClaimsToken(claims *StandardClaims) string {
func TestParserParse(t *testing.T) {
// Iterate over test data set and run tests
for _, data := range jwtTestData {
data := data
t.Run(data.name, func(t *testing.T) {
// Parse the token
var err error
@@ -419,7 +419,7 @@ func IsPresent() (bool, error) {

func expandEndpoints(s string) ([]string, error) {
var endpoints []string
for _, endpoint := range strings.Split(s, ",") {
for endpoint := range strings.SplitSeq(s, ",") {
endpoint = strings.TrimSpace(endpoint)
if endpoint == "" {
continue

@@ -60,7 +60,7 @@ func GetAuditEntry(ctx context.Context) *audit.Entry {
}

// AuditLog - logs audit logs to all audit targets.
func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, filterKeys ...string) {
func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqClaims map[string]any, filterKeys ...string) {
auditTgts := AuditTargets()
if len(auditTgts) == 0 {
return

@@ -124,7 +124,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl
entry.API.TimeToResponse = strconv.FormatInt(timeToResponse.Nanoseconds(), 10) + "ns"
entry.API.TimeToResponseInNS = strconv.FormatInt(timeToResponse.Nanoseconds(), 10)
// We hold the lock, so we cannot call reqInfo.GetTagsMap().
tags := make(map[string]interface{}, len(reqInfo.tags))
tags := make(map[string]any, len(reqInfo.tags))
for _, t := range reqInfo.tags {
tags[t.Key] = t.Val
}

@@ -389,7 +389,7 @@ func lookupAuditKafkaConfig(scfg config.Config, cfg Config) (Config, error) {
if len(kafkaBrokers) == 0 {
return cfg, config.Errorf("kafka 'brokers' cannot be empty")
}
for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) {
for s := range strings.SplitSeq(kafkaBrokers, config.ValueSeparator) {
var host *xnet.Host
host, err = xnet.ParseHost(s)
if err != nil {

@@ -36,12 +36,12 @@ var ExitFunc = os.Exit

// Logger interface describes the methods that need to be implemented to satisfy the interface requirements.
type Logger interface {
json(msg string, args ...interface{})
quiet(msg string, args ...interface{})
pretty(msg string, args ...interface{})
json(msg string, args ...any)
quiet(msg string, args ...any)
pretty(msg string, args ...any)
}

func consoleLog(console Logger, msg string, args ...interface{}) {
func consoleLog(console Logger, msg string, args ...any) {
switch {
case jsonFlag:
// Strip escape control characters from json message
@@ -64,11 +64,11 @@ func consoleLog(console Logger, msg string, args ...interface{}) {

// Fatal prints only fatal error message with no stack trace
// it will be called for input validation failures
func Fatal(err error, msg string, data ...interface{}) {
func Fatal(err error, msg string, data ...any) {
fatal(err, msg, data...)
}

func fatal(err error, msg string, data ...interface{}) {
func fatal(err error, msg string, data ...any) {
if msg == "" {
if len(data) > 0 {
msg = fmt.Sprint(data...)
@@ -85,7 +85,7 @@ var fatalMessage fatalMsg

type fatalMsg struct{}

func (f fatalMsg) json(msg string, args ...interface{}) {
func (f fatalMsg) json(msg string, args ...any) {
var message string
if msg != "" {
message = fmt.Sprintf(msg, args...)
@@ -105,7 +105,7 @@ func (f fatalMsg) json(msg string, args ...interface{}) {
ExitFunc(1)
}

func (f fatalMsg) quiet(msg string, args ...interface{}) {
func (f fatalMsg) quiet(msg string, args ...any) {
f.pretty(msg, args...)
}

@@ -116,7 +116,7 @@ var (
bannerWidth = len(logTag) + 1
)

func (f fatalMsg) pretty(msg string, args ...interface{}) {
func (f fatalMsg) pretty(msg string, args ...any) {
// Build the passed error message
errMsg := fmt.Sprintf(msg, args...)

@@ -128,30 +128,27 @@ func (f fatalMsg) pretty(msg string, args ...interface{}) {
// message itself contains some colored text, we needed
// to use some ANSI control escapes to cursor color state
// and freely move in the screen.
for _, line := range strings.Split(errMsg, "\n") {
for line := range strings.SplitSeq(errMsg, "\n") {
if len(line) == 0 {
// No more text to print, just quit.
break
}

for {
// Save the attributes of the current cursor helps
// us save the text color of the passed error message
ansiSaveAttributes()
// Print banner with or without the log tag
if !tagPrinted {
fmt.Fprint(Output, logBanner)
tagPrinted = true
} else {
fmt.Fprint(Output, emptyBanner)
}
// Restore the text color of the error message
ansiRestoreAttributes()
ansiMoveRight(bannerWidth)
// Continue error message printing
fmt.Fprintln(Output, line)
break
// Save the attributes of the current cursor helps
// us save the text color of the passed error message
ansiSaveAttributes()
// Print banner with or without the log tag
if !tagPrinted {
fmt.Fprint(Output, logBanner)
tagPrinted = true
} else {
fmt.Fprint(Output, emptyBanner)
}
// Restore the text color of the error message
ansiRestoreAttributes()
ansiMoveRight(bannerWidth)
// Continue error message printing
fmt.Fprintln(Output, line)
}

// Exit because this is a fatal error message
@@ -162,7 +159,7 @@ type infoMsg struct{}

var info infoMsg

func (i infoMsg) json(msg string, args ...interface{}) {
func (i infoMsg) json(msg string, args ...any) {
var message string
if msg != "" {
message = fmt.Sprintf(msg, args...)
@@ -180,10 +177,10 @@ func (i infoMsg) json(msg string, args ...interface{}) {
fmt.Fprintln(Output, string(logJSON))
}

func (i infoMsg) quiet(msg string, args ...interface{}) {
func (i infoMsg) quiet(msg string, args ...any) {
}

func (i infoMsg) pretty(msg string, args ...interface{}) {
func (i infoMsg) pretty(msg string, args ...any) {
if msg == "" {
fmt.Fprintln(Output, args...)
} else {
@@ -195,7 +192,7 @@ type errorMsg struct{}

var errorMessage errorMsg

func (i errorMsg) json(msg string, args ...interface{}) {
func (i errorMsg) json(msg string, args ...any) {
var message string
if msg != "" {
message = fmt.Sprintf(msg, args...)
@@ -214,11 +211,11 @@ func (i errorMsg) json(msg string, args ...interface{}) {
fmt.Fprintln(Output, string(logJSON))
}

func (i errorMsg) quiet(msg string, args ...interface{}) {
func (i errorMsg) quiet(msg string, args ...any) {
i.pretty(msg, args...)
}

func (i errorMsg) pretty(msg string, args ...interface{}) {
func (i errorMsg) pretty(msg string, args ...any) {
if msg == "" {
fmt.Fprintln(Output, args...)
} else {
@@ -227,7 +224,7 @@ func (i errorMsg) pretty(msg string, args ...interface{}) {
}

// Error :
func Error(msg string, data ...interface{}) {
func Error(msg string, data ...any) {
if DisableLog {
return
}
@@ -235,7 +232,7 @@ func Error(msg string, data ...interface{}) {
}

// Info :
func Info(msg string, data ...interface{}) {
func Info(msg string, data ...any) {
if DisableLog {
return
}
@@ -243,7 +240,7 @@ func Info(msg string, data ...interface{}) {
}

// Startup :
func Startup(msg string, data ...interface{}) {
func Startup(msg string, data ...any) {
if DisableLog {
return
}
@@ -254,7 +251,7 @@ type startupMsg struct{}

var startup startupMsg

func (i startupMsg) json(msg string, args ...interface{}) {
func (i startupMsg) json(msg string, args ...any) {
var message string
if msg != "" {
message = fmt.Sprintf(msg, args...)
@@ -272,10 +269,10 @@ func (i startupMsg) json(msg string, args ...interface{}) {
fmt.Fprintln(Output, string(logJSON))
}

func (i startupMsg) quiet(msg string, args ...interface{}) {
func (i startupMsg) quiet(msg string, args ...any) {
}

func (i startupMsg) pretty(msg string, args ...interface{}) {
func (i startupMsg) pretty(msg string, args ...any) {
if msg == "" {
fmt.Fprintln(Output, args...)
} else {
@@ -287,7 +284,7 @@ type warningMsg struct{}

var warningMessage warningMsg

func (i warningMsg) json(msg string, args ...interface{}) {
func (i warningMsg) json(msg string, args ...any) {
var message string
if msg != "" {
message = fmt.Sprintf(msg, args...)
@@ -306,11 +303,11 @@ func (i warningMsg) json(msg string, args ...interface{}) {
fmt.Fprintln(Output, string(logJSON))
}

func (i warningMsg) quiet(msg string, args ...interface{}) {
func (i warningMsg) quiet(msg string, args ...any) {
i.pretty(msg, args...)
}

func (i warningMsg) pretty(msg string, args ...interface{}) {
func (i warningMsg) pretty(msg string, args ...any) {
if msg == "" {
fmt.Fprintln(Output, args...)
} else {
@@ -319,7 +316,7 @@ func (i warningMsg) pretty(msg string, args ...interface{}) {
}

// Warning :
func Warning(msg string, data ...interface{}) {
func Warning(msg string, data ...any) {
if DisableLog {
return
}
@@ -254,7 +254,7 @@ func HashString(input string) string {
|
||||
|
||||
// LogAlwaysIf prints a detailed error message during
|
||||
// the execution of the server.
|
||||
func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
|
||||
func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...any) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
@@ -264,7 +264,7 @@ func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...in
|
||||
// LogIf prints a detailed error message during
|
||||
// the execution of the server, if it is not an
|
||||
// ignored error.
|
||||
func LogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
|
||||
func LogIf(ctx context.Context, subsystem string, err error, errKind ...any) {
|
||||
if logIgnoreError(err) {
|
||||
return
|
||||
}
|
||||
@@ -285,7 +285,7 @@ func LogIfNot(ctx context.Context, subsystem string, err error, ignored ...error
|
||||
logIf(ctx, subsystem, err)
|
||||
}
|
||||
|
||||
func errToEntry(ctx context.Context, subsystem string, err error, errKind ...interface{}) log.Entry {
|
||||
func errToEntry(ctx context.Context, subsystem string, err error, errKind ...any) log.Entry {
|
||||
var l string
|
||||
if anonFlag {
|
||||
l = reflect.TypeOf(err).String()
|
||||
@@ -295,11 +295,11 @@ func errToEntry(ctx context.Context, subsystem string, err error, errKind ...int
|
||||
return buildLogEntry(ctx, subsystem, l, getTrace(3), errKind...)
|
||||
}
|
||||
|
||||
func logToEntry(ctx context.Context, subsystem, message string, errKind ...interface{}) log.Entry {
|
||||
func logToEntry(ctx context.Context, subsystem, message string, errKind ...any) log.Entry {
|
||||
return buildLogEntry(ctx, subsystem, message, nil, errKind...)
|
||||
}
|
||||
|
||||
func buildLogEntry(ctx context.Context, subsystem, message string, trace []string, errKind ...interface{}) log.Entry {
|
||||
func buildLogEntry(ctx context.Context, subsystem, message string, trace []string, errKind ...any) log.Entry {
|
||||
logKind := madmin.LogKindError
|
||||
if len(errKind) > 0 {
|
||||
if ek, ok := errKind[0].(madmin.LogKind); ok {
|
||||
@@ -326,7 +326,7 @@ func buildLogEntry(ctx context.Context, subsystem, message string, trace []strin
|
||||
}
|
||||
|
||||
// Copy tags. We hold read lock already.
|
||||
tags := make(map[string]interface{}, len(req.tags))
|
||||
tags := make(map[string]any, len(req.tags))
|
||||
for _, entry := range req.tags {
|
||||
tags[entry.Key] = entry.Val
|
||||
}
|
||||
@@ -379,7 +379,7 @@ func buildLogEntry(ctx context.Context, subsystem, message string, trace []strin
|
||||
entry.API.Args.Object = HashString(entry.API.Args.Object)
|
||||
entry.RemoteHost = HashString(entry.RemoteHost)
|
||||
if entry.Trace != nil {
|
||||
entry.Trace.Variables = make(map[string]interface{})
|
||||
entry.Trace.Variables = make(map[string]any)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -388,7 +388,7 @@ func buildLogEntry(ctx context.Context, subsystem, message string, trace []strin
|
||||
|
||||
// consoleLogIf prints a detailed error message during
|
||||
// the execution of the server.
|
||||
func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
|
||||
func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...any) {
|
||||
if DisableLog {
|
||||
return
|
||||
}
|
||||
@@ -403,7 +403,7 @@ func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...i
|
||||
|
||||
// logIf prints a detailed error message during
|
||||
// the execution of the server.
|
||||
func logIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
|
||||
func logIf(ctx context.Context, subsystem string, err error, errKind ...any) {
|
||||
if DisableLog {
|
||||
return
|
||||
}
|
||||
@@ -431,7 +431,7 @@ func sendLog(ctx context.Context, entry log.Entry) {
|
||||
}
|
||||
|
||||
// Event sends a event log to log targets
|
||||
func Event(ctx context.Context, subsystem, msg string, args ...interface{}) {
|
||||
func Event(ctx context.Context, subsystem, msg string, args ...any) {
|
||||
if DisableLog {
|
||||
return
|
||||
}
|
||||
@@ -444,7 +444,7 @@ var ErrCritical struct{}
|
||||
|
||||
// CriticalIf logs the provided error on the console. It fails the
|
||||
// current go-routine by causing a `panic(ErrCritical)`.
|
||||
func CriticalIf(ctx context.Context, err error, errKind ...interface{}) {
|
||||
func CriticalIf(ctx context.Context, err error, errKind ...any) {
|
||||
if err != nil {
|
||||
LogIf(ctx, "", err, errKind...)
|
||||
panic(ErrCritical)
|
||||
@@ -452,7 +452,7 @@ func CriticalIf(ctx context.Context, err error, errKind ...interface{}) {
|
||||
}
|
||||
|
||||
// FatalIf is similar to Fatal() but it ignores passed nil error
|
||||
func FatalIf(err error, msg string, data ...interface{}) {
|
||||
func FatalIf(err error, msg string, data ...any) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
// LogOnce provides the function type for logger.LogOnceIf() function
|
||||
type LogOnce func(ctx context.Context, err error, id string, errKind ...interface{})
|
||||
type LogOnce func(ctx context.Context, err error, id string, errKind ...any)
|
||||
|
||||
type onceErr struct {
|
||||
Err error
|
||||
@@ -38,7 +38,7 @@ type logOnceType struct {
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (l *logOnceType) logOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
|
||||
func (l *logOnceType) logOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
@@ -92,7 +92,7 @@ func unwrapErrs(err error) (leafErr error) {
|
||||
}
|
||||
|
||||
// One log message per error.
|
||||
func (l *logOnceType) logOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
|
||||
func (l *logOnceType) logOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
@@ -142,7 +142,7 @@ var logOnce = newLogOnceType()
|
||||
// LogOnceIf - Logs notification errors - once per error.
|
||||
// id is a unique identifier for related log messages, refer to cmd/notification.go
|
||||
// on how it is used.
|
||||
func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
|
||||
func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) {
|
||||
if logIgnoreError(err) {
|
||||
return
|
||||
}
|
||||
@@ -150,7 +150,7 @@ func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errK
|
||||
}
|
||||
|
||||
// LogOnceConsoleIf - similar to LogOnceIf but exclusively only logs to console target.
|
||||
func LogOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
|
||||
func LogOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) {
|
||||
if logIgnoreError(err) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ func NewEntry(deploymentID string) audit.Entry {
|
||||
}
|
||||
|
||||
// ToEntry - constructs an audit entry from a http request
|
||||
func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, deploymentID string) audit.Entry {
|
||||
func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]any, deploymentID string) audit.Entry {
|
||||
entry := NewEntry(deploymentID)
|
||||
|
||||
entry.RemoteHost = handlers.GetSourceIP(r)
|
||||
|
||||
@@ -50,7 +50,7 @@ func (c *Target) String() string {
|
||||
}
|
||||
|
||||
// Send log message 'e' to console
|
||||
func (c *Target) Send(e interface{}) error {
|
||||
func (c *Target) Send(e any) error {
|
||||
entry, ok := e.(log.Entry)
|
||||
if !ok {
|
||||
return fmt.Errorf("Uexpected log entry structure %#v", e)
|
||||
|
||||
@@ -61,7 +61,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
logChBuffers = make(map[string]chan interface{})
|
||||
logChBuffers = make(map[string]chan any)
|
||||
logChLock = sync.Mutex{}
|
||||
)
|
||||
|
||||
@@ -84,7 +84,7 @@ type Config struct {
|
||||
HTTPTimeout time.Duration `json:"httpTimeout"`
|
||||
|
||||
// Custom logger
|
||||
LogOnceIf func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"`
|
||||
LogOnceIf func(ctx context.Context, err error, id string, errKind ...any) `json:"-"`
|
||||
}
|
||||
|
||||
// Target implements logger.Target and sends the json
|
||||
@@ -109,7 +109,7 @@ type Target struct {
|
||||
// Channel of log entries.
|
||||
// Reading logCh must hold read lock on logChMu (to avoid read race)
|
||||
// Sending a value on logCh must hold read lock on logChMu (to avoid closing)
|
||||
logCh chan interface{}
|
||||
logCh chan any
|
||||
logChMu sync.RWMutex
|
||||
|
||||
// If this webhook is being re-configured we will
|
||||
@@ -131,7 +131,7 @@ type Target struct {
|
||||
|
||||
// store to persist and replay the logs to the target
|
||||
// to avoid missing events when the target is down.
|
||||
store store.Store[interface{}]
|
||||
store store.Store[any]
|
||||
storeCtxCancel context.CancelFunc
|
||||
|
||||
initQueueOnce once.Init
|
||||
@@ -199,7 +199,7 @@ func (h *Target) initDiskStore(ctx context.Context) (err error) {
|
||||
h.lastStarted = time.Now()
|
||||
go h.startQueueProcessor(ctx, true)
|
||||
|
||||
queueStore := store.NewQueueStore[interface{}](
|
||||
queueStore := store.NewQueueStore[any](
|
||||
filepath.Join(h.config.QueueDir, h.Name()),
|
||||
uint64(h.config.QueueSize),
|
||||
httpLoggerExtension,
|
||||
@@ -289,7 +289,7 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
|
||||
h.wg.Add(1)
|
||||
defer h.wg.Done()
|
||||
|
||||
entries := make([]interface{}, 0)
|
||||
entries := make([]any, 0)
|
||||
name := h.Name()
|
||||
|
||||
defer func() {
|
||||
@@ -455,7 +455,7 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
|
||||
}
|
||||
}
|
||||
|
||||
entries = make([]interface{}, 0)
|
||||
entries = make([]any, 0)
|
||||
count = 0
|
||||
if !isDirQueue {
|
||||
buf.Reset()
|
||||
@@ -481,7 +481,7 @@ func CreateOrAdjustGlobalBuffer(currentTgt *Target, newTgt *Target) {
|
||||
|
||||
currentBuff, ok := logChBuffers[name]
|
||||
if !ok {
|
||||
logChBuffers[name] = make(chan interface{}, requiredCap)
|
||||
logChBuffers[name] = make(chan any, requiredCap)
|
||||
currentCap = requiredCap
|
||||
} else {
|
||||
currentCap = cap(currentBuff)
|
||||
@@ -489,7 +489,7 @@ func CreateOrAdjustGlobalBuffer(currentTgt *Target, newTgt *Target) {
|
||||
}
|
||||
|
||||
if requiredCap > currentCap {
|
||||
logChBuffers[name] = make(chan interface{}, requiredCap)
|
||||
logChBuffers[name] = make(chan any, requiredCap)
|
||||
|
||||
if len(currentBuff) > 0 {
|
||||
drain:
|
||||
@@ -519,7 +519,7 @@ func New(config Config) (*Target, error) {
|
||||
}
|
||||
|
||||
h := &Target{
|
||||
logCh: make(chan interface{}, config.QueueSize),
|
||||
logCh: make(chan any, config.QueueSize),
|
||||
config: config,
|
||||
batchSize: config.BatchSize,
|
||||
maxWorkers: int64(maxWorkers),
|
||||
@@ -579,7 +579,7 @@ func (h *Target) SendFromStore(key store.Key) (err error) {
|
||||
// Send the log message 'entry' to the http target.
|
||||
// Messages are queued in the disk if the store is enabled
|
||||
// If Cancel has been called the message is ignored.
|
||||
func (h *Target) Send(ctx context.Context, entry interface{}) error {
|
||||
func (h *Target) Send(ctx context.Context, entry any) error {
|
||||
if h.status.Load() == statusClosed {
|
||||
if h.migrateTarget != nil {
|
||||
return h.migrateTarget.Send(ctx, entry)
|
||||
|
||||
@@ -75,7 +75,7 @@ type Config struct {
|
||||
QueueDir string `json:"queueDir"`
|
||||
|
||||
// Custom logger
|
||||
LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"`
|
||||
LogOnce func(ctx context.Context, err error, id string, errKind ...any) `json:"-"`
|
||||
}
|
||||
|
||||
// Target - Kafka target.
|
||||
@@ -90,12 +90,12 @@ type Target struct {
|
||||
// Channel of log entries.
|
||||
// Reading logCh must hold read lock on logChMu (to avoid read race)
|
||||
// Sending a value on logCh must hold read lock on logChMu (to avoid closing)
|
||||
logCh chan interface{}
|
||||
logCh chan any
|
||||
logChMu sync.RWMutex
|
||||
|
||||
// store to persist and replay the logs to the target
|
||||
// to avoid missing events when the target is down.
|
||||
store store.Store[interface{}]
|
||||
store store.Store[any]
|
||||
storeCtxCancel context.CancelFunc
|
||||
|
||||
initKafkaOnce once.Init
|
||||
@@ -170,7 +170,7 @@ func (h *Target) Init(ctx context.Context) error {
|
||||
|
||||
func (h *Target) initQueueStore(ctx context.Context) (err error) {
|
||||
queueDir := filepath.Join(h.kconfig.QueueDir, h.Name())
|
||||
queueStore := store.NewQueueStore[interface{}](queueDir, uint64(h.kconfig.QueueSize), kafkaLoggerExtension)
|
||||
queueStore := store.NewQueueStore[any](queueDir, uint64(h.kconfig.QueueSize), kafkaLoggerExtension)
|
||||
if err = queueStore.Open(); err != nil {
|
||||
return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err)
|
||||
}
|
||||
@@ -202,7 +202,7 @@ func (h *Target) startKafkaLogger() {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Target) logEntry(entry interface{}) {
|
||||
func (h *Target) logEntry(entry any) {
|
||||
atomic.AddInt64(&h.totalMessages, 1)
|
||||
if err := h.send(entry); err != nil {
|
||||
atomic.AddInt64(&h.failedMessages, 1)
|
||||
@@ -210,7 +210,7 @@ func (h *Target) logEntry(entry interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Target) send(entry interface{}) error {
|
||||
func (h *Target) send(entry any) error {
|
||||
if err := h.initKafkaOnce.Do(h.init); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -311,7 +311,7 @@ func (h *Target) IsOnline(_ context.Context) bool {
|
||||
}
|
||||
|
||||
// Send log message 'e' to kafka target.
|
||||
func (h *Target) Send(ctx context.Context, entry interface{}) error {
|
||||
func (h *Target) Send(ctx context.Context, entry any) error {
|
||||
if h.store != nil {
|
||||
// save the entry to the queue store which will be replayed to the target.
|
||||
_, err := h.store.Put(entry)
|
||||
@@ -391,7 +391,7 @@ func (h *Target) Cancel() {
|
||||
// sends log over http to the specified endpoint
|
||||
func New(config Config) *Target {
|
||||
target := &Target{
|
||||
logCh: make(chan interface{}, config.QueueSize),
|
||||
logCh: make(chan any, config.QueueSize),
|
||||
kconfig: config,
|
||||
status: statusOffline,
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ func (t *testLogger) Cancel() {
|
||||
t.current.Store(nil)
|
||||
}
|
||||
|
||||
func (t *testLogger) Send(ctx context.Context, entry interface{}) error {
|
||||
func (t *testLogger) Send(ctx context.Context, entry any) error {
|
||||
tb := t.current.Load()
|
||||
var logf func(format string, args ...any)
|
||||
if tb != nil {
|
||||
|
||||
@@ -39,7 +39,7 @@ type Target interface {
|
||||
Init(ctx context.Context) error
|
||||
IsOnline(ctx context.Context) bool
|
||||
Cancel()
|
||||
Send(ctx context.Context, entry interface{}) error
|
||||
Send(ctx context.Context, entry any) error
|
||||
Type() types.TargetType
|
||||
}
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ import (
|
||||
var ansiRE = regexp.MustCompile("(\x1b[^m]*m)")
|
||||
|
||||
// Print ANSI Control escape
|
||||
func ansiEscape(format string, args ...interface{}) {
|
||||
func ansiEscape(format string, args ...any) {
|
||||
Esc := "\x1b"
|
||||
fmt.Printf("%s%s", Esc, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
@@ -152,18 +152,18 @@ func doTestParallelReaders(numReaders, gomaxprocs int) {
|
||||
clocked := make(chan bool)
|
||||
cunlock := make(chan bool)
|
||||
cdone := make(chan bool)
|
||||
for i := 0; i < numReaders; i++ {
|
||||
for range numReaders {
|
||||
go parallelReader(context.Background(), m, clocked, cunlock, cdone)
|
||||
}
|
||||
// Wait for all parallel RLock()s to succeed.
|
||||
for i := 0; i < numReaders; i++ {
|
||||
for range numReaders {
|
||||
<-clocked
|
||||
}
|
||||
for i := 0; i < numReaders; i++ {
|
||||
for range numReaders {
|
||||
cunlock <- true
|
||||
}
|
||||
// Wait for the goroutines to finish.
|
||||
for i := 0; i < numReaders; i++ {
|
||||
for range numReaders {
|
||||
<-cdone
|
||||
}
|
||||
}
|
||||
@@ -178,13 +178,13 @@ func TestParallelReaders(t *testing.T) {
|
||||
|
||||
// Borrowed from rwmutex_test.go
|
||||
func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) {
|
||||
for i := 0; i < numIterations; i++ {
|
||||
for range numIterations {
|
||||
if rwm.GetRLock(context.Background(), "", "", time.Second) {
|
||||
n := atomic.AddInt32(activity, 1)
|
||||
if n < 1 || n >= 10000 {
|
||||
panic(fmt.Sprintf("wlock(%d)\n", n))
|
||||
}
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
}
|
||||
atomic.AddInt32(activity, -1)
|
||||
rwm.RUnlock()
|
||||
@@ -195,13 +195,13 @@ func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool)
|
||||
|
||||
// Borrowed from rwmutex_test.go
|
||||
func writer(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) {
|
||||
for i := 0; i < numIterations; i++ {
|
||||
for range numIterations {
|
||||
if rwm.GetLock(context.Background(), "", "", time.Second) {
|
||||
n := atomic.AddInt32(activity, 10000)
|
||||
if n != 10000 {
|
||||
panic(fmt.Sprintf("wlock(%d)\n", n))
|
||||
}
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
}
|
||||
atomic.AddInt32(activity, -10000)
|
||||
rwm.Unlock()
|
||||
@@ -260,7 +260,7 @@ func TestDRLocker(t *testing.T) {
|
||||
rl = wl.DRLocker()
|
||||
n := 10
|
||||
go func() {
|
||||
for i := 0; i < n; i++ {
|
||||
for range n {
|
||||
rl.Lock()
|
||||
rl.Lock()
|
||||
rlocked <- true
|
||||
@@ -268,7 +268,7 @@ func TestDRLocker(t *testing.T) {
|
||||
wlocked <- true
|
||||
}
|
||||
}()
|
||||
for i := 0; i < n; i++ {
|
||||
for range n {
|
||||
<-rlocked
|
||||
rl.Unlock()
|
||||
select {
|
||||
|
||||
@@ -458,10 +458,7 @@ func exponentialBackoffWait(r *rand.Rand, unit, maxSleep time.Duration) func(uin
|
||||
attempt = 16
|
||||
}
|
||||
// sleep = random_between(unit, min(cap, base * 2 ** attempt))
|
||||
sleep := unit * time.Duration(1<<attempt)
|
||||
if sleep > maxSleep {
|
||||
sleep = maxSleep
|
||||
}
|
||||
sleep := min(unit*time.Duration(1<<attempt), maxSleep)
|
||||
sleep -= time.Duration(r.Float64() * float64(sleep-unit))
|
||||
return sleep
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ func TestNetworkError_Unwrap(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
err error
|
||||
target interface{}
|
||||
target any
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -193,19 +193,13 @@ func (r *RingBuffer) read(p []byte) (n int, err error) {
|
||||
}
|
||||
|
||||
if r.w > r.r {
|
||||
n = r.w - r.r
|
||||
if n > len(p) {
|
||||
n = len(p)
|
||||
}
|
||||
n = min(r.w-r.r, len(p))
|
||||
copy(p, r.buf[r.r:r.r+n])
|
||||
r.r = (r.r + n) % r.size
|
||||
return
|
||||
}
|
||||
|
||||
n = r.size - r.r + r.w
|
||||
if n > len(p) {
|
||||
n = len(p)
|
||||
}
|
||||
n = min(r.size-r.r+r.w, len(p))
|
||||
|
||||
if r.r+n <= r.size {
|
||||
copy(p, r.buf[r.r:r.r+n])
|
||||
|
||||
@@ -11,8 +11,7 @@ func BenchmarkRingBuffer_Sync(b *testing.B) {
|
||||
data := []byte(strings.Repeat("a", 512))
|
||||
buf := make([]byte, 512)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
rb.Write(data)
|
||||
rb.Read(buf)
|
||||
}
|
||||
@@ -30,8 +29,7 @@ func BenchmarkRingBuffer_AsyncRead(b *testing.B) {
|
||||
}
|
||||
}()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
rb.Write(data)
|
||||
}
|
||||
}
|
||||
@@ -50,8 +48,7 @@ func BenchmarkRingBuffer_AsyncReadBlocking(b *testing.B) {
|
||||
}
|
||||
}()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
rb.Write(data)
|
||||
}
|
||||
}
|
||||
@@ -67,8 +64,7 @@ func BenchmarkRingBuffer_AsyncWrite(b *testing.B) {
|
||||
}
|
||||
}()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
rb.Read(buf)
|
||||
}
|
||||
}
|
||||
@@ -87,8 +83,7 @@ func BenchmarkRingBuffer_AsyncWriteBlocking(b *testing.B) {
|
||||
}
|
||||
}()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
rb.Read(buf)
|
||||
}
|
||||
}
|
||||
@@ -104,8 +99,7 @@ func BenchmarkIoPipeReader(b *testing.B) {
|
||||
}
|
||||
}()
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
pr.Read(buf)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -429,7 +429,7 @@ func TestRingBuffer_Blocking(t *testing.T) {
|
||||
read = io.MultiWriter(read, &readBuf)
|
||||
wrote = io.MultiWriter(wrote, &wroteBuf)
|
||||
}
|
||||
debugln := func(args ...interface{}) {
|
||||
debugln := func(args ...any) {
|
||||
if debug {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
@@ -488,7 +488,7 @@ func TestRingBuffer_Blocking(t *testing.T) {
|
||||
{
|
||||
buf := make([]byte, 1024)
|
||||
writeRng := rand.New(rand.NewSource(2))
|
||||
for i := 0; i < 2500; i++ {
|
||||
for range 2500 {
|
||||
writeRng.Read(buf)
|
||||
// Write
|
||||
n, err := rb.Write(buf[:writeRng.Intn(len(buf))])
|
||||
@@ -592,7 +592,7 @@ func TestRingBuffer_BlockingBig(t *testing.T) {
|
||||
read = io.MultiWriter(read, &readBuf)
|
||||
wrote = io.MultiWriter(wrote, &wroteBuf)
|
||||
}
|
||||
debugln := func(args ...interface{}) {
|
||||
debugln := func(args ...any) {
|
||||
if debug {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
@@ -651,7 +651,7 @@ func TestRingBuffer_BlockingBig(t *testing.T) {
|
||||
{
|
||||
writeRng := rand.New(rand.NewSource(2))
|
||||
buf := make([]byte, 64<<10)
|
||||
for i := 0; i < 500; i++ {
|
||||
for range 500 {
|
||||
writeRng.Read(buf)
|
||||
// Write
|
||||
n, err := rb.Write(buf[:writeRng.Intn(len(buf))])
|
||||
|
||||
@@ -84,7 +84,7 @@ func TestRead(t *testing.T) {
|
||||
}
|
||||
|
||||
type tester interface {
|
||||
Fatal(...interface{})
|
||||
Fatal(...any)
|
||||
}
|
||||
|
||||
func openTestFile(t tester, file string) []byte {
|
||||
@@ -508,10 +508,10 @@ func BenchmarkReaderBasic(b *testing.B) {
|
||||
}
|
||||
defer r.Close()
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
b.SetBytes(int64(len(f)))
|
||||
var record sql.Record
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
|
||||
if err != nil {
|
||||
b.Fatalf("Reading init failed with %s", err)
|
||||
@@ -537,7 +537,7 @@ func BenchmarkReaderHuge(b *testing.B) {
|
||||
AllowQuotedRecordDelimiter: false,
|
||||
unmarshaled: true,
|
||||
}
|
||||
for n := 0; n < 11; n++ {
|
||||
for n := range 11 {
|
||||
f := openTestFile(b, "nyc-taxi-data-100k.csv")
|
||||
want := 309
|
||||
for i := 0; i < n; i++ {
|
||||
@@ -549,7 +549,7 @@ func BenchmarkReaderHuge(b *testing.B) {
|
||||
b.SetBytes(int64(len(f)))
|
||||
b.ResetTimer()
|
||||
var record sql.Record
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
r, err := NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
|
||||
if err != nil {
|
||||
b.Fatalf("Reading init failed with %s", err)
|
||||
@@ -590,10 +590,10 @@ func BenchmarkReaderReplace(b *testing.B) {
|
||||
}
|
||||
defer r.Close()
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
b.SetBytes(int64(len(f)))
|
||||
var record sql.Record
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
|
||||
if err != nil {
|
||||
b.Fatalf("Reading init failed with %s", err)
|
||||
@@ -627,10 +627,10 @@ func BenchmarkReaderReplaceTwo(b *testing.B) {
|
||||
}
|
||||
defer r.Close()
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
b.SetBytes(int64(len(f)))
|
||||
var record sql.Record
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
|
||||
if err != nil {
|
||||
b.Fatalf("Reading init failed with %s", err)
|
||||
|
||||
@@ -46,8 +46,8 @@ func (r *Record) Get(name string) (*sql.Value, error) {
|
||||
index, found := r.nameIndexMap[name]
|
||||
if !found {
|
||||
// Check if index.
|
||||
if strings.HasPrefix(name, "_") {
|
||||
idx, err := strconv.Atoi(strings.TrimPrefix(name, "_"))
|
||||
if after, ok := strings.CutPrefix(name, "_"); ok {
|
||||
idx, err := strconv.Atoi(after)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("column %v not found", name)
|
||||
}
|
||||
@@ -133,12 +133,12 @@ func (r *Record) WriteJSON(writer io.Writer) error {
|
||||
}
|
||||
|
||||
// Raw - returns the underlying data with format info.
|
||||
func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) {
|
||||
func (r *Record) Raw() (sql.SelectObjectFormat, any) {
|
||||
return sql.SelectFmtCSV, r
|
||||
}
|
||||
|
||||
// Replace - is not supported for CSV
|
||||
func (r *Record) Replace(_ interface{}) error {
|
||||
func (r *Record) Replace(_ any) error {
|
||||
return errors.New("Replace is not supported for CSV")
|
||||
}
|
||||
|
||||
|
||||
@@ -88,7 +88,7 @@ func BenchmarkPReader(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
var record sql.Record
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
r := NewPReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{})
|
||||
for {
|
||||
record, err = r.Read(record)
|
||||
|
||||
@@ -88,7 +88,7 @@ func BenchmarkReader(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
var record sql.Record
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
r := NewReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{})
|
||||
for {
|
||||
record, err = r.Read(record)
|
||||
|
||||
@@ -76,7 +76,7 @@ func (r *Record) Clone(dst sql.Record) sql.Record {
|
||||
|
||||
// Set - sets the value for a column name.
|
||||
func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) {
|
||||
var v interface{}
|
||||
var v any
|
||||
if b, ok := value.ToBool(); ok {
|
||||
v = b
|
||||
} else if f, ok := value.ToFloat(); ok {
|
||||
@@ -126,7 +126,7 @@ func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error {
|
||||
columnValue = ""
|
||||
case RawJSON:
|
||||
columnValue = string([]byte(val))
|
||||
case []interface{}:
|
||||
case []any:
|
||||
b, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -151,7 +151,7 @@ func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error {
|
||||
}
|
||||
|
||||
// Raw - returns the underlying representation.
|
||||
func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) {
|
||||
func (r *Record) Raw() (sql.SelectObjectFormat, any) {
|
||||
return r.SelectFormat, r.KVS
|
||||
}
|
||||
|
||||
@@ -161,7 +161,7 @@ func (r *Record) WriteJSON(writer io.Writer) error {
|
||||
}
|
||||
|
||||
// Replace the underlying buffer of json data.
|
||||
func (r *Record) Replace(k interface{}) error {
|
||||
func (r *Record) Replace(k any) error {
|
||||
v, ok := k.(jstream.KVS)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot replace internal data in json record with type %T", k)
|
||||
|
||||
@@ -29,14 +29,14 @@ type MetaValue struct {
|
||||
Offset int
|
||||
Length int
|
||||
Depth int
|
||||
Value interface{}
|
||||
Value any
|
||||
ValueType ValueType
|
||||
}
|
||||
|
||||
// KV contains a key and value pair parsed from a decoded object
|
||||
type KV struct {
|
||||
Key string `json:"key"`
|
||||
Value interface{} `json:"value"`
|
||||
Key string `json:"key"`
|
||||
Value any `json:"value"`
|
||||
}
|
||||
|
||||
// KVS - represents key values in an JSON object
|
||||
@@ -160,7 +160,7 @@ func (d *Decoder) decode() {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Decoder) emitAny() (interface{}, error) {
|
||||
func (d *Decoder) emitAny() (any, error) {
|
||||
if d.pos >= atomic.LoadInt64(&d.end) {
|
||||
return nil, d.mkError(ErrUnexpectedEOF)
|
||||
}
|
||||
@@ -189,7 +189,7 @@ func (d *Decoder) willEmit() bool {
|
||||
|
||||
// any used to decode any valid JSON value, and returns an
|
||||
// interface{} that holds the actual data
|
||||
func (d *Decoder) any() (interface{}, ValueType, error) {
|
||||
func (d *Decoder) any() (any, ValueType, error) {
|
||||
c := d.cur()
|
||||
|
||||
switch c {
|
||||
@@ -239,7 +239,7 @@ func (d *Decoder) any() (interface{}, ValueType, error) {
|
||||
i, err := d.array()
|
||||
return i, Array, err
|
||||
case '{':
|
||||
var i interface{}
|
||||
var i any
|
||||
var err error
|
||||
if d.objectAsKVS {
|
||||
i, err = d.objectOrdered()
|
||||
@@ -426,7 +426,7 @@ func (d *Decoder) number() (float64, error) {
|
||||
}
|
||||
|
||||
// array accept valid JSON array value
|
||||
func (d *Decoder) array() ([]interface{}, error) {
|
||||
func (d *Decoder) array() ([]any, error) {
|
||||
d.depth++
|
||||
if d.maxDepth > 0 && d.depth > d.maxDepth {
|
||||
return nil, ErrMaxDepth
|
||||
@@ -434,9 +434,9 @@ func (d *Decoder) array() ([]interface{}, error) {
|
||||
|
||||
var (
|
||||
c byte
|
||||
v interface{}
|
||||
v any
|
||||
err error
|
||||
array = make([]interface{}, 0)
|
||||
array = make([]any, 0)
|
||||
)
|
||||
|
||||
// look ahead for ] - if the array is empty.
|
||||
@@ -470,7 +470,7 @@ out:
|
||||
}
|
||||
|
||||
// object accept valid JSON array value
|
||||
func (d *Decoder) object() (map[string]interface{}, error) {
|
||||
func (d *Decoder) object() (map[string]any, error) {
|
||||
d.depth++
|
||||
if d.maxDepth > 0 && d.depth > d.maxDepth {
|
||||
return nil, ErrMaxDepth
|
||||
@@ -479,15 +479,15 @@ func (d *Decoder) object() (map[string]interface{}, error) {
|
||||
var (
|
||||
c byte
|
||||
k string
|
||||
v interface{}
|
||||
v any
|
||||
t ValueType
|
||||
err error
|
||||
obj map[string]interface{}
|
||||
obj map[string]any
|
||||
)
|
||||
|
||||
// skip allocating map if it will not be emitted
|
||||
if d.depth > d.emitDepth {
|
||||
obj = make(map[string]interface{})
|
||||
obj = make(map[string]any)
|
||||
}
|
||||
|
||||
// if the object has no keys
|
||||
@@ -567,7 +567,7 @@ func (d *Decoder) objectOrdered() (KVS, error) {
|
||||
var (
|
||||
c byte
|
||||
k string
|
||||
v interface{}
|
||||
v any
|
||||
t ValueType
|
||||
err error
|
||||
obj KVS
|
||||
|
||||
@@ -74,7 +74,7 @@ func TestDecoderFlat(t *testing.T) {
|
||||
1, 2.5
|
||||
]`
|
||||
expected = []struct {
|
||||
Value interface{}
|
||||
Value any
|
||||
ValueType ValueType
|
||||
}{
|
||||
{
|
||||
|
||||
@@ -85,17 +85,17 @@ func TestScannerFailure(t *testing.T) {
|
||||
|
||||
func BenchmarkBufioScanner(b *testing.B) {
|
||||
b.Run("small", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkBufioScanner(smallInput)
|
||||
}
|
||||
})
|
||||
b.Run("medium", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkBufioScanner(mediumInput)
|
||||
}
|
||||
})
|
||||
b.Run("large", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkBufioScanner(largeInput)
|
||||
}
|
||||
})
|
||||
@@ -111,17 +111,17 @@ func benchmarkBufioScanner(b []byte) {
|
||||
|
||||
func BenchmarkBufioReader(b *testing.B) {
|
||||
b.Run("small", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkBufioReader(smallInput)
|
||||
}
|
||||
})
|
||||
b.Run("medium", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkBufioReader(mediumInput)
|
||||
}
|
||||
})
|
||||
b.Run("large", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkBufioReader(largeInput)
|
||||
}
|
||||
})
|
||||
@@ -145,17 +145,17 @@ loop:
|
||||
|
||||
func BenchmarkScanner(b *testing.B) {
|
||||
b.Run("small", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkScanner(smallInput)
|
||||
}
|
||||
})
|
||||
b.Run("medium", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkScanner(mediumInput)
|
||||
}
|
||||
})
|
||||
b.Run("large", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
benchmarkScanner(largeInput)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -56,7 +56,7 @@ func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) {
|
||||
|
||||
kvs := jstream.KVS{}
|
||||
for _, col := range pr.r.Columns() {
|
||||
var value interface{}
|
||||
var value any
|
||||
if v, ok := nextRow[col.FlatName()]; ok {
|
||||
value, err = convertFromAnnotation(col.Element(), v)
|
||||
if err != nil {
|
||||
@@ -80,12 +80,12 @@ func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) {
|
||||
// annotations. LogicalType annotations if present override the deprecated
|
||||
// ConvertedType annotations. Ref:
|
||||
// https://github.com/apache/parquet-format/blob/master/LogicalTypes.md
|
||||
func convertFromAnnotation(se *parquettypes.SchemaElement, v interface{}) (interface{}, error) {
|
||||
func convertFromAnnotation(se *parquettypes.SchemaElement, v any) (any, error) {
|
||||
if se == nil {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
var value any
|
||||
switch val := v.(type) {
|
||||
case []byte:
|
||||
// TODO: only strings are supported in s3select output (not
|
||||
|
||||
@@ -46,7 +46,7 @@ func genSampleCSVData(count int) []byte {
|
||||
csvWriter := csv.NewWriter(buf)
|
||||
csvWriter.Write([]string{"id", "name", "age", "city"})
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
for i := range count {
|
||||
csvWriter.Write([]string{
|
||||
strconv.Itoa(i),
|
||||
newRandString(10),
|
||||
|
||||
@@ -630,7 +630,7 @@ func TestJSONQueries(t *testing.T) {
|
||||
if len(testReq) == 0 {
|
||||
var escaped bytes.Buffer
|
||||
xml.EscapeText(&escaped, []byte(testCase.query))
|
||||
testReq = []byte(fmt.Sprintf(defRequest, escaped.String()))
|
||||
testReq = fmt.Appendf(nil, defRequest, escaped.String())
|
||||
}
|
||||
s3Select, err := NewS3Select(bytes.NewReader(testReq))
|
||||
if err != nil {
|
||||
@@ -676,7 +676,7 @@ func TestJSONQueries(t *testing.T) {
|
||||
if len(testReq) == 0 {
|
||||
var escaped bytes.Buffer
|
||||
xml.EscapeText(&escaped, []byte(testCase.query))
|
||||
testReq = []byte(fmt.Sprintf(defRequest, escaped.String()))
|
||||
testReq = fmt.Appendf(nil, defRequest, escaped.String())
|
||||
}
|
||||
s3Select, err := NewS3Select(bytes.NewReader(testReq))
|
||||
if err != nil {
|
||||
@@ -761,7 +761,7 @@ func TestCSVQueries(t *testing.T) {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
testReq := testCase.requestXML
|
||||
if len(testReq) == 0 {
|
||||
testReq = []byte(fmt.Sprintf(defRequest, testCase.query))
|
||||
testReq = fmt.Appendf(nil, defRequest, testCase.query)
|
||||
}
|
||||
s3Select, err := NewS3Select(bytes.NewReader(testReq))
|
||||
if err != nil {
|
||||
@@ -944,7 +944,7 @@ func TestCSVQueries2(t *testing.T) {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
testReq := testCase.requestXML
|
||||
if len(testReq) == 0 {
|
||||
testReq = []byte(fmt.Sprintf(defRequest, testCase.query))
|
||||
testReq = fmt.Appendf(nil, defRequest, testCase.query)
|
||||
}
|
||||
s3Select, err := NewS3Select(bytes.NewReader(testReq))
|
||||
if err != nil {
|
||||
@@ -1088,7 +1088,7 @@ true`,
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
testReq := testCase.requestXML
|
||||
if len(testReq) == 0 {
|
||||
testReq = []byte(fmt.Sprintf(defRequest, testCase.query))
|
||||
testReq = fmt.Appendf(nil, defRequest, testCase.query)
|
||||
}
|
||||
s3Select, err := NewS3Select(bytes.NewReader(testReq))
|
||||
if err != nil {
|
||||
|
||||
@@ -31,7 +31,7 @@ import (
|
||||
)
|
||||
|
||||
type tester interface {
|
||||
Fatal(args ...interface{})
|
||||
Fatal(args ...any)
|
||||
}
|
||||
|
||||
func loadCompressed(t tester, file string) (js []byte) {
|
||||
|
||||
@@ -185,7 +185,7 @@ allElems:
|
||||
}
|
||||
|
||||
// Raw - returns the underlying representation.
|
||||
func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) {
|
||||
func (r *Record) Raw() (sql.SelectObjectFormat, any) {
|
||||
return sql.SelectFmtSIMDJSON, r.object
|
||||
}
|
||||
|
||||
@@ -211,7 +211,7 @@ func (r *Record) WriteJSON(writer io.Writer) error {
|
||||
}
|
||||
|
||||
// Replace the underlying buffer of json data.
|
||||
func (r *Record) Replace(k interface{}) error {
|
||||
func (r *Record) Replace(k any) error {
|
||||
v, ok := k.(simdjson.Object)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot replace internal data in simd json record with type %T", k)
|
||||
|
||||
@@ -413,7 +413,7 @@ func (e *JSONPath) evalNode(r Record, tableAlias string) (*Value, error) {
|
||||
}
|
||||
|
||||
// jsonToValue will convert the json value to an internal value.
|
||||
func jsonToValue(result interface{}) (*Value, error) {
|
||||
func jsonToValue(result any) (*Value, error) {
|
||||
switch rval := result.(type) {
|
||||
case string:
|
||||
return FromString(rval), nil
|
||||
@@ -434,7 +434,7 @@ func jsonToValue(result interface{}) (*Value, error) {
|
||||
return nil, err
|
||||
}
|
||||
return FromBytes(bs), nil
|
||||
case []interface{}:
|
||||
case []any:
|
||||
dst := make([]Value, len(rval))
|
||||
for i := range rval {
|
||||
v, err := jsonToValue(rval[i])
|
||||
|
||||
@@ -34,7 +34,7 @@ var (
|
||||
|
||||
// jsonpathEval evaluates a JSON path and returns the value at the path.
|
||||
// If the value should be considered flat (from wildcards) any array returned should be considered individual values.
|
||||
func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool, err error) {
|
||||
func jsonpathEval(p []*JSONPathElement, v any) (r any, flat bool, err error) {
|
||||
// fmt.Printf("JPATHexpr: %v jsonobj: %v\n\n", p, v)
|
||||
if len(p) == 0 || v == nil {
|
||||
return v, false, nil
|
||||
@@ -71,7 +71,7 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool
|
||||
case p[0].Index != nil:
|
||||
idx := *p[0].Index
|
||||
|
||||
arr, ok := v.([]interface{})
|
||||
arr, ok := v.([]any)
|
||||
if !ok {
|
||||
return nil, false, errIndexLookup
|
||||
}
|
||||
@@ -100,14 +100,14 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool
|
||||
}
|
||||
|
||||
case p[0].ArrayWildcard:
|
||||
arr, ok := v.([]interface{})
|
||||
arr, ok := v.([]any)
|
||||
if !ok {
|
||||
return nil, false, errWildcardArrayLookup
|
||||
}
|
||||
|
||||
// Lookup remainder of path in each array element and
|
||||
// make result array.
|
||||
var result []interface{}
|
||||
var result []any
|
||||
for _, a := range arr {
|
||||
rval, flatten, err := jsonpathEval(p[1:], a)
|
||||
if err != nil {
|
||||
@@ -116,7 +116,7 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool
|
||||
|
||||
if flatten {
|
||||
// Flatten if array.
|
||||
if arr, ok := rval.([]interface{}); ok {
|
||||
if arr, ok := rval.([]any); ok {
|
||||
result = append(result, arr...)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -30,9 +30,9 @@ import (
|
||||
"github.com/minio/minio/internal/s3select/jstream"
|
||||
)
|
||||
|
||||
func getJSONStructs(b []byte) ([]interface{}, error) {
|
||||
func getJSONStructs(b []byte) ([]any, error) {
|
||||
dec := jstream.NewDecoder(bytes.NewBuffer(b), 0).ObjectAsKVS().MaxDepth(100)
|
||||
var result []interface{}
|
||||
var result []any
|
||||
for parsedVal := range dec.Stream() {
|
||||
result = append(result, parsedVal.Value)
|
||||
}
|
||||
@@ -60,13 +60,13 @@ func TestJsonpathEval(t *testing.T) {
|
||||
)
|
||||
cases := []struct {
|
||||
str string
|
||||
res []interface{}
|
||||
res []any
|
||||
}{
|
||||
{"s.title", []interface{}{"Murder on the Orient Express", "The Robots of Dawn", "Pigs Have Wings"}},
|
||||
{"s.authorInfo.yearRange", []interface{}{[]interface{}{1890.0, 1976.0}, []interface{}{1920.0, 1992.0}, []interface{}{1881.0, 1975.0}}},
|
||||
{"s.authorInfo.name", []interface{}{"Agatha Christie", "Isaac Asimov", "P. G. Wodehouse"}},
|
||||
{"s.authorInfo.yearRange[0]", []interface{}{1890.0, 1920.0, 1881.0}},
|
||||
{"s.publicationHistory[0].pages", []interface{}{256.0, 336.0, Missing{}}},
|
||||
{"s.title", []any{"Murder on the Orient Express", "The Robots of Dawn", "Pigs Have Wings"}},
|
||||
{"s.authorInfo.yearRange", []any{[]any{1890.0, 1976.0}, []any{1920.0, 1992.0}, []any{1881.0, 1975.0}}},
|
||||
{"s.authorInfo.name", []any{"Agatha Christie", "Isaac Asimov", "P. G. Wodehouse"}},
|
||||
{"s.authorInfo.yearRange[0]", []any{1890.0, 1920.0, 1881.0}},
|
||||
{"s.publicationHistory[0].pages", []any{256.0, 336.0, Missing{}}},
|
||||
}
|
||||
for i, tc := range cases {
|
||||
t.Run(tc.str, func(t *testing.T) {
|
||||
|
||||
@@ -63,16 +63,16 @@ type Record interface {
|
||||
Reset()
|
||||
|
||||
// Returns underlying representation
|
||||
Raw() (SelectObjectFormat, interface{})
|
||||
Raw() (SelectObjectFormat, any)
|
||||
|
||||
// Replaces the underlying data
|
||||
Replace(k interface{}) error
|
||||
Replace(k any) error
|
||||
}
|
||||
|
||||
// IterToValue converts a simdjson Iter to its underlying value.
|
||||
// Objects are returned as simdjson.Object
|
||||
// Arrays are returned as []interface{} with parsed values.
|
||||
func IterToValue(iter simdjson.Iter) (interface{}, error) {
|
||||
func IterToValue(iter simdjson.Iter) (any, error) {
|
||||
switch iter.Type() {
|
||||
case simdjson.TypeString:
|
||||
v, err := iter.String()
|
||||
@@ -118,7 +118,7 @@ func IterToValue(iter simdjson.Iter) (interface{}, error) {
|
||||
return nil, err
|
||||
}
|
||||
iter := arr.Iter()
|
||||
var dst []interface{}
|
||||
var dst []any
|
||||
var next simdjson.Iter
|
||||
for {
|
||||
typ, err := iter.AdvanceIter(&next)
|
||||
|
||||
@@ -174,7 +174,7 @@ func (e *SelectStatement) EvalFrom(format string, input Record) ([]*Record, erro
|
||||
case jstream.KVS:
|
||||
kvs = v
|
||||
|
||||
case []interface{}:
|
||||
case []any:
|
||||
recs := make([]*Record, len(v))
|
||||
for i, val := range v {
|
||||
tmpRec := input.Clone(nil)
|
||||
@@ -207,7 +207,7 @@ func (e *SelectStatement) EvalFrom(format string, input Record) ([]*Record, erro
|
||||
return nil, err
|
||||
}
|
||||
|
||||
case []interface{}:
|
||||
case []any:
|
||||
recs := make([]*Record, len(v))
|
||||
for i, val := range v {
|
||||
tmpRec := input.Clone(nil)
|
||||
|
||||
@@ -46,7 +46,7 @@ var (
|
||||
// the type may not be determined yet. In these cases, a byte-slice is
|
||||
// used.
|
||||
type Value struct {
|
||||
value interface{}
|
||||
value any
|
||||
}
|
||||
|
||||
// Missing is used to indicate a non-existing value.
|
||||
|
||||
@@ -217,7 +217,7 @@ func TestValue_CSVString(t *testing.T) {
|
||||
|
||||
func TestValue_bytesToInt(t *testing.T) {
|
||||
type fields struct {
|
||||
value interface{}
|
||||
value any
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -367,7 +367,7 @@ func TestValue_bytesToInt(t *testing.T) {
|
||||
|
||||
func TestValue_bytesToFloat(t *testing.T) {
|
||||
type fields struct {
|
||||
value interface{}
|
||||
value any
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -569,7 +569,7 @@ func TestValue_bytesToFloat(t *testing.T) {
|
||||
|
||||
func TestValue_bytesToBool(t *testing.T) {
|
||||
type fields struct {
|
||||
value interface{}
|
||||
value any
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestBatchCommit(t *testing.T) {
|
||||
Limit: limit,
|
||||
Store: store,
|
||||
CommitTimeout: 5 * time.Minute,
|
||||
Log: func(ctx context.Context, err error, id string, errKind ...interface{}) {
|
||||
Log: func(ctx context.Context, err error, id string, errKind ...any) {
|
||||
t.Log(err)
|
||||
},
|
||||
})
|
||||
@@ -106,7 +106,7 @@ func TestBatchCommitOnExit(t *testing.T) {
|
||||
Limit: limit,
|
||||
Store: store,
|
||||
CommitTimeout: 5 * time.Minute,
|
||||
Log: func(ctx context.Context, err error, id string, errKind ...interface{}) {
|
||||
Log: func(ctx context.Context, err error, id string, errKind ...any) {
|
||||
t.Log([]any{err, id, errKind}...)
|
||||
},
|
||||
})
|
||||
@@ -163,7 +163,7 @@ func TestBatchWithConcurrency(t *testing.T) {
|
||||
Limit: limit,
|
||||
Store: store,
|
||||
CommitTimeout: 5 * time.Minute,
|
||||
Log: func(ctx context.Context, err error, id string, errKind ...interface{}) {
|
||||
Log: func(ctx context.Context, err error, id string, errKind ...any) {
|
||||
t.Log(err)
|
||||
},
|
||||
})
|
||||
|
||||
@@ -69,7 +69,7 @@ func TestQueueStorePut(t *testing.T) {
|
||||
t.Fatal("Failed to create a queue store ", err)
|
||||
}
|
||||
// Put 100 items.
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
if _, err := store.Put(testItem); err != nil {
|
||||
t.Fatal("Failed to put to queue store ", err)
|
||||
}
|
||||
@@ -93,7 +93,7 @@ func TestQueueStoreGet(t *testing.T) {
|
||||
t.Fatal("Failed to create a queue store ", err)
|
||||
}
|
||||
// Put 10 items
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
if _, err := store.Put(testItem); err != nil {
|
||||
t.Fatal("Failed to put to queue store ", err)
|
||||
}
|
||||
@@ -127,7 +127,7 @@ func TestQueueStoreDel(t *testing.T) {
|
||||
t.Fatal("Failed to create a queue store ", err)
|
||||
}
|
||||
// Put 20 items.
|
||||
for i := 0; i < 20; i++ {
|
||||
for range 20 {
|
||||
if _, err := store.Put(testItem); err != nil {
|
||||
t.Fatal("Failed to put to queue store ", err)
|
||||
}
|
||||
@@ -163,7 +163,7 @@ func TestQueueStoreLimit(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal("Failed to create a queue store ", err)
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
for range 5 {
|
||||
if _, err := store.Put(testItem); err != nil {
|
||||
t.Fatal("Failed to put to queue store ", err)
|
||||
}
|
||||
@@ -185,7 +185,7 @@ func TestQueueStoreListN(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal("Failed to create a queue store ", err)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
for range 10 {
|
||||
if _, err := store.Put(testItem); err != nil {
|
||||
t.Fatal("Failed to put to queue store ", err)
|
||||
}
|
||||
@@ -237,7 +237,7 @@ func TestMultiplePutGetRaw(t *testing.T) {
|
||||
}
|
||||
// TestItem{Name: "test-item", Property: "property"}
|
||||
var items []TestItem
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
items = append(items, TestItem{
|
||||
Name: fmt.Sprintf("test-item-%d", i),
|
||||
Property: "property",
|
||||
@@ -303,7 +303,7 @@ func TestMultiplePutGets(t *testing.T) {
|
||||
}
|
||||
// TestItem{Name: "test-item", Property: "property"}
|
||||
var items []TestItem
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
items = append(items, TestItem{
|
||||
Name: fmt.Sprintf("test-item-%d", i),
|
||||
Property: "property",
|
||||
@@ -359,7 +359,7 @@ func TestMixedPutGets(t *testing.T) {
|
||||
}
|
||||
// TestItem{Name: "test-item", Property: "property"}
|
||||
var items []TestItem
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
items = append(items, TestItem{
|
||||
Name: fmt.Sprintf("test-item-%d", i),
|
||||
Property: "property",
|
||||
|
||||
@@ -32,7 +32,7 @@ const (
|
||||
retryInterval = 3 * time.Second
|
||||
)
|
||||
|
||||
type logger = func(ctx context.Context, err error, id string, errKind ...interface{})
|
||||
type logger = func(ctx context.Context, err error, id string, errKind ...any)
|
||||
|
||||
// ErrNotConnected - indicates that the target connection is not active.
|
||||
var ErrNotConnected = errors.New("not connected to target server/service")
|
||||