Mirror of https://github.com/juanfont/headscale.git
modernize: run gopls modernize to bring up to 1.25 (#2920)
@@ -2760,7 +2760,7 @@ func TestPreAuthKeyLogoutAndReloginDifferentUser(t *testing.T) {
 	require.Equal(t, 2, user2NodesAfter.Len(), "user2 should still have 2 nodes (old nodes from original registration)")
 
 	// Verify original nodes still exist with original users
-	for i := 0; i < 2; i++ {
+	for i := range 2 {
 		node := nodes[i]
 		// User1's original nodes should still be owned by user1
 		registeredNode, found := app.state.GetNodeByMachineKey(node.machineKey.Public(), types.UserID(user1.ID))
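
The rewritten loop is the Go 1.22+ range-over-int form that gopls modernize prefers over the three-clause counter. A minimal standalone sketch of the equivalence, not tied to the test above:

package main

import "fmt"

func main() {
	// for i := range n iterates i = 0, 1, ..., n-1,
	// exactly like for i := 0; i < n; i++ but with less boilerplate.
	for i := range 2 {
		fmt.Println("node index:", i)
	}
}
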
@@ -3195,6 +3195,7 @@ func TestNodeReregistrationWithExpiredPreAuthKey(t *testing.T) {
 	assert.Error(t, err, "expired pre-auth key should be rejected")
 	assert.Contains(t, err.Error(), "authkey expired", "error should mention key expiration")
 }
+
 // TestGitHubIssue2830_ExistingNodeCanReregisterWithUsedPreAuthKey tests that an existing node
 // can re-register using a pre-auth key that's already marked as Used=true, as long as:
 // 1. The node is re-registering with the same MachineKey it originally used
@@ -3204,7 +3205,8 @@ func TestNodeReregistrationWithExpiredPreAuthKey(t *testing.T) {
 //
 // Background: When Docker/Kubernetes containers restart, they keep their persistent state
 // (including the MachineKey), but container entrypoints unconditionally run:
-//   tailscale up --authkey=$TS_AUTHKEY
+//
+//	tailscale up --authkey=$TS_AUTHKEY
 //
 // This caused nodes to be rejected after restart because the pre-auth key was already
 // marked as Used=true from the initial registration. The fix allows re-registration of
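
The doc comment above describes the rule being tested: a restarted node may reuse an already-used pre-auth key only when it presents the same MachineKey and the key has not expired. A rough sketch of that decision with hypothetical, simplified types, not headscale's real ones:

package main

import "fmt"

// PreAuthKey and Node are simplified stand-ins for illustration only.
type PreAuthKey struct {
	Used    bool
	Expired bool
}

type Node struct {
	MachineKey string
}

// canReuseAuthKey mirrors the rule described in the comment: a Used key is
// acceptable only when the request comes from the same MachineKey that
// registered originally, and the key itself has not expired.
func canReuseAuthKey(existing *Node, requestMachineKey string, key PreAuthKey) bool {
	if key.Expired {
		return false
	}
	if !key.Used {
		return true // first registration with this key
	}
	return existing != nil && existing.MachineKey == requestMachineKey
}

func main() {
	n := &Node{MachineKey: "mkey:abc"}
	fmt.Println(canReuseAuthKey(n, "mkey:abc", PreAuthKey{Used: true}))   // true: same machine re-registering
	fmt.Println(canReuseAuthKey(nil, "mkey:new", PreAuthKey{Used: true})) // false: new machine, used key
}
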
@@ -31,7 +31,7 @@ func decodingError(name string, err error) error {
 // have a type that implements encoding.TextUnmarshaler.
 type TextSerialiser struct{}
 
-func (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) (err error) {
+func (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue any) error {
 	fieldValue := reflect.New(field.FieldType)
 
 	// If the field is a pointer, we need to dereference it to get the actual type
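
`any` has been a predeclared alias for `interface{}` since Go 1.18, so the signature rewrites in this file are purely cosmetic. A small illustrative sketch:

package main

import "fmt"

// describe accepts any value; `any` and `interface{}` are the same type.
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	var older interface{} = 42
	var newer any = 42
	fmt.Println(describe(older), describe(newer)) // both print "int: 42"
}
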
@@ -77,10 +77,10 @@ func (TextSerialiser) Scan(ctx context.Context, field *schema.Field, dst reflect
 		}
 	}
 
-	return err
+	return nil
 }
 
-func (TextSerialiser) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
+func (TextSerialiser) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue any) (any, error) {
 	switch v := fieldValue.(type) {
 	case encoding.TextMarshaler:
 		// If the value is nil, we return nil, however, go nil values are not
@@ -1136,13 +1136,9 @@ func XTestBatcherChannelClosingRace(t *testing.T) {
 			// First connection
 			ch1 := make(chan *tailcfg.MapResponse, 1)
 
-			wg.Add(1)
-
-			go func() {
-				defer wg.Done()
-
+			wg.Go(func() {
 				batcher.AddNode(testNode.n.ID, ch1, tailcfg.CapabilityVersion(100))
-			}()
+			})
 
 			// Add real work during connection chaos
 			if i%10 == 0 {
@@ -1152,24 +1148,17 @@ func XTestBatcherChannelClosingRace(t *testing.T) {
 			// Rapid second connection - should replace ch1
 			ch2 := make(chan *tailcfg.MapResponse, 1)
 
-			wg.Add(1)
-
-			go func() {
-				defer wg.Done()
-
+			wg.Go(func() {
 				time.Sleep(1 * time.Microsecond)
 				batcher.AddNode(testNode.n.ID, ch2, tailcfg.CapabilityVersion(100))
-			}()
+			})
 
 			// Remove second connection
-			wg.Add(1)
 
-			go func() {
-				defer wg.Done()
-
+			wg.Go(func() {
 				time.Sleep(2 * time.Microsecond)
 				batcher.RemoveNode(testNode.n.ID, ch2)
-			}()
+			})
 
 			wg.Wait()
 
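
`sync.WaitGroup.Go`, added in Go 1.25, is what collapses the Add(1) / go func / defer Done() pattern in the two hunks above. A minimal standalone sketch, independent of the batcher types:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// wg.Go adds one to the counter, runs the function in a new goroutine,
	// and marks it done when the function returns, replacing the manual
	// wg.Add(1) / go func() { defer wg.Done(); ... }() boilerplate.
	for i := range 3 {
		wg.Go(func() {
			fmt.Println("worker", i)
		})
	}

	wg.Wait()
}
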
@@ -1789,10 +1778,7 @@ func XTestBatcherScalability(t *testing.T) {
 			// This ensures some nodes stay connected to continue receiving updates
 			startIdx := cycle % len(testNodes)
 
-			endIdx := startIdx + len(testNodes)/4
-			if endIdx > len(testNodes) {
-				endIdx = len(testNodes)
-			}
+			endIdx := min(startIdx+len(testNodes)/4, len(testNodes))
 
 			if startIdx >= endIdx {
 				startIdx = 0
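
The `min` builtin (Go 1.21+) replaces the explicit clamp. A standalone sketch of the same bounds arithmetic with placeholder values:

package main

import "fmt"

func main() {
	nodes := make([]int, 10) // placeholder slice standing in for testNodes
	cycle := 7

	startIdx := cycle % len(nodes)

	// min replaces: endIdx := startIdx + len(nodes)/4; if endIdx > len(nodes) { endIdx = len(nodes) }
	endIdx := min(startIdx+len(nodes)/4, len(nodes))

	fmt.Println(startIdx, endIdx) // 7 9
}
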
@@ -2313,7 +2299,7 @@ func TestBatcherRapidReconnection(t *testing.T) {
 	receivedCount := 0
 	timeout := time.After(500 * time.Millisecond)
 
-	for i := 0; i < len(allNodes); i++ {
+	for i := range allNodes {
 		select {
 		case update := <-newChannels[i]:
 			if update != nil {
@@ -3,6 +3,7 @@ package v2
 import (
 	"errors"
 	"fmt"
+	"slices"
 	"time"
 
 	"github.com/juanfont/headscale/hscontrol/types"
@@ -178,11 +179,8 @@ func (pol *Policy) compileACLWithAutogroupSelf(
 			for _, ips := range resolvedSrcIPs {
 				for _, n := range sameUserNodes {
 					// Check if any of this node's IPs are in the source set
-					for _, nodeIP := range n.IPs() {
-						if ips.Contains(nodeIP) {
-							n.AppendToIPSet(&srcIPs)
-							break
-						}
+					if slices.ContainsFunc(n.IPs(), ips.Contains) {
+						n.AppendToIPSet(&srcIPs)
 					}
 				}
 			}
@@ -375,11 +373,8 @@ func (pol *Policy) compileSSHPolicy(
 		var filteredSrcIPs netipx.IPSetBuilder
 		for _, n := range sameUserNodes {
 			// Check if any of this node's IPs are in the source set
-			for _, nodeIP := range n.IPs() {
-				if srcIPs.Contains(nodeIP) {
-					n.AppendToIPSet(&filteredSrcIPs)
-					break // Found this node, move to next
-				}
+			if slices.ContainsFunc(n.IPs(), srcIPs.Contains) {
+				n.AppendToIPSet(&filteredSrcIPs) // Found this node, move to next
 			}
 		}
 
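
`slices.ContainsFunc` answers "does any element satisfy this predicate?", which is exactly what the removed inner loops with `break` did. A small sketch using plain string IPs instead of the netip/netipx types:

package main

import (
	"fmt"
	"slices"
)

func main() {
	allowed := map[string]bool{"10.0.0.2": true}
	contains := func(ip string) bool { return allowed[ip] }

	nodeIPs := []string{"10.0.0.1", "10.0.0.2"}

	// Equivalent to looping over nodeIPs and breaking on the first match.
	if slices.ContainsFunc(nodeIPs, contains) {
		fmt.Println("node has at least one IP in the source set")
	}
}
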
@@ -3,6 +3,7 @@ package v2
 import (
 	"encoding/json"
 	"net/netip"
+	"slices"
 	"strings"
 	"testing"
 	"time"
@@ -906,14 +907,7 @@ func TestCompileFilterRulesForNodeWithAutogroupSelf(t *testing.T) {
 			}
 
 			for _, expectedIP := range expectedDestIPs {
-				found := false
-
-				for _, actualIP := range actualDestIPs {
-					if actualIP == expectedIP {
-						found = true
-						break
-					}
-				}
+				found := slices.Contains(actualDestIPs, expectedIP)
 
 				if !found {
 					t.Errorf("expected destination IP %s to be included, got: %v", expectedIP, actualDestIPs)
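
`slices.Contains` is the plain-equality variant used here in place of the manual found/break loop. A tiny sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	actualDestIPs := []string{"100.64.0.1/32", "100.64.0.2/32"}

	// found := slices.Contains(...) replaces the loop that set found = true and broke.
	found := slices.Contains(actualDestIPs, "100.64.0.2/32")
	fmt.Println(found) // true
}
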
@@ -1007,7 +1007,7 @@ func (g Groups) Contains(group *Group) error {
 // with "group:". If any group name is invalid, an error is returned.
 func (g *Groups) UnmarshalJSON(b []byte) error {
 	// First unmarshal as a generic map to validate group names first
-	var rawMap map[string]interface{}
+	var rawMap map[string]any
 	if err := json.Unmarshal(b, &rawMap); err != nil {
 		return err
 	}
@@ -1024,7 +1024,7 @@ func (g *Groups) UnmarshalJSON(b []byte) error {
 	rawGroups := make(map[string][]string)
 	for key, value := range rawMap {
 		switch v := value.(type) {
-		case []interface{}:
+		case []any:
 			// Convert []interface{} to []string
 			var stringSlice []string
 			for _, item := range v {
@@ -39,9 +39,10 @@ func parsePortRange(portDef string) ([]tailcfg.PortRange, error) {
 	}
 
 	var portRanges []tailcfg.PortRange
-	parts := strings.Split(portDef, ",")
 
-	for _, part := range parts {
+	parts := strings.SplitSeq(portDef, ",")
+
+	for part := range parts {
 		if strings.Contains(part, "-") {
 			rangeParts := strings.Split(part, "-")
 			rangeParts = slices.DeleteFunc(rangeParts, func(e string) bool {
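
`strings.SplitSeq` (Go 1.24+) yields the substrings as an iterator instead of allocating a slice, so the loop ranges over the sequence directly. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	portDef := "80,443,8000-8080"

	// SplitSeq produces each comma-separated part lazily; range consumes the iterator.
	for part := range strings.SplitSeq(portDef, ",") {
		fmt.Println("part:", part)
	}
}
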
@@ -200,9 +200,9 @@ func (s *State) DebugSSHPolicies() map[string]*tailcfg.SSHPolicy {
 }
 
 // DebugRegistrationCache returns debug information about the registration cache.
-func (s *State) DebugRegistrationCache() map[string]interface{} {
+func (s *State) DebugRegistrationCache() map[string]any {
 	// The cache doesn't expose internal statistics, so we provide basic info
-	result := map[string]interface{}{
+	result := map[string]any{
 		"type":       "zcache",
 		"expiration": registerCacheExpiration.String(),
 		"cleanup":    registerCacheCleanup.String(),
@@ -872,7 +872,7 @@ func TestNodeStoreConcurrentPutNode(t *testing.T) {
 
 	var wg sync.WaitGroup
 	results := make(chan bool, concurrentOps)
-	for i := 0; i < concurrentOps; i++ {
+	for i := range concurrentOps {
 		wg.Add(1)
 		go func(nodeID int) {
 			defer wg.Done()
@@ -904,7 +904,7 @@ func TestNodeStoreBatchingEfficiency(t *testing.T) {
 
 	var wg sync.WaitGroup
 	results := make(chan bool, ops)
-	for i := 0; i < ops; i++ {
+	for i := range ops {
 		wg.Add(1)
 		go func(nodeID int) {
 			defer wg.Done()
@@ -941,11 +941,12 @@ func TestNodeStoreRaceConditions(t *testing.T) {
 	var wg sync.WaitGroup
 	errors := make(chan error, numGoroutines*opsPerGoroutine)
 
-	for i := 0; i < numGoroutines; i++ {
+	for i := range numGoroutines {
 		wg.Add(1)
 		go func(gid int) {
 			defer wg.Done()
-			for j := 0; j < opsPerGoroutine; j++ {
+
+			for j := range opsPerGoroutine {
 				switch j % 3 {
 				case 0:
 					resultNode, _ := store.UpdateNode(nodeID, func(n *types.Node) {
@@ -993,7 +994,7 @@ func TestNodeStoreResourceCleanup(t *testing.T) {
 	afterStartGoroutines := runtime.NumGoroutine()
 
 	const ops = 100
-	for i := 0; i < ops; i++ {
+	for i := range ops {
 		nodeID := types.NodeID(i + 1)
 		node := createConcurrentTestNode(nodeID, "cleanup-node")
 		resultNode := store.PutNode(node)
@@ -1100,7 +1101,7 @@ func TestNodeStoreOperationTimeout(t *testing.T) {
 
 // --- Edge case: update non-existent node ---
 func TestNodeStoreUpdateNonExistentNode(t *testing.T) {
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
 		store := NewNodeStore(nil, allowAllPeersFunc, TestBatchSize, TestBatchTimeout)
 		store.Start()
 		nonExistentID := types.NodeID(999 + i)
@@ -1124,8 +1125,7 @@ func BenchmarkNodeStoreAllocations(b *testing.B) {
 	store.Start()
 	defer store.Stop()
 
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
 		nodeID := types.NodeID(i + 1)
 		node := createConcurrentTestNode(nodeID, "bench-node")
 		store.PutNode(node)
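
`testing.B.Loop` (Go 1.24+) manages the benchmark timer and iteration count itself, which is why the explicit `b.ResetTimer()` line disappears. A minimal benchmark sketch:

package bench_test

import "testing"

func BenchmarkAppend(b *testing.B) {
	// b.Loop handles timing and decides how many iterations to run,
	// so no b.ResetTimer call is needed.
	var s []int
	for i := 0; b.Loop(); i++ {
		s = append(s, i)
	}
	_ = s
}
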
@@ -220,10 +220,12 @@ func DefaultBatcherWorkers() int {
 // DefaultBatcherWorkersFor returns the default number of batcher workers for a given CPU count.
 // Default to 3/4 of CPU cores, minimum 1, no maximum.
 func DefaultBatcherWorkersFor(cpuCount int) int {
-	defaultWorkers := (cpuCount * 3) / 4
-	if defaultWorkers < 1 {
-		defaultWorkers = 1
-	}
+	const (
+		workerNumerator   = 3
+		workerDenominator = 4
+	)
+
+	defaultWorkers := max((cpuCount*workerNumerator)/workerDenominator, 1)
 
 	return defaultWorkers
 }
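
The same hunk also shows the `max` builtin (Go 1.21+) replacing an if-clamp, with the 3/4 ratio pulled out into named constants. A standalone restatement of that calculation:

package main

import "fmt"

// defaultWorkersFor mirrors the pattern above: name the magic numbers as
// constants and use the max builtin instead of an explicit minimum check.
func defaultWorkersFor(cpuCount int) int {
	const (
		workerNumerator   = 3
		workerDenominator = 4
	)

	return max((cpuCount*workerNumerator)/workerDenominator, 1)
}

func main() {
	fmt.Println(defaultWorkersFor(1), defaultWorkersFor(8)) // 1 6
}
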
@@ -49,22 +49,22 @@ func (l *DBLogWrapper) LogMode(gormLogger.LogLevel) gormLogger.Interface {
 	return l
 }
 
-func (l *DBLogWrapper) Info(ctx context.Context, msg string, data ...interface{}) {
+func (l *DBLogWrapper) Info(ctx context.Context, msg string, data ...any) {
 	l.Logger.Info().Msgf(msg, data...)
 }
 
-func (l *DBLogWrapper) Warn(ctx context.Context, msg string, data ...interface{}) {
+func (l *DBLogWrapper) Warn(ctx context.Context, msg string, data ...any) {
 	l.Logger.Warn().Msgf(msg, data...)
 }
 
-func (l *DBLogWrapper) Error(ctx context.Context, msg string, data ...interface{}) {
+func (l *DBLogWrapper) Error(ctx context.Context, msg string, data ...any) {
 	l.Logger.Error().Msgf(msg, data...)
 }
 
 func (l *DBLogWrapper) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) {
 	elapsed := time.Since(begin)
 	sql, rowsAffected := fc()
-	fields := map[string]interface{}{
+	fields := map[string]any{
 		"duration":     elapsed,
 		"sql":          sql,
 		"rowsAffected": rowsAffected,
@@ -83,7 +83,7 @@ func (l *DBLogWrapper) Trace(ctx context.Context, begin time.Time, fc func() (sq
 	l.Logger.Debug().Fields(fields).Msgf("")
 }
 
-func (l *DBLogWrapper) ParamsFilter(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) {
+func (l *DBLogWrapper) ParamsFilter(ctx context.Context, sql string, params ...any) (string, []any) {
 	if l.ParameterizedQueries {
 		return sql, nil
 	}