Mirror of https://github.com/juanfont/headscale.git
stability and race conditions in auth and node store (#2781)
This PR addresses consistency issues that were introduced or discovered with the NodeStore.

nodestore: PutNode and UpdateNode now return the node as it exists once the batched write has finished. This closes a race where reading the node back did not necessarily reflect the change that was just made, and it ensures the returned node includes all other updates from the same batch.

auth: The authentication paths have been unified and simplified. This removes a number of bad branches and ensures we only do the minimal work. A comprehensive auth test set has been created so auth can be validated without running integration tests, and it has allowed us to generate test cases for all the branches we currently know of.

integration: Added more tooling and checks to validate that nodes reach the expected state as they come up and go down, and standardised behaviour between the different auth models. Much of this exists to support or detect the issues in the NodeStore changes (races) and the auth changes (inconsistencies after login and reaching the correct state).

This PR was assisted, particularly the tests, by Claude Code.
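To make the NodeStore change concrete, here is a rough usage sketch (this is not code from the PR; the caller and the saveToDB helper are hypothetical) of the new return values of UpdateNode/PutNode and the caller-side existence check before persisting:

// Sketch only: illustrates the API shape this PR introduces, not the actual
// headscale call sites. saveToDB is a placeholder for the database write.
func updateAndPersist(store *NodeStore, nodeID types.NodeID) error {
    // UpdateNode now returns the node as it exists after the whole batch has
    // been applied, plus an ok flag that is false if the node was deleted
    // (for example by an ephemeral logout) in the same batch.
    updated, ok := store.UpdateNode(nodeID, func(n *types.Node) {
        n.LastSeen = ptr.To(time.Now())
    })
    if !ok || !updated.Valid() {
        return nil // nothing left to persist
    }
    // Re-check the store before writing back, mirroring the guard described
    // in the tests below, so a deleted node is not re-inserted.
    if _, exists := store.GetNode(updated.ID()); !exists {
        return fmt.Errorf("node %d was deleted, refusing to persist", nodeID)
    }
    return saveToDB(updated.AsStruct())
}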
@@ -60,9 +60,6 @@ type DebugStringInfo struct {

// DebugOverview returns a comprehensive overview of the current state for debugging.
func (s *State) DebugOverview() string {
    s.mu.RLock()
    defer s.mu.RUnlock()

    allNodes := s.nodeStore.ListNodes()
    users, _ := s.ListAllUsers()

@@ -270,9 +267,6 @@ func (s *State) PolicyDebugString() string {

// DebugOverviewJSON returns a structured overview of the current state for debugging.
func (s *State) DebugOverviewJSON() DebugOverviewInfo {
    s.mu.RLock()
    defer s.mu.RUnlock()

    allNodes := s.nodeStore.ListNodes()
    users, _ := s.ListAllUsers()

@@ -33,8 +33,8 @@ func TestNodeStoreDebugString(t *testing.T) {
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()

    store.PutNode(node1)
    store.PutNode(node2)
    _ = store.PutNode(node1)
    _ = store.PutNode(node2)

    return store
},

hscontrol/state/ephemeral_test.go (new file, 460 lines)
@@ -0,0 +1,460 @@
package state

import (
    "net/netip"
    "testing"
    "time"

    "github.com/juanfont/headscale/hscontrol/types"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "tailscale.com/types/ptr"
)

// TestEphemeralNodeDeleteWithConcurrentUpdate tests the race condition where UpdateNode and DeleteNode
// are called concurrently and may be batched together. This reproduces the issue where ephemeral nodes
// are not properly deleted during logout because UpdateNodeFromMapRequest returns a stale node view
// after the node has been deleted from the NodeStore.
func TestEphemeralNodeDeleteWithConcurrentUpdate(t *testing.T) {
    // Create a simple test node
    node := createTestNode(1, 1, "test-user", "test-node")

    // Create NodeStore
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    // Put the node in the store
    resultNode := store.PutNode(node)
    require.True(t, resultNode.Valid(), "initial PutNode should return valid node")

    // Verify node exists
    retrievedNode, found := store.GetNode(node.ID)
    require.True(t, found)
    require.Equal(t, node.ID, retrievedNode.ID())

    // Test scenario: UpdateNode is called, returns a node view from the batch,
    // but in the same batch a DeleteNode removes the node.
    // This simulates what happens when:
    // 1. UpdateNodeFromMapRequest calls UpdateNode and gets back updatedNode
    // 2. At the same time, handleLogout calls DeleteNode
    // 3. They get batched together: [UPDATE, DELETE]
    // 4. UPDATE modifies the node, DELETE removes it
    // 5. UpdateNode returns a node view based on the state AFTER both operations
    // 6. If DELETE came after UPDATE, the returned node should be invalid

    done := make(chan bool, 2)
    var updatedNode types.NodeView
    var updateOk bool

    // Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
    go func() {
        updatedNode, updateOk = store.UpdateNode(node.ID, func(n *types.Node) {
            n.LastSeen = ptr.To(time.Now())
        })
        done <- true
    }()

    // Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
    go func() {
        // Small delay to increase chance of batching together
        time.Sleep(1 * time.Millisecond)
        store.DeleteNode(node.ID)
        done <- true
    }()

    // Wait for both operations
    <-done
    <-done

    // Give batching time to complete
    time.Sleep(50 * time.Millisecond)

    // The key assertion: if UpdateNode and DeleteNode were batched together
    // with DELETE after UPDATE, then UpdateNode should return an invalid node
    // OR it should return a valid node but the node should no longer exist in the store

    _, found = store.GetNode(node.ID)
    assert.False(t, found, "node should be deleted from NodeStore")

    // If the update happened before delete in the batch, the returned node might be invalid
    if updateOk {
        t.Logf("UpdateNode returned ok=true, valid=%v", updatedNode.Valid())
        // This is the bug scenario - UpdateNode thinks it succeeded but node is gone
        if updatedNode.Valid() {
            t.Logf("WARNING: UpdateNode returned valid node but node was deleted - this indicates the race condition bug")
        }
    } else {
        t.Logf("UpdateNode correctly returned ok=false (node deleted in same batch)")
    }
}

// TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch specifically tests that when
// UpdateNode and DeleteNode are in the same batch with DELETE after UPDATE,
// the UpdateNode should return an invalid node view.
func TestUpdateNodeReturnsInvalidWhenDeletedInSameBatch(t *testing.T) {
    node := createTestNode(2, 1, "test-user", "test-node-2")

    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    // Put node in store
    _ = store.PutNode(node)

    // Simulate the exact sequence: UpdateNode gets queued, then DeleteNode gets queued,
    // they batch together, and we check what UpdateNode returns

    resultChan := make(chan struct {
        node types.NodeView
        ok   bool
    })

    // Start UpdateNode - it will block until batch is applied
    go func() {
        node, ok := store.UpdateNode(node.ID, func(n *types.Node) {
            n.LastSeen = ptr.To(time.Now())
        })
        resultChan <- struct {
            node types.NodeView
            ok   bool
        }{node, ok}
    }()

    // Give UpdateNode a moment to queue its work
    time.Sleep(5 * time.Millisecond)

    // Now queue DeleteNode - should batch with the UPDATE
    store.DeleteNode(node.ID)

    // Get the result from UpdateNode
    result := <-resultChan

    // Wait for batch to complete
    time.Sleep(50 * time.Millisecond)

    // Node should be deleted
    _, found := store.GetNode(node.ID)
    assert.False(t, found, "node should be deleted")

    // The critical check: what did UpdateNode return?
    // After the commit c6b09289988f34398eb3157e31ba092eb8721a9f,
    // UpdateNode returns the node state from the batch.
    // If DELETE came after UPDATE in the batch, the node doesn't exist anymore,
    // so UpdateNode should return (invalid, false)
    t.Logf("UpdateNode returned: ok=%v, valid=%v", result.ok, result.node.Valid())

    // This is the expected behavior - if node was deleted in same batch,
    // UpdateNode should return invalid node
    if result.ok && result.node.Valid() {
        t.Error("BUG: UpdateNode returned valid node even though it was deleted in same batch")
    }
}

// TestPersistNodeToDBPreventsRaceCondition tests that persistNodeToDB correctly handles
// the race condition where a node is deleted after UpdateNode returns but before
// persistNodeToDB is called. This reproduces the ephemeral node deletion bug.
func TestPersistNodeToDBPreventsRaceCondition(t *testing.T) {
    node := createTestNode(3, 1, "test-user", "test-node-3")

    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    // Put node in store
    _ = store.PutNode(node)

    // Simulate UpdateNode being called
    updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
        n.LastSeen = ptr.To(time.Now())
    })
    require.True(t, ok, "UpdateNode should succeed")
    require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")

    // Now delete the node (simulating ephemeral logout happening concurrently)
    store.DeleteNode(node.ID)

    // Wait for deletion to complete
    time.Sleep(50 * time.Millisecond)

    // Verify node is deleted
    _, found := store.GetNode(node.ID)
    require.False(t, found, "node should be deleted")

    // Now try to use the updatedNode from before the deletion
    // In the old code, this would re-insert the node into the database
    // With our fix, GetNode check in persistNodeToDB should prevent this

    // Simulate what persistNodeToDB does - check if node still exists
    _, exists := store.GetNode(updatedNode.ID())
    if !exists {
        t.Log("SUCCESS: persistNodeToDB check would prevent re-insertion of deleted node")
    } else {
        t.Error("BUG: Node still exists in NodeStore after deletion")
    }

    // The key assertion: after deletion, attempting to persist the old updatedNode
    // should fail because the node no longer exists in NodeStore
    assert.False(t, exists, "persistNodeToDB should detect node was deleted and refuse to persist")
}

// TestEphemeralNodeLogoutRaceCondition tests the specific race condition that occurs
// when an ephemeral node logs out. This reproduces the bug where:
// 1. UpdateNodeFromMapRequest calls UpdateNode and receives a node view
// 2. Concurrently, handleLogout is called for the ephemeral node and calls DeleteNode
// 3. UpdateNode and DeleteNode get batched together
// 4. If UpdateNode's result is used to call persistNodeToDB after the deletion,
//    the node could be re-inserted into the database even though it was deleted
func TestEphemeralNodeLogoutRaceCondition(t *testing.T) {
    ephemeralNode := createTestNode(4, 1, "test-user", "ephemeral-node")
    ephemeralNode.AuthKey = &types.PreAuthKey{
        ID:        1,
        Key:       "test-key",
        Ephemeral: true,
    }

    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    // Put ephemeral node in store
    _ = store.PutNode(ephemeralNode)

    // Simulate concurrent operations:
    // 1. UpdateNode (from UpdateNodeFromMapRequest during polling)
    // 2. DeleteNode (from handleLogout when client sends logout request)

    var updatedNode types.NodeView
    var updateOk bool
    done := make(chan bool, 2)

    // Goroutine 1: UpdateNode (simulates UpdateNodeFromMapRequest)
    go func() {
        updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
            n.LastSeen = ptr.To(time.Now())
        })
        done <- true
    }()

    // Goroutine 2: DeleteNode (simulates handleLogout for ephemeral node)
    go func() {
        time.Sleep(1 * time.Millisecond) // Slight delay to batch operations
        store.DeleteNode(ephemeralNode.ID)
        done <- true
    }()

    // Wait for both operations
    <-done
    <-done

    // Give batching time to complete
    time.Sleep(50 * time.Millisecond)

    // Node should be deleted from store
    _, found := store.GetNode(ephemeralNode.ID)
    assert.False(t, found, "ephemeral node should be deleted from NodeStore")

    // Critical assertion: if UpdateNode returned before DeleteNode completed,
    // the updatedNode might be valid but the node is actually deleted.
    // This is the bug - UpdateNodeFromMapRequest would get a valid node,
    // then try to persist it, re-inserting the deleted ephemeral node.
    if updateOk && updatedNode.Valid() {
        t.Log("UpdateNode returned valid node, but node is deleted - this is the race condition")

        // In the real code, this would cause persistNodeToDB to be called with updatedNode
        // The fix in persistNodeToDB checks if the node still exists:
        _, stillExists := store.GetNode(updatedNode.ID())
        assert.False(t, stillExists, "persistNodeToDB should check NodeStore and find node deleted")
    } else if !updateOk || !updatedNode.Valid() {
        t.Log("UpdateNode correctly returned invalid/not-ok result (delete happened in same batch)")
    }
}

// TestUpdateNodeFromMapRequestEphemeralLogoutSequence tests the exact sequence
// that causes ephemeral node logout failures:
// 1. Client sends MapRequest with updated endpoint info
// 2. UpdateNodeFromMapRequest starts processing, calls UpdateNode
// 3. Client sends logout request (past expiry)
// 4. handleLogout calls DeleteNode for ephemeral node
// 5. UpdateNode and DeleteNode batch together
// 6. UpdateNode returns a valid node (from before delete in batch)
// 7. persistNodeToDB is called with the stale valid node
// 8. Node gets re-inserted into database instead of staying deleted
func TestUpdateNodeFromMapRequestEphemeralLogoutSequence(t *testing.T) {
    ephemeralNode := createTestNode(5, 1, "test-user", "ephemeral-node-5")
    ephemeralNode.AuthKey = &types.PreAuthKey{
        ID:        2,
        Key:       "test-key-2",
        Ephemeral: true,
    }

    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    // Initial state: ephemeral node exists
    _ = store.PutNode(ephemeralNode)

    // Step 1: UpdateNodeFromMapRequest calls UpdateNode
    // (simulating client sending MapRequest with endpoint updates)
    updateStarted := make(chan bool)
    var updatedNode types.NodeView
    var updateOk bool

    go func() {
        updateStarted <- true
        updatedNode, updateOk = store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
            n.LastSeen = ptr.To(time.Now())
            endpoint := netip.MustParseAddrPort("10.0.0.1:41641")
            n.Endpoints = []netip.AddrPort{endpoint}
        })
    }()

    <-updateStarted
    // Small delay to ensure UpdateNode is queued
    time.Sleep(5 * time.Millisecond)

    // Step 2: Logout happens - handleLogout calls DeleteNode
    // (simulating client sending logout with past expiry)
    store.DeleteNode(ephemeralNode.ID)

    // Wait for batching to complete
    time.Sleep(50 * time.Millisecond)

    // Step 3: Check results
    _, nodeExists := store.GetNode(ephemeralNode.ID)
    assert.False(t, nodeExists, "ephemeral node must be deleted after logout")

    // Step 4: Simulate what happens if we try to persist the updatedNode
    if updateOk && updatedNode.Valid() {
        // This is the problematic path - UpdateNode returned a valid node
        // but the node was deleted in the same batch
        t.Log("UpdateNode returned valid node even though node was deleted")

        // The fix: persistNodeToDB must check NodeStore before persisting
        _, checkExists := store.GetNode(updatedNode.ID())
        if checkExists {
            t.Error("BUG: Node still exists in NodeStore after deletion - should be impossible")
        } else {
            t.Log("SUCCESS: persistNodeToDB would detect node is deleted and refuse to persist")
        }
    } else {
        t.Log("UpdateNode correctly indicated node was deleted (returned invalid or not-ok)")
    }

    // Final assertion: node must not exist
    _, finalExists := store.GetNode(ephemeralNode.ID)
    assert.False(t, finalExists, "ephemeral node must remain deleted")
}

// TestUpdateNodeDeletedInSameBatchReturnsInvalid specifically tests that when
// UpdateNode and DeleteNode are batched together with DELETE after UPDATE,
// UpdateNode returns ok=false to indicate the node was deleted.
func TestUpdateNodeDeletedInSameBatchReturnsInvalid(t *testing.T) {
    node := createTestNode(6, 1, "test-user", "test-node-6")

    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    // Put node in store
    _ = store.PutNode(node)

    // Queue UpdateNode
    updateDone := make(chan struct {
        node types.NodeView
        ok   bool
    })

    go func() {
        updatedNode, ok := store.UpdateNode(node.ID, func(n *types.Node) {
            n.LastSeen = ptr.To(time.Now())
        })
        updateDone <- struct {
            node types.NodeView
            ok   bool
        }{updatedNode, ok}
    }()

    // Small delay to ensure UpdateNode is queued
    time.Sleep(5 * time.Millisecond)

    // Queue DeleteNode - should batch with UpdateNode
    store.DeleteNode(node.ID)

    // Get UpdateNode result
    result := <-updateDone

    // Wait for batch to complete
    time.Sleep(50 * time.Millisecond)

    // Node should be deleted
    _, exists := store.GetNode(node.ID)
    assert.False(t, exists, "node should be deleted from store")

    // UpdateNode should indicate the node was deleted
    // After c6b09289988f34398eb3157e31ba092eb8721a9f, when UPDATE and DELETE
    // are in the same batch with DELETE after UPDATE, UpdateNode returns
    // the state after the batch is applied - which means the node doesn't exist
    assert.False(t, result.ok, "UpdateNode should return ok=false when node deleted in same batch")
    assert.False(t, result.node.Valid(), "UpdateNode should return invalid node when node deleted in same batch")
}

// TestPersistNodeToDBChecksNodeStoreBeforePersist verifies that persistNodeToDB
// checks if the node still exists in NodeStore before persisting to database.
// This prevents the race condition where:
// 1. UpdateNodeFromMapRequest calls UpdateNode and gets a valid node
// 2. Ephemeral node logout calls DeleteNode
// 3. UpdateNode and DeleteNode batch together
// 4. UpdateNode returns a valid node (from before delete in batch)
// 5. UpdateNodeFromMapRequest calls persistNodeToDB with the stale node
// 6. persistNodeToDB must detect the node is deleted and refuse to persist
func TestPersistNodeToDBChecksNodeStoreBeforePersist(t *testing.T) {
    ephemeralNode := createTestNode(7, 1, "test-user", "ephemeral-node-7")
    ephemeralNode.AuthKey = &types.PreAuthKey{
        ID:        3,
        Key:       "test-key-3",
        Ephemeral: true,
    }

    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    // Put node in store
    _ = store.PutNode(ephemeralNode)

    // Simulate the race:
    // 1. UpdateNode is called (from UpdateNodeFromMapRequest)
    updatedNode, ok := store.UpdateNode(ephemeralNode.ID, func(n *types.Node) {
        n.LastSeen = ptr.To(time.Now())
    })
    require.True(t, ok, "UpdateNode should succeed")
    require.True(t, updatedNode.Valid(), "UpdateNode should return valid node")

    // 2. Node is deleted (from handleLogout for ephemeral node)
    store.DeleteNode(ephemeralNode.ID)

    // Wait for deletion
    time.Sleep(50 * time.Millisecond)

    // 3. Verify node is deleted from store
    _, exists := store.GetNode(ephemeralNode.ID)
    require.False(t, exists, "node should be deleted from NodeStore")

    // 4. Simulate what persistNodeToDB does - check if node still exists
    // The fix in persistNodeToDB checks NodeStore before persisting:
    //   if !exists { return error }
    // This prevents re-inserting the deleted node into the database

    // Verify the node from UpdateNode is valid but node is gone from store
    assert.True(t, updatedNode.Valid(), "UpdateNode returned a valid node view")
    _, stillExists := store.GetNode(updatedNode.ID())
    assert.False(t, stillExists, "but node should be deleted from NodeStore")

    // This is the critical test: persistNodeToDB must check NodeStore
    // and refuse to persist if the node doesn't exist anymore
    // The actual persistNodeToDB implementation does:
    //   _, exists := s.nodeStore.GetNode(node.ID())
    //   if !exists { return error }
}
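The fix these tests exercise is, per the comments in the tests themselves, an existence check against the NodeStore before any database write. A minimal sketch of that guard follows (the receiver, error message, and surrounding persistence code are assumed, not copied from the actual persistNodeToDB implementation):

// Sketch only: shape of the guard the tests above describe.
func (s *State) persistNodeToDBSketch(node types.NodeView) error {
    if _, exists := s.nodeStore.GetNode(node.ID()); !exists {
        return fmt.Errorf("node %d no longer exists in NodeStore, refusing to persist", node.ID())
    }
    // ... proceed with the actual database write ...
    return nil
}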
@@ -10,9 +10,9 @@ import (
    "tailscale.com/tailcfg"
)

// NetInfoFromMapRequest determines the correct NetInfo to use.
// netInfoFromMapRequest determines the correct NetInfo to use.
// Returns the NetInfo that should be used for this request.
func NetInfoFromMapRequest(
func netInfoFromMapRequest(
    nodeID types.NodeID,
    currentHostinfo *tailcfg.Hostinfo,
    reqHostinfo *tailcfg.Hostinfo,

@@ -61,7 +61,7 @@ func TestNetInfoFromMapRequest(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            result := NetInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)
            result := netInfoFromMapRequest(nodeID, tt.currentHostinfo, tt.reqHostinfo)

            if tt.expectNetInfo == nil {
                assert.Nil(t, result, "expected nil NetInfo")

@@ -100,14 +100,40 @@ func TestNetInfoPreservationInRegistrationFlow(t *testing.T) {
        }

        // BUG: Using the node being modified (no NetInfo) instead of existing node (has NetInfo)
        buggyResult := NetInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
        buggyResult := netInfoFromMapRequest(nodeID, nodeBeingModifiedHostinfo, newRegistrationHostinfo)
        assert.Nil(t, buggyResult, "Bug: Should return nil when using wrong hostinfo reference")

        // CORRECT: Using the existing node's hostinfo (has NetInfo)
        correctResult := NetInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
        correctResult := netInfoFromMapRequest(nodeID, existingNodeHostinfo, newRegistrationHostinfo)
        assert.NotNil(t, correctResult, "Fix: Should preserve NetInfo when using correct hostinfo reference")
        assert.Equal(t, 5, correctResult.PreferredDERP, "Should preserve the DERP region from existing node")
    })

    t.Run("new_node_creation_for_different_user_should_preserve_netinfo", func(t *testing.T) {
        // This test covers the scenario where:
        // 1. A node exists for user1 with NetInfo
        // 2. The same machine logs in as user2 (different user)
        // 3. A NEW node is created for user2 (pre-auth key flow)
        // 4. The new node should preserve NetInfo from the old node

        // Existing node for user1 with NetInfo
        existingNodeUser1Hostinfo := &tailcfg.Hostinfo{
            Hostname: "test-node",
            NetInfo:  &tailcfg.NetInfo{PreferredDERP: 7},
        }

        // New registration request for user2 (no NetInfo yet)
        newNodeUser2Hostinfo := &tailcfg.Hostinfo{
            Hostname: "test-node",
            OS:       "linux",
            // NetInfo is nil - registration request doesn't include it
        }

        // When creating a new node for user2, we should preserve NetInfo from user1's node
        result := netInfoFromMapRequest(types.NodeID(2), existingNodeUser1Hostinfo, newNodeUser2Hostinfo)
        assert.NotNil(t, result, "New node for user2 should preserve NetInfo from user1's node")
        assert.Equal(t, 7, result.PreferredDERP, "Should preserve DERP region from existing node")
    })
}

// Simple helper function for tests

@@ -15,7 +15,7 @@ import (
)

const (
    batchSize = 10
    batchSize    = 100
    batchTimeout = 500 * time.Millisecond
)

@@ -121,10 +121,11 @@ type Snapshot struct {
    nodesByID map[types.NodeID]types.Node

    // calculated from nodesByID
    nodesByNodeKey map[key.NodePublic]types.NodeView
    peersByNode    map[types.NodeID][]types.NodeView
    nodesByUser    map[types.UserID][]types.NodeView
    allNodes       []types.NodeView
    nodesByNodeKey    map[key.NodePublic]types.NodeView
    nodesByMachineKey map[key.MachinePublic]map[types.UserID]types.NodeView
    peersByNode       map[types.NodeID][]types.NodeView
    nodesByUser       map[types.UserID][]types.NodeView
    allNodes          []types.NodeView
}

// PeersFunc is a function that takes a list of nodes and returns a map

@@ -135,26 +136,29 @@ type PeersFunc func(nodes []types.NodeView) map[types.NodeID][]types.NodeView

// work represents a single operation to be performed on the NodeStore.
type work struct {
    op       int
    nodeID   types.NodeID
    node     types.Node
    updateFn UpdateNodeFunc
    result   chan struct{}
    op         int
    nodeID     types.NodeID
    node       types.Node
    updateFn   UpdateNodeFunc
    result     chan struct{}
    nodeResult chan types.NodeView // Channel to return the resulting node after batch application
}

// PutNode adds or updates a node in the store.
// If the node already exists, it will be replaced.
// If the node does not exist, it will be added.
// This is a blocking operation that waits for the write to complete.
func (s *NodeStore) PutNode(n types.Node) {
// Returns the resulting node after all modifications in the batch have been applied.
func (s *NodeStore) PutNode(n types.Node) types.NodeView {
    timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("put"))
    defer timer.ObserveDuration()

    work := work{
        op:     put,
        nodeID: n.ID,
        node:   n,
        result: make(chan struct{}),
        op:         put,
        nodeID:     n.ID,
        node:       n,
        result:     make(chan struct{}),
        nodeResult: make(chan types.NodeView, 1),
    }

    nodeStoreQueueDepth.Inc()

@@ -162,7 +166,10 @@ func (s *NodeStore) PutNode(n types.Node) {
    <-work.result
    nodeStoreQueueDepth.Dec()

    resultNode := <-work.nodeResult
    nodeStoreOperations.WithLabelValues("put").Inc()

    return resultNode
}

// UpdateNodeFunc is a function type that takes a pointer to a Node and modifies it.

@@ -173,6 +180,7 @@ type UpdateNodeFunc func(n *types.Node)

// This is analogous to a database "transaction", or, the caller should
// rather collect all data they want to change, and then call this function.
// Fewer calls are better.
// Returns the resulting node after all modifications in the batch have been applied.
//
// TODO(kradalby): Technically we could have a version of this that modifies the node
// in the current snapshot if _we know_ that the change will not affect the peer relationships.

@@ -181,15 +189,16 @@ type UpdateNodeFunc func(n *types.Node)

// a lock around the nodesByID map to ensure that no other writes are happening
// while we are modifying the node. Which mean we would need to implement read-write locks
// on all read operations.
func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) {
func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)) (types.NodeView, bool) {
    timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("update"))
    defer timer.ObserveDuration()

    work := work{
        op:       update,
        nodeID:   nodeID,
        updateFn: updateFn,
        result:   make(chan struct{}),
        op:         update,
        nodeID:     nodeID,
        updateFn:   updateFn,
        result:     make(chan struct{}),
        nodeResult: make(chan types.NodeView, 1),
    }

    nodeStoreQueueDepth.Inc()

@@ -197,7 +206,11 @@ func (s *NodeStore) UpdateNode(nodeID types.NodeID, updateFn func(n *types.Node)
    <-work.result
    nodeStoreQueueDepth.Dec()

    resultNode := <-work.nodeResult
    nodeStoreOperations.WithLabelValues("update").Inc()

    // Return the node and whether it exists (is valid)
    return resultNode, resultNode.Valid()
}

// DeleteNode removes a node from the store by its ID.

@@ -282,18 +295,32 @@ func (s *NodeStore) applyBatch(batch []work) {
    nodes := make(map[types.NodeID]types.Node)
    maps.Copy(nodes, s.data.Load().nodesByID)

    for _, w := range batch {
    // Track which work items need node results
    nodeResultRequests := make(map[types.NodeID][]*work)

    for i := range batch {
        w := &batch[i]
        switch w.op {
        case put:
            nodes[w.nodeID] = w.node
            if w.nodeResult != nil {
                nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
            }
        case update:
            // Update the specific node identified by nodeID
            if n, exists := nodes[w.nodeID]; exists {
                w.updateFn(&n)
                nodes[w.nodeID] = n
            }
            if w.nodeResult != nil {
                nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
            }
        case del:
            delete(nodes, w.nodeID)
            // For delete operations, send an invalid NodeView if requested
            if w.nodeResult != nil {
                nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
            }
        }
    }

@@ -303,6 +330,24 @@ func (s *NodeStore) applyBatch(batch []work) {
    // Update node count gauge
    nodeStoreNodesCount.Set(float64(len(nodes)))

    // Send the resulting nodes to all work items that requested them
    for nodeID, workItems := range nodeResultRequests {
        if node, exists := nodes[nodeID]; exists {
            nodeView := node.View()
            for _, w := range workItems {
                w.nodeResult <- nodeView
                close(w.nodeResult)
            }
        } else {
            // Node was deleted or doesn't exist
            for _, w := range workItems {
                w.nodeResult <- types.NodeView{} // Send invalid view
                close(w.nodeResult)
            }
        }
    }

    // Signal completion for all work items
    for _, w := range batch {
        close(w.result)
    }

@@ -323,9 +368,10 @@ func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) S
    }

    newSnap := Snapshot{
        nodesByID:      nodes,
        allNodes:       allNodes,
        nodesByNodeKey: make(map[key.NodePublic]types.NodeView),
        nodesByID:         nodes,
        allNodes:          allNodes,
        nodesByNodeKey:    make(map[key.NodePublic]types.NodeView),
        nodesByMachineKey: make(map[key.MachinePublic]map[types.UserID]types.NodeView),

        // peersByNode is most likely the most expensive operation,
        // it will use the list of all nodes, combined with the

@@ -339,11 +385,19 @@ func snapshotFromNodes(nodes map[types.NodeID]types.Node, peersFunc PeersFunc) S
        nodesByUser: make(map[types.UserID][]types.NodeView),
    }

    // Build nodesByUser and nodesByNodeKey maps
    // Build nodesByUser, nodesByNodeKey, and nodesByMachineKey maps
    for _, n := range nodes {
        nodeView := n.View()
        newSnap.nodesByUser[types.UserID(n.UserID)] = append(newSnap.nodesByUser[types.UserID(n.UserID)], nodeView)
        userID := types.UserID(n.UserID)

        newSnap.nodesByUser[userID] = append(newSnap.nodesByUser[userID], nodeView)
        newSnap.nodesByNodeKey[n.NodeKey] = nodeView

        // Build machine key index
        if newSnap.nodesByMachineKey[n.MachineKey] == nil {
            newSnap.nodesByMachineKey[n.MachineKey] = make(map[types.UserID]types.NodeView)
        }
        newSnap.nodesByMachineKey[n.MachineKey][userID] = nodeView
    }

    return newSnap

@@ -382,19 +436,40 @@ func (s *NodeStore) GetNodeByNodeKey(nodeKey key.NodePublic) (types.NodeView, bo
    return nodeView, exists
}

// GetNodeByMachineKey returns a node by its machine key. The bool indicates if the node exists.
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic) (types.NodeView, bool) {
// GetNodeByMachineKey returns a node by its machine key and user ID. The bool indicates if the node exists.
func (s *NodeStore) GetNodeByMachineKey(machineKey key.MachinePublic, userID types.UserID) (types.NodeView, bool) {
    timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key"))
    defer timer.ObserveDuration()

    nodeStoreOperations.WithLabelValues("get_by_machine_key").Inc()

    snapshot := s.data.Load()
    // We don't have a byMachineKey map, so we need to iterate
    // This could be optimized by adding a byMachineKey map if this becomes a hot path
    for _, node := range snapshot.nodesByID {
        if node.MachineKey == machineKey {
            return node.View(), true
    if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
        if node, exists := userMap[userID]; exists {
            return node, true
        }
    }

    return types.NodeView{}, false
}

// GetNodeByMachineKeyAnyUser returns the first node with the given machine key,
// regardless of which user it belongs to. This is useful for scenarios like
// transferring a node to a different user when re-authenticating with a
// different user's auth key.
// If multiple nodes exist with the same machine key (different users), the
// first one found is returned (order is not guaranteed).
func (s *NodeStore) GetNodeByMachineKeyAnyUser(machineKey key.MachinePublic) (types.NodeView, bool) {
    timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("get_by_machine_key_any_user"))
    defer timer.ObserveDuration()

    nodeStoreOperations.WithLabelValues("get_by_machine_key_any_user").Inc()

    snapshot := s.data.Load()
    if userMap, exists := snapshot.nodesByMachineKey[machineKey]; exists {
        // Return the first node found (order not guaranteed due to map iteration)
        for _, node := range userMap {
            return node, true
        }
    }

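A brief usage sketch of the two machine-key lookups above (the caller code is hypothetical; only the method signatures are taken from this diff): the per-user lookup is used when the authenticating user is known, while the any-user variant covers transferring an existing machine to a different user.

// Hypothetical caller combining the two lookups.
if node, ok := store.GetNodeByMachineKey(machineKey, userID); ok {
    return node, true // same machine, same user
}
if node, ok := store.GetNodeByMachineKeyAnyUser(machineKey); ok {
    return node, true // same machine, previously registered to another user
}
return types.NodeView{}, false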
@@ -1,7 +1,11 @@
package state

import (
    "context"
    "fmt"
    "net/netip"
    "runtime"
    "sync"
    "testing"
    "time"

@@ -249,7 +253,9 @@ func TestNodeStoreOperations(t *testing.T) {
            name: "add first node",
            action: func(store *NodeStore) {
                node := createTestNode(1, 1, "user1", "node1")
                store.PutNode(node)
                resultNode := store.PutNode(node)
                assert.True(t, resultNode.Valid(), "PutNode should return valid node")
                assert.Equal(t, node.ID, resultNode.ID())

                snapshot := store.data.Load()
                assert.Len(t, snapshot.nodesByID, 1)

@@ -288,7 +294,9 @@ func TestNodeStoreOperations(t *testing.T) {
            name: "add second node same user",
            action: func(store *NodeStore) {
                node2 := createTestNode(2, 1, "user1", "node2")
                store.PutNode(node2)
                resultNode := store.PutNode(node2)
                assert.True(t, resultNode.Valid(), "PutNode should return valid node")
                assert.Equal(t, types.NodeID(2), resultNode.ID())

                snapshot := store.data.Load()
                assert.Len(t, snapshot.nodesByID, 2)

@@ -308,7 +316,9 @@ func TestNodeStoreOperations(t *testing.T) {
            name: "add third node different user",
            action: func(store *NodeStore) {
                node3 := createTestNode(3, 2, "user2", "node3")
                store.PutNode(node3)
                resultNode := store.PutNode(node3)
                assert.True(t, resultNode.Valid(), "PutNode should return valid node")
                assert.Equal(t, types.NodeID(3), resultNode.ID())

                snapshot := store.data.Load()
                assert.Len(t, snapshot.nodesByID, 3)

@@ -409,10 +419,14 @@ func TestNodeStoreOperations(t *testing.T) {
        {
            name: "update node hostname",
            action: func(store *NodeStore) {
                store.UpdateNode(1, func(n *types.Node) {
                resultNode, ok := store.UpdateNode(1, func(n *types.Node) {
                    n.Hostname = "updated-node1"
                    n.GivenName = "updated-node1"
                })
                assert.True(t, ok, "UpdateNode should return true for existing node")
                assert.True(t, resultNode.Valid(), "Result node should be valid")
                assert.Equal(t, "updated-node1", resultNode.Hostname())
                assert.Equal(t, "updated-node1", resultNode.GivenName())

                snapshot := store.data.Load()
                assert.Equal(t, "updated-node1", snapshot.nodesByID[1].Hostname)

@@ -436,10 +450,14 @@ func TestNodeStoreOperations(t *testing.T) {
            name: "add nodes with odd-even filtering",
            action: func(store *NodeStore) {
                // Add nodes in sequence
                store.PutNode(createTestNode(1, 1, "user1", "node1"))
                store.PutNode(createTestNode(2, 2, "user2", "node2"))
                store.PutNode(createTestNode(3, 3, "user3", "node3"))
                store.PutNode(createTestNode(4, 4, "user4", "node4"))
                n1 := store.PutNode(createTestNode(1, 1, "user1", "node1"))
                assert.True(t, n1.Valid())
                n2 := store.PutNode(createTestNode(2, 2, "user2", "node2"))
                assert.True(t, n2.Valid())
                n3 := store.PutNode(createTestNode(3, 3, "user3", "node3"))
                assert.True(t, n3.Valid())
                n4 := store.PutNode(createTestNode(4, 4, "user4", "node4"))
                assert.True(t, n4.Valid())

                snapshot := store.data.Load()
                assert.Len(t, snapshot.nodesByID, 4)

@@ -478,6 +496,328 @@ func TestNodeStoreOperations(t *testing.T) {
                },
            },
        },
        {
            name: "test batch modifications return correct node state",
            setupFunc: func(t *testing.T) *NodeStore {
                node1 := createTestNode(1, 1, "user1", "node1")
                node2 := createTestNode(2, 1, "user1", "node2")
                initialNodes := types.Nodes{&node1, &node2}
                return NewNodeStore(initialNodes, allowAllPeersFunc)
            },
            steps: []testStep{
                {
                    name: "verify initial state",
                    action: func(store *NodeStore) {
                        snapshot := store.data.Load()
                        assert.Len(t, snapshot.nodesByID, 2)
                        assert.Equal(t, "node1", snapshot.nodesByID[1].Hostname)
                        assert.Equal(t, "node2", snapshot.nodesByID[2].Hostname)
                    },
                },
                {
                    name: "concurrent updates should reflect all batch changes",
                    action: func(store *NodeStore) {
                        // Start multiple updates that will be batched together
                        done1 := make(chan struct{})
                        done2 := make(chan struct{})
                        done3 := make(chan struct{})

                        var resultNode1, resultNode2 types.NodeView
                        var newNode3 types.NodeView
                        var ok1, ok2 bool

                        // These should all be processed in the same batch
                        go func() {
                            resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {
                                n.Hostname = "batch-updated-node1"
                                n.GivenName = "batch-given-1"
                            })
                            close(done1)
                        }()

                        go func() {
                            resultNode2, ok2 = store.UpdateNode(2, func(n *types.Node) {
                                n.Hostname = "batch-updated-node2"
                                n.GivenName = "batch-given-2"
                            })
                            close(done2)
                        }()

                        go func() {
                            node3 := createTestNode(3, 1, "user1", "node3")
                            newNode3 = store.PutNode(node3)
                            close(done3)
                        }()

                        // Wait for all operations to complete
                        <-done1
                        <-done2
                        <-done3

                        // Verify the returned nodes reflect the batch state
                        assert.True(t, ok1, "UpdateNode should succeed for node 1")
                        assert.True(t, ok2, "UpdateNode should succeed for node 2")
                        assert.True(t, resultNode1.Valid())
                        assert.True(t, resultNode2.Valid())
                        assert.True(t, newNode3.Valid())

                        // Check that returned nodes have the updated values
                        assert.Equal(t, "batch-updated-node1", resultNode1.Hostname())
                        assert.Equal(t, "batch-given-1", resultNode1.GivenName())
                        assert.Equal(t, "batch-updated-node2", resultNode2.Hostname())
                        assert.Equal(t, "batch-given-2", resultNode2.GivenName())
                        assert.Equal(t, "node3", newNode3.Hostname())

                        // Verify the snapshot also reflects all changes
                        snapshot := store.data.Load()
                        assert.Len(t, snapshot.nodesByID, 3)
                        assert.Equal(t, "batch-updated-node1", snapshot.nodesByID[1].Hostname)
                        assert.Equal(t, "batch-updated-node2", snapshot.nodesByID[2].Hostname)
                        assert.Equal(t, "node3", snapshot.nodesByID[3].Hostname)

                        // Verify peer relationships are updated correctly with new node
                        assert.Len(t, snapshot.peersByNode[1], 2) // sees nodes 2 and 3
                        assert.Len(t, snapshot.peersByNode[2], 2) // sees nodes 1 and 3
                        assert.Len(t, snapshot.peersByNode[3], 2) // sees nodes 1 and 2
                    },
                },
                {
                    name: "update non-existent node returns invalid view",
                    action: func(store *NodeStore) {
                        resultNode, ok := store.UpdateNode(999, func(n *types.Node) {
                            n.Hostname = "should-not-exist"
                        })

                        assert.False(t, ok, "UpdateNode should return false for non-existent node")
                        assert.False(t, resultNode.Valid(), "Result should be invalid NodeView")
                    },
                },
                {
                    name: "multiple updates to same node in batch all see final state",
                    action: func(store *NodeStore) {
                        // This test verifies that when multiple updates to the same node
                        // are batched together, each returned node reflects ALL changes
                        // in the batch, not just the individual update's changes.

                        done1 := make(chan struct{})
                        done2 := make(chan struct{})
                        done3 := make(chan struct{})

                        var resultNode1, resultNode2, resultNode3 types.NodeView
                        var ok1, ok2, ok3 bool

                        // These updates all modify node 1 and should be batched together
                        // The final state should have all three modifications applied
                        go func() {
                            resultNode1, ok1 = store.UpdateNode(1, func(n *types.Node) {
                                n.Hostname = "multi-update-hostname"
                            })
                            close(done1)
                        }()

                        go func() {
                            resultNode2, ok2 = store.UpdateNode(1, func(n *types.Node) {
                                n.GivenName = "multi-update-givenname"
                            })
                            close(done2)
                        }()

                        go func() {
                            resultNode3, ok3 = store.UpdateNode(1, func(n *types.Node) {
                                n.ForcedTags = []string{"tag1", "tag2"}
                            })
                            close(done3)
                        }()

                        // Wait for all operations to complete
                        <-done1
                        <-done2
                        <-done3

                        // All updates should succeed
                        assert.True(t, ok1, "First update should succeed")
                        assert.True(t, ok2, "Second update should succeed")
                        assert.True(t, ok3, "Third update should succeed")

                        // CRITICAL: Each returned node should reflect ALL changes from the batch
                        // not just the change from its specific update call

                        // resultNode1 (from hostname update) should also have the givenname and tags changes
                        assert.Equal(t, "multi-update-hostname", resultNode1.Hostname())
                        assert.Equal(t, "multi-update-givenname", resultNode1.GivenName())
                        assert.Equal(t, []string{"tag1", "tag2"}, resultNode1.ForcedTags().AsSlice())

                        // resultNode2 (from givenname update) should also have the hostname and tags changes
                        assert.Equal(t, "multi-update-hostname", resultNode2.Hostname())
                        assert.Equal(t, "multi-update-givenname", resultNode2.GivenName())
                        assert.Equal(t, []string{"tag1", "tag2"}, resultNode2.ForcedTags().AsSlice())

                        // resultNode3 (from tags update) should also have the hostname and givenname changes
                        assert.Equal(t, "multi-update-hostname", resultNode3.Hostname())
                        assert.Equal(t, "multi-update-givenname", resultNode3.GivenName())
                        assert.Equal(t, []string{"tag1", "tag2"}, resultNode3.ForcedTags().AsSlice())

                        // Verify the snapshot also has all changes
                        snapshot := store.data.Load()
                        finalNode := snapshot.nodesByID[1]
                        assert.Equal(t, "multi-update-hostname", finalNode.Hostname)
                        assert.Equal(t, "multi-update-givenname", finalNode.GivenName)
                        assert.Equal(t, []string{"tag1", "tag2"}, finalNode.ForcedTags)
                    },
                },
            },
        },
        {
            name: "test UpdateNode result is immutable for database save",
            setupFunc: func(t *testing.T) *NodeStore {
                node1 := createTestNode(1, 1, "user1", "node1")
                node2 := createTestNode(2, 1, "user1", "node2")
                initialNodes := types.Nodes{&node1, &node2}
                return NewNodeStore(initialNodes, allowAllPeersFunc)
            },
            steps: []testStep{
                {
                    name: "verify returned node is complete and consistent",
                    action: func(store *NodeStore) {
                        // Update a node and verify the returned view is complete
                        resultNode, ok := store.UpdateNode(1, func(n *types.Node) {
                            n.Hostname = "db-save-hostname"
                            n.GivenName = "db-save-given"
                            n.ForcedTags = []string{"db-tag1", "db-tag2"}
                        })

                        assert.True(t, ok, "UpdateNode should succeed")
                        assert.True(t, resultNode.Valid(), "Result should be valid")

                        // Verify the returned node has all expected values
                        assert.Equal(t, "db-save-hostname", resultNode.Hostname())
                        assert.Equal(t, "db-save-given", resultNode.GivenName())
                        assert.Equal(t, []string{"db-tag1", "db-tag2"}, resultNode.ForcedTags().AsSlice())

                        // Convert to struct as would be done for database save
                        nodePtr := resultNode.AsStruct()
                        assert.NotNil(t, nodePtr)
                        assert.Equal(t, "db-save-hostname", nodePtr.Hostname)
                        assert.Equal(t, "db-save-given", nodePtr.GivenName)
                        assert.Equal(t, []string{"db-tag1", "db-tag2"}, nodePtr.ForcedTags)

                        // Verify the snapshot also reflects the same state
                        snapshot := store.data.Load()
                        storedNode := snapshot.nodesByID[1]
                        assert.Equal(t, "db-save-hostname", storedNode.Hostname)
                        assert.Equal(t, "db-save-given", storedNode.GivenName)
                        assert.Equal(t, []string{"db-tag1", "db-tag2"}, storedNode.ForcedTags)
                    },
                },
                {
                    name: "concurrent updates all return consistent final state for DB save",
                    action: func(store *NodeStore) {
                        // Multiple goroutines updating the same node
                        // All should receive the final batch state suitable for DB save
                        done1 := make(chan struct{})
                        done2 := make(chan struct{})
                        done3 := make(chan struct{})

                        var result1, result2, result3 types.NodeView
                        var ok1, ok2, ok3 bool

                        // Start concurrent updates
                        go func() {
                            result1, ok1 = store.UpdateNode(1, func(n *types.Node) {
                                n.Hostname = "concurrent-db-hostname"
                            })
                            close(done1)
                        }()

                        go func() {
                            result2, ok2 = store.UpdateNode(1, func(n *types.Node) {
                                n.GivenName = "concurrent-db-given"
                            })
                            close(done2)
                        }()

                        go func() {
                            result3, ok3 = store.UpdateNode(1, func(n *types.Node) {
                                n.ForcedTags = []string{"concurrent-tag"}
                            })
                            close(done3)
                        }()

                        // Wait for all to complete
                        <-done1
                        <-done2
                        <-done3

                        assert.True(t, ok1 && ok2 && ok3, "All updates should succeed")

                        // All results should be valid and suitable for database save
                        assert.True(t, result1.Valid())
                        assert.True(t, result2.Valid())
                        assert.True(t, result3.Valid())

                        // Convert each to struct as would be done for DB save
                        nodePtr1 := result1.AsStruct()
                        nodePtr2 := result2.AsStruct()
                        nodePtr3 := result3.AsStruct()

                        // All should have the complete final state
                        assert.Equal(t, "concurrent-db-hostname", nodePtr1.Hostname)
                        assert.Equal(t, "concurrent-db-given", nodePtr1.GivenName)
                        assert.Equal(t, []string{"concurrent-tag"}, nodePtr1.ForcedTags)

                        assert.Equal(t, "concurrent-db-hostname", nodePtr2.Hostname)
                        assert.Equal(t, "concurrent-db-given", nodePtr2.GivenName)
                        assert.Equal(t, []string{"concurrent-tag"}, nodePtr2.ForcedTags)

                        assert.Equal(t, "concurrent-db-hostname", nodePtr3.Hostname)
                        assert.Equal(t, "concurrent-db-given", nodePtr3.GivenName)
                        assert.Equal(t, []string{"concurrent-tag"}, nodePtr3.ForcedTags)

                        // Verify consistency with stored state
                        snapshot := store.data.Load()
                        storedNode := snapshot.nodesByID[1]
                        assert.Equal(t, nodePtr1.Hostname, storedNode.Hostname)
                        assert.Equal(t, nodePtr1.GivenName, storedNode.GivenName)
                        assert.Equal(t, nodePtr1.ForcedTags, storedNode.ForcedTags)
                    },
                },
                {
                    name: "verify returned node preserves all fields for DB save",
                    action: func(store *NodeStore) {
                        // Get initial state
                        snapshot := store.data.Load()
                        originalNode := snapshot.nodesByID[2]
                        originalIPv4 := originalNode.IPv4
                        originalIPv6 := originalNode.IPv6
                        originalCreatedAt := originalNode.CreatedAt
                        originalUser := originalNode.User

                        // Update only hostname
                        resultNode, ok := store.UpdateNode(2, func(n *types.Node) {
                            n.Hostname = "preserve-test-hostname"
                        })

                        assert.True(t, ok, "Update should succeed")

                        // Convert to struct for DB save
                        nodeForDB := resultNode.AsStruct()

                        // Verify all fields are preserved
                        assert.Equal(t, "preserve-test-hostname", nodeForDB.Hostname)
                        assert.Equal(t, originalIPv4, nodeForDB.IPv4)
                        assert.Equal(t, originalIPv6, nodeForDB.IPv6)
                        assert.Equal(t, originalCreatedAt, nodeForDB.CreatedAt)
                        assert.Equal(t, originalUser.Name, nodeForDB.User.Name)
                        assert.Equal(t, types.NodeID(2), nodeForDB.ID)

                        // These fields should be suitable for direct database save
                        assert.NotNil(t, nodeForDB.IPv4)
                        assert.NotNil(t, nodeForDB.IPv6)
                        assert.False(t, nodeForDB.CreatedAt.IsZero())
                    },
                },
            },
        },
    }

    for _, tt := range tests {

@@ -499,3 +839,302 @@ type testStep struct {
    name   string
    action func(store *NodeStore)
}

// --- Additional NodeStore concurrency, batching, race, resource, timeout, and allocation tests ---

// Helper for concurrent test nodes
func createConcurrentTestNode(id types.NodeID, hostname string) types.Node {
    machineKey := key.NewMachine()
    nodeKey := key.NewNode()
    return types.Node{
        ID:         id,
        Hostname:   hostname,
        MachineKey: machineKey.Public(),
        NodeKey:    nodeKey.Public(),
        UserID:     1,
        User: types.User{
            Name: "concurrent-test-user",
        },
    }
}

// --- Concurrency: concurrent PutNode operations ---
func TestNodeStoreConcurrentPutNode(t *testing.T) {
    const concurrentOps = 20
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    var wg sync.WaitGroup
    results := make(chan bool, concurrentOps)
    for i := 0; i < concurrentOps; i++ {
        wg.Add(1)
        go func(nodeID int) {
            defer wg.Done()
            node := createConcurrentTestNode(types.NodeID(nodeID), "concurrent-node")
            resultNode := store.PutNode(node)
            results <- resultNode.Valid()
        }(i + 1)
    }
    wg.Wait()
    close(results)

    successCount := 0
    for success := range results {
        if success {
            successCount++
        }
    }
    require.Equal(t, concurrentOps, successCount, "All concurrent PutNode operations should succeed")
}

// --- Batching: concurrent ops fit in one batch ---
func TestNodeStoreBatchingEfficiency(t *testing.T) {
    const batchSize = 10
    const ops = 15 // more than batchSize
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    var wg sync.WaitGroup
    results := make(chan bool, ops)
    for i := 0; i < ops; i++ {
        wg.Add(1)
        go func(nodeID int) {
            defer wg.Done()
            node := createConcurrentTestNode(types.NodeID(nodeID), "batch-node")
            resultNode := store.PutNode(node)
            results <- resultNode.Valid()
        }(i + 1)
    }
    wg.Wait()
    close(results)

    successCount := 0
    for success := range results {
        if success {
            successCount++
        }
    }
    require.Equal(t, ops, successCount, "All batch PutNode operations should succeed")
}

// --- Race conditions: many goroutines on same node ---
func TestNodeStoreRaceConditions(t *testing.T) {
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    nodeID := types.NodeID(1)
    node := createConcurrentTestNode(nodeID, "race-node")
    resultNode := store.PutNode(node)
    require.True(t, resultNode.Valid())

    const numGoroutines = 30
    const opsPerGoroutine = 10
    var wg sync.WaitGroup
    errors := make(chan error, numGoroutines*opsPerGoroutine)

    for i := 0; i < numGoroutines; i++ {
        wg.Add(1)
        go func(gid int) {
            defer wg.Done()
            for j := 0; j < opsPerGoroutine; j++ {
                switch j % 3 {
                case 0:
                    resultNode, _ := store.UpdateNode(nodeID, func(n *types.Node) {
                        n.Hostname = "race-updated"
                    })
                    if !resultNode.Valid() {
                        errors <- fmt.Errorf("UpdateNode failed in goroutine %d, op %d", gid, j)
                    }
                case 1:
                    retrieved, found := store.GetNode(nodeID)
                    if !found || !retrieved.Valid() {
                        errors <- fmt.Errorf("GetNode failed in goroutine %d, op %d", gid, j)
                    }
                case 2:
                    newNode := createConcurrentTestNode(nodeID, "race-put")
                    resultNode := store.PutNode(newNode)
                    if !resultNode.Valid() {
                        errors <- fmt.Errorf("PutNode failed in goroutine %d, op %d", gid, j)
                    }
                }
            }
        }(i)
    }
    wg.Wait()
    close(errors)

    errorCount := 0
    for err := range errors {
        t.Error(err)
        errorCount++
    }
    if errorCount > 0 {
        t.Fatalf("Race condition test failed with %d errors", errorCount)
    }
}

// --- Resource cleanup: goroutine leak detection ---
func TestNodeStoreResourceCleanup(t *testing.T) {
    // initialGoroutines := runtime.NumGoroutine()
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    time.Sleep(50 * time.Millisecond)
    afterStartGoroutines := runtime.NumGoroutine()

    const ops = 100
    for i := 0; i < ops; i++ {
        nodeID := types.NodeID(i + 1)
        node := createConcurrentTestNode(nodeID, "cleanup-node")
        resultNode := store.PutNode(node)
        assert.True(t, resultNode.Valid())
        store.UpdateNode(nodeID, func(n *types.Node) {
            n.Hostname = "cleanup-updated"
        })
        retrieved, found := store.GetNode(nodeID)
        assert.True(t, found && retrieved.Valid())
        if i%10 == 9 {
            store.DeleteNode(nodeID)
        }
    }
    runtime.GC()
    time.Sleep(100 * time.Millisecond)
    finalGoroutines := runtime.NumGoroutine()
    if finalGoroutines > afterStartGoroutines+2 {
        t.Errorf("Potential goroutine leak: started with %d, ended with %d", afterStartGoroutines, finalGoroutines)
    }
}

// --- Timeout/deadlock: operations complete within reasonable time ---
func TestNodeStoreOperationTimeout(t *testing.T) {
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    const ops = 30
    var wg sync.WaitGroup
    putResults := make([]error, ops)
    updateResults := make([]error, ops)

    // Launch all PutNode operations concurrently
    for i := 1; i <= ops; i++ {
        nodeID := types.NodeID(i)
        wg.Add(1)
        go func(idx int, id types.NodeID) {
            defer wg.Done()
            startPut := time.Now()
            fmt.Printf("[TestNodeStoreOperationTimeout] %s: PutNode(%d) starting\n", startPut.Format("15:04:05.000"), id)
            node := createConcurrentTestNode(id, "timeout-node")
            resultNode := store.PutNode(node)
            endPut := time.Now()
            fmt.Printf("[TestNodeStoreOperationTimeout] %s: PutNode(%d) finished, valid=%v, duration=%v\n", endPut.Format("15:04:05.000"), id, resultNode.Valid(), endPut.Sub(startPut))
            if !resultNode.Valid() {
                putResults[idx-1] = fmt.Errorf("PutNode failed for node %d", id)
            }
        }(i, nodeID)
    }
    wg.Wait()

    // Launch all UpdateNode operations concurrently
    wg = sync.WaitGroup{}
    for i := 1; i <= ops; i++ {
        nodeID := types.NodeID(i)
        wg.Add(1)
        go func(idx int, id types.NodeID) {
            defer wg.Done()
            startUpdate := time.Now()
            fmt.Printf("[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) starting\n", startUpdate.Format("15:04:05.000"), id)
            resultNode, ok := store.UpdateNode(id, func(n *types.Node) {
                n.Hostname = "timeout-updated"
            })
            endUpdate := time.Now()
            fmt.Printf("[TestNodeStoreOperationTimeout] %s: UpdateNode(%d) finished, valid=%v, ok=%v, duration=%v\n", endUpdate.Format("15:04:05.000"), id, resultNode.Valid(), ok, endUpdate.Sub(startUpdate))
            if !ok || !resultNode.Valid() {
                updateResults[idx-1] = fmt.Errorf("UpdateNode failed for node %d", id)
            }
        }(i, nodeID)
    }
    done := make(chan struct{})
    go func() {
        wg.Wait()
        close(done)
    }()
    select {
    case <-done:
        errorCount := 0
        for _, err := range putResults {
            if err != nil {
                t.Error(err)
                errorCount++
            }
        }
        for _, err := range updateResults {
            if err != nil {
                t.Error(err)
                errorCount++
            }
        }
        if errorCount == 0 {
            t.Log("All concurrent operations completed successfully within timeout")
        } else {
            t.Fatalf("Some concurrent operations failed: %d errors", errorCount)
        }
    case <-ctx.Done():
        fmt.Println("[TestNodeStoreOperationTimeout] Timeout reached, test failed")
        t.Fatal("Operations timed out - potential deadlock or resource issue")
    }
}

// --- Edge case: update non-existent node ---
func TestNodeStoreUpdateNonExistentNode(t *testing.T) {
    for i := 0; i < 10; i++ {
        store := NewNodeStore(nil, allowAllPeersFunc)
        store.Start()
        nonExistentID := types.NodeID(999 + i)
        updateCallCount := 0
        fmt.Printf("[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) starting\n", nonExistentID)
        resultNode, ok := store.UpdateNode(nonExistentID, func(n *types.Node) {
            updateCallCount++
            n.Hostname = "should-never-be-called"
        })
        fmt.Printf("[TestNodeStoreUpdateNonExistentNode] UpdateNode(%d) finished, valid=%v, ok=%v, updateCallCount=%d\n", nonExistentID, resultNode.Valid(), ok, updateCallCount)
        assert.False(t, ok, "UpdateNode should return false for non-existent node")
        assert.False(t, resultNode.Valid(), "UpdateNode should return invalid node for non-existent node")
        assert.Equal(t, 0, updateCallCount, "UpdateFn should not be called for non-existent node")
        store.Stop()
    }
}

// --- Allocation benchmark ---
func BenchmarkNodeStoreAllocations(b *testing.B) {
    store := NewNodeStore(nil, allowAllPeersFunc)
    store.Start()
    defer store.Stop()

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        nodeID := types.NodeID(i + 1)
        node := createConcurrentTestNode(nodeID, "bench-node")
        store.PutNode(node)
        store.UpdateNode(nodeID, func(n *types.Node) {
            n.Hostname = "bench-updated"
        })
        store.GetNode(nodeID)
        if i%10 == 9 {
            store.DeleteNode(nodeID)
        }
    }
}

func TestNodeStoreAllocationStats(t *testing.T) {
    res := testing.Benchmark(BenchmarkNodeStoreAllocations)
    allocs := res.AllocsPerOp()
    t.Logf("NodeStore allocations per op: %.2f", float64(allocs))
}

(One additional file diff in this commit was suppressed because it is too large.)