Mirror of https://github.com/juanfont/headscale.git, synced 2025-11-07 21:02:51 -05:00
commit 233dffc186 (parent 39443184d6), committed by Kristoffer Dalby

lint and leftover

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
@@ -1160,57 +1160,61 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
err = headscale.SetPolicy(&p)
require.NoError(t, err)

// Get the current policy and check
// if it is the same as the one we set.
var output *policyv2.Policy
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"policy",
"get",
"--output",
"json",
},
&output,
)
require.NoError(t, err)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Get the current policy and check
// if it is the same as the one we set.
var output *policyv2.Policy
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"policy",
"get",
"--output",
"json",
},
&output,
)
assert.NoError(ct, err)

assert.Len(t, output.ACLs, 1)
assert.Len(t, output.ACLs, 1)

if diff := cmp.Diff(p, *output, cmpopts.IgnoreUnexported(policyv2.Policy{}), cmpopts.EquateEmpty()); diff != "" {
t.Errorf("unexpected policy(-want +got):\n%s", diff)
}

// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)

url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)

result, err := client.Curl(url)
assert.Len(t, result, 13)
require.NoError(t, err)
if diff := cmp.Diff(p, *output, cmpopts.IgnoreUnexported(policyv2.Policy{}), cmpopts.EquateEmpty()); diff != "" {
ct.Errorf("unexpected policy(-want +got):\n%s", diff)
}
}
}, 30*time.Second, 1*time.Second, "verifying that the new policy took place")

// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
require.NoError(t, err)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Test that user1 can visit all user2
for _, client := range user1Clients {
for _, peer := range user2Clients {
fqdn, err := peer.FQDN()
assert.NoError(ct, err)

url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)
url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)

result, err := client.Curl(url)
assert.Empty(t, result)
require.Error(t, err)
result, err := client.Curl(url)
assert.Len(ct, result, 13)
assert.NoError(ct, err)
}
}
}

// Test that user2 _cannot_ visit user1
for _, client := range user2Clients {
for _, peer := range user1Clients {
fqdn, err := peer.FQDN()
assert.NoError(ct, err)

url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
t.Logf("url from %s to %s", client.Hostname(), url)

result, err := client.Curl(url)
assert.Empty(ct, result)
assert.Error(ct, err)
}
}
}, 30*time.Second, 1*time.Second, "new policy did not get propagated to nodes")
}

func TestACLAutogroupMember(t *testing.T) {

@@ -9,6 +9,7 @@ import (
"time"

v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/samber/lo"
@@ -53,6 +54,18 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)

expectedNodes := make([]types.NodeID, 0, len(allClients))
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
assertNoErr(t, err)
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected", 30*time.Second)

// assertClientsState(t, allClients)

clientIPs := make(map[TailscaleClient][]netip.Addr)
@@ -64,9 +77,6 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
clientIPs[client] = ips
}

headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)

listNodes, err := headscale.ListNodes()
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
@@ -86,6 +96,9 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)

// After taking down all nodes, verify all systems show nodes offline
requireAllClientsOnline(t, headscale, expectedNodes, false, "all nodes should have logged out", 120*time.Second)

t.Logf("all clients logged out")

assert.EventuallyWithT(t, func(ct *assert.CollectT) {

@@ -481,10 +481,6 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
headscale, err := scenario.Headscale()
assertNoErr(t, err)

listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
assert.Empty(t, listUsers)

ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
assertNoErr(t, err)

@@ -494,26 +490,28 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
_, err = doLoginURL(ts.Hostname(), u)
assertNoErr(t, err)

listUsers, err = headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 1)
wantUsers := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
}
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 1)
wantUsers := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
}

sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})

if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Fatalf("unexpected users: %s", diff)
}
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Fatalf("unexpected users: %s", diff)
}
}, 30*time.Second, 1*time.Second, "validating users after first login")

listNodes, err := headscale.ListNodes()
assertNoErr(t, err)
@@ -525,19 +523,19 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
err = ts.Logout()
assertNoErr(t, err)

// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
// manually.
err = ts.Logout()
assertNoErr(t, err)

// Wait for logout to complete and then do second logout
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}, 5*time.Second, 1*time.Second)

// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
// manually.
err = ts.Logout()
assertNoErr(t, err)
}, 30*time.Second, 1*time.Second)

u, err = ts.LoginWithURL(headscale.GetEndpoint())
assertNoErr(t, err)
@@ -545,56 +543,56 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
_, err = doLoginURL(ts.Hostname(), u)
assertNoErr(t, err)

listUsers, err = headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 2)
wantUsers = []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
{
Id: 2,
Name: "user2",
Email: "user2@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user2",
},
}
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 2)
wantUsers := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
{
Id: 2,
Name: "user2",
Email: "user2@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user2",
},
}

sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})

if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Fatalf("unexpected users: %s", diff)
}
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
ct.Errorf("unexpected users: %s", diff)
}
}, 30*time.Second, 1*time.Second, "validating users after new user login")

listNodesAfterNewUserLogin, err := headscale.ListNodes()
assertNoErr(t, err)
assert.Len(t, listNodesAfterNewUserLogin, 2)
var listNodesAfterNewUserLogin []*v1.Node
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterNewUserLogin, err = headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodesAfterNewUserLogin, 2)

// Machine key is the same as the "machine" has not changed,
// but Node key is not as it is a new node
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(t, listNodesAfterNewUserLogin[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
assert.NotEqual(t, listNodesAfterNewUserLogin[0].GetNodeKey(), listNodesAfterNewUserLogin[1].GetNodeKey())
// Machine key is the same as the "machine" has not changed,
// but Node key is not as it is a new node
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(ct, listNodesAfterNewUserLogin[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
assert.NotEqual(ct, listNodesAfterNewUserLogin[0].GetNodeKey(), listNodesAfterNewUserLogin[1].GetNodeKey())
}, 30*time.Second, 1*time.Second, "listing nodes after new user login")

// Log out user2, and log into user1, no new node should be created,
// the node should now "become" node1 again
err = ts.Logout()
assertNoErr(t, err)

// Wait for logout to complete and then do second logout
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}, 5*time.Second, 1*time.Second)
t.Logf("Logged out take one")
t.Log("timestamp: " + time.Now().Format("2006-01-02T15-04-05.999999999") + "\n")

// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
@@ -602,65 +600,92 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
err = ts.Logout()
assertNoErr(t, err)

t.Logf("Logged out take two")
t.Log("timestamp: " + time.Now().Format("2006-01-02T15-04-05.999999999") + "\n")

// Wait for logout to complete and then do second logout
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}, 30*time.Second, 1*time.Second)

// We do not actually "change" the user here, it is done by logging in again
// as the OIDC mock server is kind of like a stack, and the next user is
// prepared and ready to go.
u, err = ts.LoginWithURL(headscale.GetEndpoint())
assertNoErr(t, err)

_, err = doLoginURL(ts.Hostname(), u)
assertNoErr(t, err)

listUsers, err = headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 2)
wantUsers = []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
{
Id: 2,
Name: "user2",
Email: "user2@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user2",
},
}
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "Running", status.BackendState)
}, 30*time.Second, 1*time.Second)

sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})
t.Logf("Logged back in")
t.Log("timestamp: " + time.Now().Format("2006-01-02T15-04-05.999999999") + "\n")

if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
t.Fatalf("unexpected users: %s", diff)
}
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listUsers, err := headscale.ListUsers()
assert.NoError(ct, err)
assert.Len(ct, listUsers, 2)
wantUsers := []*v1.User{
{
Id: 1,
Name: "user1",
Email: "user1@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user1",
},
{
Id: 2,
Name: "user2",
Email: "user2@headscale.net",
Provider: "oidc",
ProviderId: scenario.mockOIDC.Issuer() + "/user2",
},
}

listNodesAfterLoggingBackIn, err := headscale.ListNodes()
assertNoErr(t, err)
assert.Len(t, listNodesAfterLoggingBackIn, 2)
sort.Slice(listUsers, func(i, j int) bool {
return listUsers[i].GetId() < listUsers[j].GetId()
})

// Validate that the machine we had when we logged in the first time, has the same
// machine key, but a different ID than the newly logged in version of the same
// machine.
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(t, listNodes[0].GetNodeKey(), listNodesAfterNewUserLogin[0].GetNodeKey())
assert.Equal(t, listNodes[0].GetId(), listNodesAfterNewUserLogin[0].GetId())
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
assert.NotEqual(t, listNodes[0].GetId(), listNodesAfterNewUserLogin[1].GetId())
assert.NotEqual(t, listNodes[0].GetUser().GetId(), listNodesAfterNewUserLogin[1].GetUser().GetId())
if diff := cmp.Diff(wantUsers, listUsers, cmpopts.IgnoreUnexported(v1.User{}), cmpopts.IgnoreFields(v1.User{}, "CreatedAt")); diff != "" {
ct.Errorf("unexpected users: %s", diff)
}
}, 30*time.Second, 1*time.Second, "log out user2, and log into user1, no new node should be created")

// Even tho we are logging in again with the same user, the previous key has been expired
// and a new one has been generated. The node entry in the database should be the same
// as the user + machinekey still matches.
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterLoggingBackIn[0].GetMachineKey())
assert.NotEqual(t, listNodes[0].GetNodeKey(), listNodesAfterLoggingBackIn[0].GetNodeKey())
assert.Equal(t, listNodes[0].GetId(), listNodesAfterLoggingBackIn[0].GetId())
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
listNodesAfterLoggingBackIn, err := headscale.ListNodes()
assert.NoError(ct, err)
assert.Len(ct, listNodesAfterLoggingBackIn, 2)

// The "logged back in" machine should have the same machinekey but a different nodekey
// than the version logged in with a different user.
assert.Equal(t, listNodesAfterLoggingBackIn[0].GetMachineKey(), listNodesAfterLoggingBackIn[1].GetMachineKey())
assert.NotEqual(t, listNodesAfterLoggingBackIn[0].GetNodeKey(), listNodesAfterLoggingBackIn[1].GetNodeKey())
// Validate that the machine we had when we logged in the first time, has the same
// machine key, but a different ID than the newly logged in version of the same
// machine.
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(ct, listNodes[0].GetNodeKey(), listNodesAfterNewUserLogin[0].GetNodeKey())
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[0].GetId())
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
assert.NotEqual(ct, listNodes[0].GetId(), listNodesAfterNewUserLogin[1].GetId())
assert.NotEqual(ct, listNodes[0].GetUser().GetId(), listNodesAfterNewUserLogin[1].GetUser().GetId())

// Even tho we are logging in again with the same user, the previous key has been expired
// and a new one has been generated. The node entry in the database should be the same
// as the user + machinekey still matches.
assert.Equal(ct, listNodes[0].GetMachineKey(), listNodesAfterLoggingBackIn[0].GetMachineKey())
assert.NotEqual(ct, listNodes[0].GetNodeKey(), listNodesAfterLoggingBackIn[0].GetNodeKey())
assert.Equal(ct, listNodes[0].GetId(), listNodesAfterLoggingBackIn[0].GetId())

// The "logged back in" machine should have the same machinekey but a different nodekey
// than the version logged in with a different user.
assert.Equal(ct, listNodesAfterLoggingBackIn[0].GetMachineKey(), listNodesAfterLoggingBackIn[1].GetMachineKey())
assert.NotEqual(ct, listNodesAfterLoggingBackIn[0].GetNodeKey(), listNodesAfterLoggingBackIn[1].GetNodeKey())
}, 30*time.Second, 1*time.Second, "log out user2, and log into user1, no new node should be created")
}

// assertTailscaleNodesLogout verifies that all provided Tailscale clients

@@ -4,6 +4,7 @@ import (
"net/netip"

v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"

@@ -10,18 +10,21 @@ import (
"testing"
"time"

"github.com/google/go-cmp/cmp"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/juanfont/headscale/integration/tsic"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)

@@ -59,13 +62,15 @@ func TestPingAllByIP(t *testing.T) {
hs, err := scenario.Headscale()
require.NoError(t, err)

assert.EventuallyWithT(t, func(ct *assert.CollectT) {
all, err := hs.GetAllMapReponses()
assert.NoError(ct, err)

onlineMap := buildExpectedOnlineMap(all)
assertExpectedOnlineMapAllOnline(ct, len(allClients)-1, onlineMap)
}, 30*time.Second, 2*time.Second)
// Extract node IDs for validation
expectedNodes := make([]types.NodeID, 0, len(allClients))
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
require.NoError(t, err, "failed to parse node ID")
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, hs, expectedNodes, true, "all clients should be online across all systems", 30*time.Second)

// assertClientsState(t, allClients)

@@ -73,6 +78,14 @@ func TestPingAllByIP(t *testing.T) {
return x.String()
})

// Get headscale instance for batcher debug check
headscale, err := scenario.Headscale()
assertNoErr(t, err)

// Test our DebugBatcher functionality
t.Logf("Testing DebugBatcher functionality...")
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected to the batcher", 30*time.Second)

success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
@@ -962,9 +975,6 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
)
assertNoErrHeadscaleEnv(t, err)

hs, err := scenario.Headscale()
require.NoError(t, err)

allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)

@@ -980,14 +990,31 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
return x.String()
})

// Get headscale instance for batcher debug checks
headscale, err := scenario.Headscale()
assertNoErr(t, err)

// Initial check: all nodes should be connected to batcher
// Extract node IDs for validation
expectedNodes := make([]types.NodeID, 0, len(allClients))
for _, client := range allClients {
status := client.MustStatus()
nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
assertNoErr(t, err)
expectedNodes = append(expectedNodes, types.NodeID(nodeID))
}
requireAllClientsOnline(t, headscale, expectedNodes, true, "all clients should be connected to batcher", 30*time.Second)

success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

wg, _ := errgroup.WithContext(context.Background())

for run := range 3 {
t.Logf("Starting DownUpPing run %d at %s", run+1, time.Now().Format("2006-01-02T15-04-05.999999999"))

// Create fresh errgroup with timeout for each run
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
wg, _ := errgroup.WithContext(ctx)

for _, client := range allClients {
c := client
wg.Go(func() error {
@@ -1001,6 +1028,9 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
}
t.Logf("All nodes taken down at %s", time.Now().Format("2006-01-02T15-04-05.999999999"))

// After taking down all nodes, verify all systems show nodes offline
requireAllClientsOnline(t, headscale, expectedNodes, false, fmt.Sprintf("Run %d: all nodes should be offline after Down()", run+1), 120*time.Second)

for _, client := range allClients {
c := client
wg.Go(func() error {
@@ -1014,22 +1044,22 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
}
t.Logf("All nodes brought up at %s", time.Now().Format("2006-01-02T15-04-05.999999999"))

// After bringing up all nodes, verify batcher shows all reconnected
requireAllClientsOnline(t, headscale, expectedNodes, true, fmt.Sprintf("Run %d: all nodes should be reconnected after Up()", run+1), 120*time.Second)

// Wait for sync and successful pings after nodes come back up
err = scenario.WaitForTailscaleSync()
assert.NoError(t, err)

t.Logf("All nodes synced up %s", time.Now().Format("2006-01-02T15-04-05.999999999"))

assert.EventuallyWithT(t, func(ct *assert.CollectT) {
all, err := hs.GetAllMapReponses()
assert.NoError(ct, err)

onlineMap := buildExpectedOnlineMap(all)
assertExpectedOnlineMapAllOnline(ct, len(allClients)-1, onlineMap)
}, 60*time.Second, 2*time.Second)
requireAllClientsOnline(t, headscale, expectedNodes, true, fmt.Sprintf("Run %d: all systems should show nodes online after reconnection", run+1), 60*time.Second)

success := pingAllHelper(t, allClients, allAddrs)
assert.Equalf(t, len(allClients)*len(allIps), success, "%d successful pings out of %d", success, len(allClients)*len(allIps))

// Clean up context for this run
cancel()
}
}

@@ -1141,51 +1171,158 @@ func Test2118DeletingOnlineNodePanics(t *testing.T) {
assert.Equal(t, nodeList[1].GetId(), nodeListAfter[0].GetId())
}

func buildExpectedOnlineMap(all map[types.NodeID][]tailcfg.MapResponse) map[types.NodeID]map[types.NodeID]bool {
res := make(map[types.NodeID]map[types.NodeID]bool)
for nid, mrs := range all {
res[nid] = make(map[types.NodeID]bool)
for _, mr := range mrs {
for _, peer := range mr.Peers {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online
}
}

for _, peer := range mr.PeersChanged {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online
}
}

for _, peer := range mr.PeersChangedPatch {
if peer.Online != nil {
res[nid][types.NodeID(peer.NodeID)] = *peer.Online
}
}
}
}
return res
// NodeSystemStatus represents the online status of a node across different systems
type NodeSystemStatus struct {
Batcher bool
BatcherConnCount int
MapResponses bool
NodeStore bool
}

func assertExpectedOnlineMapAllOnline(t *assert.CollectT, expectedPeerCount int, onlineMap map[types.NodeID]map[types.NodeID]bool) {
for nid, peers := range onlineMap {
onlineCount := 0
for _, online := range peers {
if online {
onlineCount++
// requireAllSystemsOnline checks that nodes are online/offline across batcher, mapresponses, and nodestore
func requireAllClientsOnline(t *testing.T, headscale ControlServer, expectedNodes []types.NodeID, expectedOnline bool, message string, timeout time.Duration) {
t.Helper()

startTime := time.Now()
t.Logf("requireAllSystemsOnline: Starting validation at %s - %s", startTime.Format("2006-01-02T15:04:05.000"), message)

var prevReport string
require.EventuallyWithT(t, func(c *assert.CollectT) {
// Get batcher state
debugInfo, err := headscale.DebugBatcher()
assert.NoError(c, err, "Failed to get batcher debug info")
if err != nil {
return
}

// Get map responses
mapResponses, err := headscale.GetAllMapReponses()
assert.NoError(c, err, "Failed to get map responses")
if err != nil {
return
}

// Get nodestore state
nodeStore, err := headscale.DebugNodeStore()
assert.NoError(c, err, "Failed to get nodestore debug info")
if err != nil {
return
}

// Validate node counts first
expectedCount := len(expectedNodes)
assert.Equal(c, expectedCount, debugInfo.TotalNodes, "Batcher total nodes mismatch")
assert.Equal(c, expectedCount, len(nodeStore), "NodeStore total nodes mismatch")

// Check that we have map responses for expected nodes
mapResponseCount := len(mapResponses)
assert.Equal(c, expectedCount, mapResponseCount, "MapResponses total nodes mismatch")

// Build status map for each node
nodeStatus := make(map[types.NodeID]NodeSystemStatus)

// Initialize all expected nodes
for _, nodeID := range expectedNodes {
nodeStatus[nodeID] = NodeSystemStatus{}
}

// Check batcher state
for nodeIDStr, nodeInfo := range debugInfo.ConnectedNodes {
nodeID := types.MustParseNodeID(nodeIDStr)
if status, exists := nodeStatus[nodeID]; exists {
status.Batcher = nodeInfo.Connected
status.BatcherConnCount = nodeInfo.ActiveConnections
nodeStatus[nodeID] = status
}
}
assert.Equalf(t, expectedPeerCount, len(peers), "node:%d had an unexpected number of peers in online map", nid)
if expectedPeerCount != onlineCount {
var sb strings.Builder
sb.WriteString(fmt.Sprintf("Not all of node:%d peers where online:\n", nid))
for pid, online := range peers {
sb.WriteString(fmt.Sprintf("\tPeer node:%d online: %t\n", pid, online))

// Check map responses using buildExpectedOnlineMap
onlineFromMaps := make(map[types.NodeID]bool)
onlineMap := integrationutil.BuildExpectedOnlineMap(mapResponses)
for nodeID := range nodeStatus {
NODE_STATUS:
for id, peerMap := range onlineMap {
if id == nodeID {
continue
}

online := peerMap[nodeID]
// If the node is offline in any map response, we consider it offline
if !online {
onlineFromMaps[nodeID] = false
continue NODE_STATUS
}

onlineFromMaps[nodeID] = true
}
sb.WriteString("timestamp: " + time.Now().Format("2006-01-02T15-04-05.999999999") + "\n")
sb.WriteString("expected all peers to be online.")
t.Errorf("%s", sb.String())
}
}
assert.Lenf(c, onlineFromMaps, expectedCount, "MapResponses missing nodes in status check")

// Update status with map response data
for nodeID, online := range onlineFromMaps {
if status, exists := nodeStatus[nodeID]; exists {
status.MapResponses = online
nodeStatus[nodeID] = status
}
}

// Check nodestore state
for nodeID, node := range nodeStore {
if status, exists := nodeStatus[nodeID]; exists {
// Check if node is online in nodestore
status.NodeStore = node.IsOnline != nil && *node.IsOnline
nodeStatus[nodeID] = status
}
}

// Verify all systems show nodes in expected state and report failures
allMatch := true
var failureReport strings.Builder

ids := types.NodeIDs(maps.Keys(nodeStatus))
slices.Sort(ids)
for _, nodeID := range ids {
status := nodeStatus[nodeID]
systemsMatch := (status.Batcher == expectedOnline) &&
(status.MapResponses == expectedOnline) &&
(status.NodeStore == expectedOnline)

if !systemsMatch {
allMatch = false
stateStr := "offline"
if expectedOnline {
stateStr = "online"
}
failureReport.WriteString(fmt.Sprintf("node:%d is not fully %s:\n", nodeID, stateStr))
failureReport.WriteString(fmt.Sprintf(" - batcher: %t\n", status.Batcher))
failureReport.WriteString(fmt.Sprintf(" - conn count: %d\n", status.BatcherConnCount))
failureReport.WriteString(fmt.Sprintf(" - mapresponses: %t (down with at least one peer)\n", status.MapResponses))
failureReport.WriteString(fmt.Sprintf(" - nodestore: %t\n", status.NodeStore))
}
}

if !allMatch {
if diff := cmp.Diff(prevReport, failureReport.String()); diff != "" {
t.Log("Diff between reports:")
t.Logf("Prev report: \n%s\n", prevReport)
t.Logf("New report: \n%s\n", failureReport.String())
t.Log("timestamp: " + time.Now().Format("2006-01-02T15-04-05.999999999") + "\n")
prevReport = failureReport.String()
}

failureReport.WriteString("timestamp: " + time.Now().Format("2006-01-02T15-04-05.999999999") + "\n")

assert.Fail(c, failureReport.String())
}

stateStr := "offline"
if expectedOnline {
stateStr = "online"
}
assert.True(c, allMatch, fmt.Sprintf("Not all nodes are %s across all systems", stateStr))
}, timeout, 2*time.Second, message)

endTime := time.Now()
duration := endTime.Sub(startTime)
t.Logf("requireAllSystemsOnline: Completed validation at %s - Duration: %v - %s", endTime.Format("2006-01-02T15:04:05.000"), duration, message)
}

@@ -22,6 +22,7 @@ import (

"github.com/davecgh/go-spew/spew"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol"
policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"

@@ -19,6 +19,7 @@ import (
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"tailscale.com/tailcfg"
)

// PeerSyncTimeout returns the timeout for peer synchronization based on environment:
@@ -199,3 +200,30 @@ func CreateCertificate(hostname string) ([]byte, []byte, error) {

return certPEM.Bytes(), certPrivKeyPEM.Bytes(), nil
}

func BuildExpectedOnlineMap(all map[types.NodeID][]tailcfg.MapResponse) map[types.NodeID]map[types.NodeID]bool {
res := make(map[types.NodeID]map[types.NodeID]bool)
for nid, mrs := range all {
res[nid] = make(map[types.NodeID]bool)
for _, mr := range mrs {
for _, peer := range mr.Peers {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online
}
}

for _, peer := range mr.PeersChanged {
if peer.Online != nil {
res[nid][types.NodeID(peer.ID)] = *peer.Online
}
}

for _, peer := range mr.PeersChangedPatch {
if peer.Online != nil {
res[nid][types.NodeID(peer.NodeID)] = *peer.Online
}
}
}
}
return res
}

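For reference, a minimal usage sketch of the exported BuildExpectedOnlineMap helper, following the polling pattern used elsewhere in this change; the hs ControlServer handle, t, and the testify/time imports are assumed from the surrounding integration test and this is not part of the commit above:

assert.EventuallyWithT(t, func(ct *assert.CollectT) {
	// Collect every map response the control server has sent so far.
	all, err := hs.GetAllMapReponses()
	assert.NoError(ct, err)

	// Flatten them into a per-node view of which peers are reported online.
	onlineMap := integrationutil.BuildExpectedOnlineMap(all)
	for nid, peers := range onlineMap {
		for pid, online := range peers {
			assert.Truef(ct, online, "node %d sees peer %d as offline", nid, pid)
		}
	}
}, 30*time.Second, 2*time.Second, "all peers should be reported online in map responses")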