diff --git a/hscontrol/policy/v1/policy.go b/hscontrol/policy/v1/policy.go
index 0ac49d04..89625ce3 100644
--- a/hscontrol/policy/v1/policy.go
+++ b/hscontrol/policy/v1/policy.go
@@ -7,6 +7,8 @@ import (
 	"os"
 	"sync"
 
+	"slices"
+
 	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/rs/zerolog/log"
 	"tailscale.com/tailcfg"
@@ -145,13 +147,7 @@ func (pm *PolicyManager) NodeCanHaveTag(node *types.Node, tag string) bool {
 	tags, invalid := pm.pol.TagsOfNode(pm.users, node)
 	log.Debug().Strs("authorised_tags", tags).Strs("unauthorised_tags", invalid).Uint64("node.id", node.ID.Uint64()).Msg("tags provided by policy")
 
-	for _, t := range tags {
-		if t == tag {
-			return true
-		}
-	}
-
-	return false
+	return slices.Contains(tags, tag)
 }
 
 func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {
@@ -174,7 +170,7 @@ func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefi
 		}
 
 		// approvedIPs should contain all of node's IPs if it matches the rule, so check for first
-		if ips.Contains(*node.IPv4) {
+		if ips != nil && ips.Contains(*node.IPv4) {
 			return true
 		}
 	}
diff --git a/hscontrol/policy/v2/policy.go b/hscontrol/policy/v2/policy.go
index 41f51487..4060b6a6 100644
--- a/hscontrol/policy/v2/policy.go
+++ b/hscontrol/policy/v2/policy.go
@@ -7,6 +7,8 @@ import (
 	"strings"
 	"sync"
 
+	"slices"
+
 	"github.com/juanfont/headscale/hscontrol/types"
 	"go4.org/netipx"
 	"tailscale.com/net/tsaddr"
@@ -174,10 +176,8 @@ func (pm *PolicyManager) NodeCanHaveTag(node *types.Node, tag string) bool {
 	defer pm.mu.Unlock()
 
 	if ips, ok := pm.tagOwnerMap[Tag(tag)]; ok {
-		for _, nodeAddr := range node.IPs() {
-			if ips.Contains(nodeAddr) {
-				return true
-			}
+		if slices.ContainsFunc(node.IPs(), ips.Contains) {
+			return true
 		}
 	}
 
@@ -196,10 +196,8 @@ func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefi
 	// where there is an exact entry, e.g. 10.0.0.0/8, then
 	// check and return quickly
 	if _, ok := pm.autoApproveMap[route]; ok {
-		for _, nodeAddr := range node.IPs() {
-			if pm.autoApproveMap[route].Contains(nodeAddr) {
-				return true
-			}
+		if slices.ContainsFunc(node.IPs(), pm.autoApproveMap[route].Contains) {
+			return true
 		}
 	}
 
@@ -220,10 +218,8 @@ func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefi
 		// Check if prefix is larger (so containing) and then overlaps
 		// the route to see if the node can approve a subset of an autoapprover
 		if prefix.Bits() <= route.Bits() && prefix.Overlaps(route) {
-			for _, nodeAddr := range node.IPs() {
-				if approveAddrs.Contains(nodeAddr) {
-					return true
-				}
+			if slices.ContainsFunc(node.IPs(), approveAddrs.Contains) {
+				return true
 			}
 		}
 	}
@@ -279,5 +275,8 @@ func (pm *PolicyManager) DebugString() string {
 		}
 	}
 
+	sb.WriteString("\n\n")
+	sb.WriteString(pm.nodes.DebugString())
+
 	return sb.String()
 }
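Both policy implementations above replace hand-rolled membership loops with the standard library's slices helpers. For illustration, a minimal, self-contained sketch of the method-value pattern used with slices.ContainsFunc — the prefix and address below are invented for the example, not taken from the patch:

package main

import (
	"fmt"
	"net/netip"
	"slices"

	"go4.org/netipx"
)

func main() {
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("100.64.0.0/10"))
	ips, _ := b.IPSet() // *netipx.IPSet

	nodeAddrs := []netip.Addr{netip.MustParseAddr("100.64.0.1")}

	// ips.Contains has type func(netip.Addr) bool, so the method value
	// can be passed directly as the predicate, replacing the inner loop.
	fmt.Println(slices.ContainsFunc(nodeAddrs, ips.Contains)) // true
}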
diff --git a/hscontrol/policy/v2/types.go b/hscontrol/policy/v2/types.go
index e533bafb..55376b97 100644
--- a/hscontrol/policy/v2/types.go
+++ b/hscontrol/policy/v2/types.go
@@ -162,6 +162,10 @@ func (g Group) CanBeAutoApprover() bool {
 	return true
 }
 
+func (g Group) String() string {
+	return string(g)
+}
+
 func (g Group) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
 	var ips netipx.IPSetBuilder
 	var errs []error
@@ -235,6 +239,10 @@ func (t Tag) CanBeAutoApprover() bool {
 	return true
 }
 
+func (t Tag) String() string {
+	return string(t)
+}
+
 // Host is a string that represents a hostname.
 type Host string
@@ -590,6 +598,7 @@ func unmarshalPointer[T any](
 type AutoApprover interface {
 	CanBeAutoApprover() bool
 	UnmarshalJSON([]byte) error
+	String() string
 }
 
 type AutoApprovers []AutoApprover
diff --git a/hscontrol/types/node.go b/hscontrol/types/node.go
index 3af43473..3567c4f1 100644
--- a/hscontrol/types/node.go
+++ b/hscontrol/types/node.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net/netip"
 	"slices"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
@@ -194,19 +195,26 @@ func (node *Node) IsTagged() bool {
 // Currently, this function only handles tags set
 // via CLI ("forced tags" and preauthkeys)
 func (node *Node) HasTag(tag string) bool {
-	if slices.Contains(node.ForcedTags, tag) {
-		return true
-	}
+	return slices.Contains(node.Tags(), tag)
+}
 
-	if node.AuthKey != nil && slices.Contains(node.AuthKey.Tags, tag) {
-		return true
+func (node *Node) Tags() []string {
+	var tags []string
+
+	if node.AuthKey != nil {
+		tags = append(tags, node.AuthKey.Tags...)
 	}
 
 	// TODO(kradalby): Figure out how tagging should work
 	// and hostinfo.requestedtags.
 	// Do this in other work.
+	// #2417
 
-	return false
+	tags = append(tags, node.ForcedTags...)
+	sort.Strings(tags)
+	tags = slices.Compact(tags)
+
+	return tags
 }
 
 func (node *Node) RequestTags() []string {
@@ -549,3 +557,25 @@ func (nodes Nodes) IDMap() map[NodeID]*Node {
 
 	return ret
 }
+
+func (nodes Nodes) DebugString() string {
+	var sb strings.Builder
+	sb.WriteString("Nodes:\n")
+	for _, node := range nodes {
+		sb.WriteString(node.DebugString())
+		sb.WriteString("\n")
+	}
+	return sb.String()
+}
+
+func (node Node) DebugString() string {
+	var sb strings.Builder
+	fmt.Fprintf(&sb, "%s(%s):\n", node.Hostname, node.ID)
+	fmt.Fprintf(&sb, "\tUser: %s (%d, %q)\n", node.User.Display(), node.User.ID, node.User.Username())
+	fmt.Fprintf(&sb, "\tTags: %v\n", node.Tags())
+	fmt.Fprintf(&sb, "\tIPs: %v\n", node.IPs())
+	fmt.Fprintf(&sb, "\tApprovedRoutes: %v\n", node.ApprovedRoutes)
+	fmt.Fprintf(&sb, "\tSubnetRoutes: %v\n", node.SubnetRoutes())
+	sb.WriteString("\n")
+	return sb.String()
+}
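The new Tags() concatenates the pre-auth-key tags and the forced tags, then deduplicates. Note that slices.Compact only removes consecutive duplicates, which is why the sort must come first. A tiny standalone sketch of that interaction (the tag values are invented for the example):

package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	// "tag:web" appears in both the auth key tags and the forced tags.
	tags := []string{"tag:web", "tag:db", "tag:web"}

	sort.Strings(tags)          // sorting groups duplicates next to each other...
	tags = slices.Compact(tags) // ...so Compact can drop them

	fmt.Println(tags) // [tag:db tag:web]
}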
diff --git a/hscontrol/util/util.go b/hscontrol/util/util.go
index a41ee6f8..4f6660be 100644
--- a/hscontrol/util/util.go
+++ b/hscontrol/util/util.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net/netip"
 	"net/url"
+	"os"
 	"regexp"
 	"strconv"
 	"strings"
@@ -173,3 +174,15 @@ func ParseTraceroute(output string) (Traceroute, error) {
 
 	return result, nil
 }
+
+func IsCI() bool {
+	if _, ok := os.LookupEnv("CI"); ok {
+		return true
+	}
+
+	if _, ok := os.LookupEnv("GITHUB_RUN_ID"); ok {
+		return true
+	}
+
+	return false
+}
diff --git a/integration/acl_test.go b/integration/acl_test.go
index 72f44cc0..bb18b3b3 100644
--- a/integration/acl_test.go
+++ b/integration/acl_test.go
@@ -1054,7 +1054,7 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
 	// Initially all nodes can reach each other
 	for _, client := range all {
 		for _, peer := range all {
-			if client.ID() == peer.ID() {
+			if client.ContainerID() == peer.ContainerID() {
 				continue
 			}
diff --git a/integration/auth_oidc_test.go b/integration/auth_oidc_test.go
index a036fdd0..53c74577 100644
--- a/integration/auth_oidc_test.go
+++ b/integration/auth_oidc_test.go
@@ -442,7 +442,7 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
 	assertNoErr(t, err)
 	assert.Len(t, listUsers, 0)
 
-	ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[TestDefaultNetwork]))
+	ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
 	assertNoErr(t, err)
 
 	u, err := ts.LoginWithURL(headscale.GetEndpoint())
diff --git a/integration/auth_web_flow_test.go b/integration/auth_web_flow_test.go
index 034ad5ae..64cace7b 100644
--- a/integration/auth_web_flow_test.go
+++ b/integration/auth_web_flow_test.go
@@ -26,7 +26,7 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
 	}
 	defer scenario.ShutdownAssertNoPanics(t)
 
-	err = scenario.CreateHeadscaleEnv(
+	err = scenario.CreateHeadscaleEnvWithLoginURL(
 		nil,
 		hsic.WithTestName("webauthping"),
 		hsic.WithEmbeddedDERPServerOnly(),
@@ -66,7 +66,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
 	assertNoErr(t, err)
 	defer scenario.ShutdownAssertNoPanics(t)
 
-	err = scenario.CreateHeadscaleEnv(
+	err = scenario.CreateHeadscaleEnvWithLoginURL(
 		nil,
 		hsic.WithTestName("weblogout"),
 		hsic.WithTLS(),
diff --git a/integration/dockertestutil/network.go b/integration/dockertestutil/network.go
index 9b51986b..83fc08c4 100644
--- a/integration/dockertestutil/network.go
+++ b/integration/dockertestutil/network.go
@@ -6,6 +6,7 @@ import (
 	"log"
 	"net"
 
+	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/ory/dockertest/v3"
 	"github.com/ory/dockertest/v3/docker"
 )
@@ -105,3 +106,23 @@ func CleanUnreferencedNetworks(pool *dockertest.Pool) error {
 
 	return nil
 }
+
+// CleanImagesInCI removes images if running in CI.
+func CleanImagesInCI(pool *dockertest.Pool) error {
+	if !util.IsCI() {
+		log.Println("Skipping image cleanup outside of CI")
+		return nil
+	}
+
+	images, err := pool.Client.ListImages(docker.ListImagesOptions{})
+	if err != nil {
+		return fmt.Errorf("getting images: %w", err)
+	}
+
+	for _, image := range images {
+		log.Printf("removing image: %s, %v", image.ID, image.RepoTags)
+		_ = pool.Client.RemoveImage(image.ID)
+	}
+
+	return nil
+}
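CleanImagesInCI gates Docker image cleanup on util.IsCI, which treats either the CI or the GITHUB_RUN_ID environment variable as evidence of a CI runner (GitHub Actions sets both). A standalone sketch of the same check, mirroring the helper added in the diff above:

package main

import (
	"fmt"
	"os"
)

// isCI mirrors util.IsCI from the patch: either variable being present
// is enough to treat the process as running in CI.
func isCI() bool {
	if _, ok := os.LookupEnv("CI"); ok {
		return true
	}

	if _, ok := os.LookupEnv("GITHUB_RUN_ID"); ok {
		return true
	}

	return false
}

func main() {
	fmt.Println("running in CI:", isCI())
}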
diff --git a/integration/general_test.go b/integration/general_test.go
index 02936f16..71d7c02c 100644
--- a/integration/general_test.go
+++ b/integration/general_test.go
@@ -138,7 +138,7 @@ func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {
 		t.Fatalf("failed to create user %s: %s", userName, err)
 	}
 
-	err = scenario.CreateTailscaleNodesInUser(userName, "all", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[TestDefaultNetwork]))
+	err = scenario.CreateTailscaleNodesInUser(userName, "all", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
 	if err != nil {
 		t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err)
 	}
@@ -216,7 +216,7 @@ func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
 		t.Fatalf("failed to create user %s: %s", userName, err)
 	}
 
-	err = scenario.CreateTailscaleNodesInUser(userName, "all", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[TestDefaultNetwork]))
+	err = scenario.CreateTailscaleNodesInUser(userName, "all", spec.NodesPerUser, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
 	if err != nil {
 		t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err)
 	}
diff --git a/integration/route_test.go b/integration/route_test.go
index ece89909..2a322f9c 100644
--- a/integration/route_test.go
+++ b/integration/route_test.go
@@ -287,9 +287,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, nodes, 6)
 
-	assertNodeRouteCount(t, nodes[0], 1, 0, 0)
-	assertNodeRouteCount(t, nodes[1], 1, 0, 0)
-	assertNodeRouteCount(t, nodes[2], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[1], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[2], 1, 0, 0)
 
 	// Verify that no routes has been sent to the client,
 	// they are not yet enabled.
@@ -319,9 +319,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, nodes, 6)
 
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 1, 0, 0)
-	assertNodeRouteCount(t, nodes[2], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[1], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[2], 1, 0, 0)
 
 	// Verify that the client has routes from the primary machine and can access
 	// the webservice.
@@ -375,9 +375,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, nodes, 6)
 
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 0)
-	assertNodeRouteCount(t, nodes[2], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[1], 1, 1, 0)
+	requireNodeRouteCount(t, nodes[2], 1, 0, 0)
 
 	// Verify that the client has routes from the primary machine
 	srs1 = subRouter1.MustStatus()
@@ -431,9 +431,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, nodes, 6)
 
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 0)
-	assertNodeRouteCount(t, nodes[2], 1, 1, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[1], 1, 1, 0)
+	requireNodeRouteCount(t, nodes[2], 1, 1, 0)
 
 	// Verify that the client has routes from the primary machine
 	srs1 = subRouter1.MustStatus()
@@ -645,9 +645,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, nodes, 6)
 
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 0)
-	assertNodeRouteCount(t, nodes[2], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[1], 1, 1, 0)
+	requireNodeRouteCount(t, nodes[2], 1, 0, 0)
 
 	// Verify that the route is announced from subnet router 1
 	clientStatus, err = client.Status()
@@ -690,9 +690,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, nodes, 6)
 
-	assertNodeRouteCount(t, nodes[0], 1, 0, 0)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[2], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[1], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[2], 1, 0, 0)
 
 	// Verify that the route is announced from subnet router 1
 	clientStatus, err = client.Status()
@@ -738,9 +738,9 @@ func TestHASubnetRouterFailover(t *testing.T) {
 	require.NoError(t, err)
 	assert.Len(t, nodes, 6)
 
-	assertNodeRouteCount(t, nodes[0], 1, 1, 0)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[2], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 1, 0)
+	requireNodeRouteCount(t, nodes[1], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[2], 1, 0, 0)
 
 	// Verify that the route is announced from subnet router 1
 	clientStatus, err = client.Status()
@@ -870,8 +870,8 @@ func TestSubnetRouteACL(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	assertNodeRouteCount(t, nodes[0], 1, 0, 0)
-	assertNodeRouteCount(t, nodes[1], 0, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[1], 0, 0, 0)
 
 	// Verify that no routes has been sent to the client,
 	// they are not yet enabled.
@@ -899,8 +899,8 @@ func TestSubnetRouteACL(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 0, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[1], 0, 0, 0)
 
 	// Verify that the client has routes from the primary machine
 	srs1, _ := subRouter1.Status()
@@ -1034,8 +1034,8 @@ func TestEnablingExitRoutes(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	assertNodeRouteCount(t, nodes[0], 2, 0, 0)
-	assertNodeRouteCount(t, nodes[1], 2, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 2, 0, 0)
+	requireNodeRouteCount(t, nodes[1], 2, 0, 0)
 
 	// Verify that no routes has been sent to the client,
 	// they are not yet enabled.
@@ -1067,8 +1067,8 @@ func TestEnablingExitRoutes(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, nodes, 2)
 
-	assertNodeRouteCount(t, nodes[0], 2, 2, 2)
-	assertNodeRouteCount(t, nodes[1], 2, 2, 2)
+	requireNodeRouteCount(t, nodes[0], 2, 2, 2)
+	requireNodeRouteCount(t, nodes[1], 2, 2, 2)
 
 	time.Sleep(5 * time.Second)
 
@@ -1158,7 +1158,7 @@ func TestSubnetRouterMultiNetwork(t *testing.T) {
 	nodes, err := headscale.ListNodes()
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
-	assertNodeRouteCount(t, nodes[0], 1, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 1, 0, 0)
 
 	// Verify that no routes has been sent to the client,
 	// they are not yet enabled.
@@ -1184,7 +1184,7 @@ func TestSubnetRouterMultiNetwork(t *testing.T) {
 	nodes, err = headscale.ListNodes()
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
+	requireNodeRouteCount(t, nodes[0], 1, 1, 1)
 
 	// Verify that the routes have been sent to the client.
 	status, err = user2c.Status()
@@ -1282,7 +1282,7 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
 	nodes, err := headscale.ListNodes()
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
-	assertNodeRouteCount(t, nodes[0], 2, 0, 0)
+	requireNodeRouteCount(t, nodes[0], 2, 0, 0)
 
 	// Verify that no routes has been sent to the client,
 	// they are not yet enabled.
@@ -1305,7 +1305,7 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
 	nodes, err = headscale.ListNodes()
 	require.NoError(t, err)
 	assert.Len(t, nodes, 2)
-	assertNodeRouteCount(t, nodes[0], 2, 2, 2)
+	requireNodeRouteCount(t, nodes[0], 2, 2, 2)
 
 	// Verify that the routes have been sent to the client.
 	status, err = user2c.Status()
@@ -1349,6 +1349,15 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
 	require.NoError(t, err)
 }
 
+func MustFindNode(hostname string, nodes []*v1.Node) *v1.Node {
+	for _, node := range nodes {
+		if node.GetName() == hostname {
+			return node
+		}
+	}
+	panic("node not found")
+}
+
 // TestAutoApproveMultiNetwork tests auto approving of routes
 // by setting up two networks where network1 has three subnet
 // routers:
@@ -1367,358 +1376,601 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
 // - Verify that routes can now be seen by peers.
 func TestAutoApproveMultiNetwork(t *testing.T) {
 	IntegrationSkip(t)
-	t.Parallel()
-
-	spec := ScenarioSpec{
-		NodesPerUser: 3,
-		Users:        []string{"user1", "user2"},
-		Networks: map[string][]string{
-			"usernet1": {"user1"},
-			"usernet2": {"user2"},
-		},
-		ExtraService: map[string][]extraServiceFunc{
-			"usernet1": {Webservice},
-		},
-		// We build the head image with curl and traceroute, so only use
-		// that for this test.
-		Versions: []string{"head"},
-	}
-
-	rootRoute := netip.MustParsePrefix("10.42.0.0/16")
+	bigRoute := netip.MustParsePrefix("10.42.0.0/16")
 	subRoute := netip.MustParsePrefix("10.42.7.0/24")
 	notApprovedRoute := netip.MustParsePrefix("192.168.0.0/24")
 
-	scenario, err := NewScenario(spec)
-	require.NoErrorf(t, err, "failed to create scenario: %s", err)
-	defer scenario.ShutdownAssertNoPanics(t)
-
-	pol := &policyv1.ACLPolicy{
-		ACLs: []policyv1.ACL{
-			{
-				Action:       "accept",
-				Sources:      []string{"*"},
-				Destinations: []string{"*:*"},
+	tests := []struct {
+		name     string
+		pol      *policyv1.ACLPolicy
+		approver string
+		spec     ScenarioSpec
+		withURL  bool
+	}{
+		{
+			name: "authkey-tag",
+			pol: &policyv1.ACLPolicy{
+				ACLs: []policyv1.ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"*"},
+						Destinations: []string{"*:*"},
+					},
+				},
+				TagOwners: map[string][]string{
+					"tag:approve": {"user1@"},
+				},
+				AutoApprovers: policyv1.AutoApprovers{
+					Routes: map[string][]string{
+						bigRoute.String(): {"tag:approve"},
+					},
+					ExitNode: []string{"tag:approve"},
+				},
+			},
+			approver: "tag:approve",
+			spec: ScenarioSpec{
+				NodesPerUser: 3,
+				Users:        []string{"user1", "user2"},
+				Networks: map[string][]string{
+					"usernet1": {"user1"},
+					"usernet2": {"user2"},
+				},
+				ExtraService: map[string][]extraServiceFunc{
+					"usernet1": {Webservice},
+				},
+				// We build the head image with curl and traceroute, so only use
+				// that for this test.
+				Versions: []string{"head"},
 			},
 		},
-		TagOwners: map[string][]string{
-			"tag:approve": {"user1@"},
-		},
-		AutoApprovers: policyv1.AutoApprovers{
-			Routes: map[string][]string{
-				rootRoute.String(): {"tag:approve"},
+		{
+			name: "authkey-user",
+			pol: &policyv1.ACLPolicy{
+				ACLs: []policyv1.ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"*"},
+						Destinations: []string{"*:*"},
+					},
+				},
+				AutoApprovers: policyv1.AutoApprovers{
+					Routes: map[string][]string{
+						bigRoute.String(): {"user1@"},
+					},
+					ExitNode: []string{"user1@"},
+				},
 			},
-			ExitNode: []string{"tag:approve"},
+			approver: "user1@",
+			spec: ScenarioSpec{
+				NodesPerUser: 3,
+				Users:        []string{"user1", "user2"},
+				Networks: map[string][]string{
+					"usernet1": {"user1"},
+					"usernet2": {"user2"},
+				},
+				ExtraService: map[string][]extraServiceFunc{
+					"usernet1": {Webservice},
+				},
+				// We build the head image with curl and traceroute, so only use
+				// that for this test.
+				Versions: []string{"head"},
+			},
+		},
+		{
+			name: "authkey-group",
+			pol: &policyv1.ACLPolicy{
+				ACLs: []policyv1.ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"*"},
+						Destinations: []string{"*:*"},
+					},
+				},
+				Groups: policyv1.Groups{
+					"group:approve": []string{"user1@"},
+				},
+				AutoApprovers: policyv1.AutoApprovers{
+					Routes: map[string][]string{
+						bigRoute.String(): {"group:approve"},
+					},
+					ExitNode: []string{"group:approve"},
+				},
+			},
+			approver: "group:approve",
+			spec: ScenarioSpec{
+				NodesPerUser: 3,
+				Users:        []string{"user1", "user2"},
+				Networks: map[string][]string{
+					"usernet1": {"user1"},
+					"usernet2": {"user2"},
+				},
+				ExtraService: map[string][]extraServiceFunc{
+					"usernet1": {Webservice},
+				},
+				// We build the head image with curl and traceroute, so only use
+				// that for this test.
+				Versions: []string{"head"},
+			},
+		},
+		{
+			name: "webauth-user",
+			pol: &policyv1.ACLPolicy{
+				ACLs: []policyv1.ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"*"},
+						Destinations: []string{"*:*"},
+					},
+				},
+				AutoApprovers: policyv1.AutoApprovers{
+					Routes: map[string][]string{
+						bigRoute.String(): {"user1@"},
+					},
+					ExitNode: []string{"user1@"},
+				},
+			},
+			approver: "user1@",
+			spec: ScenarioSpec{
+				NodesPerUser: 3,
+				Users:        []string{"user1", "user2"},
+				Networks: map[string][]string{
+					"usernet1": {"user1"},
+					"usernet2": {"user2"},
+				},
+				ExtraService: map[string][]extraServiceFunc{
+					"usernet1": {Webservice},
+				},
+				// We build the head image with curl and traceroute, so only use
+				// that for this test.
+				Versions: []string{"head"},
+			},
+			withURL: true,
+		},
+		{
+			name: "webauth-tag",
+			pol: &policyv1.ACLPolicy{
+				ACLs: []policyv1.ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"*"},
+						Destinations: []string{"*:*"},
+					},
+				},
+				TagOwners: map[string][]string{
+					"tag:approve": {"user1@"},
+				},
+				AutoApprovers: policyv1.AutoApprovers{
+					Routes: map[string][]string{
+						bigRoute.String(): {"tag:approve"},
+					},
+					ExitNode: []string{"tag:approve"},
+				},
+			},
+			approver: "tag:approve",
+			spec: ScenarioSpec{
+				NodesPerUser: 3,
+				Users:        []string{"user1", "user2"},
+				Networks: map[string][]string{
+					"usernet1": {"user1"},
+					"usernet2": {"user2"},
+				},
+				ExtraService: map[string][]extraServiceFunc{
+					"usernet1": {Webservice},
+				},
+				// We build the head image with curl and traceroute, so only use
+				// that for this test.
+				Versions: []string{"head"},
+			},
+			withURL: true,
+		},
+		{
+			name: "webauth-group",
+			pol: &policyv1.ACLPolicy{
+				ACLs: []policyv1.ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"*"},
+						Destinations: []string{"*:*"},
+					},
+				},
+				Groups: policyv1.Groups{
+					"group:approve": []string{"user1@"},
+				},
+				AutoApprovers: policyv1.AutoApprovers{
+					Routes: map[string][]string{
+						bigRoute.String(): {"group:approve"},
+					},
+					ExitNode: []string{"group:approve"},
+				},
+			},
+			approver: "group:approve",
+			spec: ScenarioSpec{
+				NodesPerUser: 3,
+				Users:        []string{"user1", "user2"},
+				Networks: map[string][]string{
+					"usernet1": {"user1"},
+					"usernet2": {"user2"},
+				},
+				ExtraService: map[string][]extraServiceFunc{
+					"usernet1": {Webservice},
+				},
+				// We build the head image with curl and traceroute, so only use
+				// that for this test.
+				Versions: []string{"head"},
+			},
+			withURL: true,
 		},
 	}
 
-	err = scenario.CreateHeadscaleEnv([]tsic.Option{
-		tsic.WithAcceptRoutes(),
-		tsic.WithTags([]string{"tag:approve"}),
-	},
-		hsic.WithTestName("clienableroute"),
-		hsic.WithEmbeddedDERPServerOnly(),
-		hsic.WithTLS(),
-		hsic.WithACLPolicy(pol),
-		hsic.WithPolicyMode(types.PolicyModeDB),
-	)
-	assertNoErrHeadscaleEnv(t, err)
+	for _, tt := range tests {
+		for _, dbMode := range []types.PolicyMode{types.PolicyModeDB, types.PolicyModeFile} {
+			for _, advertiseDuringUp := range []bool{false, true} {
+				name := fmt.Sprintf("%s-advertiseduringup-%t-pol-%s", tt.name, advertiseDuringUp, dbMode)
+				t.Run(name, func(t *testing.T) {
+					scenario, err := NewScenario(tt.spec)
+					require.NoErrorf(t, err, "failed to create scenario: %s", err)
+					defer scenario.ShutdownAssertNoPanics(t)
 
-	allClients, err := scenario.ListTailscaleClients()
-	assertNoErrListClients(t, err)
+					opts := []hsic.Option{
+						hsic.WithTestName("autoapprovemulti"),
+						hsic.WithEmbeddedDERPServerOnly(),
+						hsic.WithTLS(),
+						hsic.WithACLPolicy(tt.pol),
+						hsic.WithPolicyMode(dbMode),
+					}
 
-	err = scenario.WaitForTailscaleSync()
-	assertNoErrSync(t, err)
+					tsOpts := []tsic.Option{
+						tsic.WithAcceptRoutes(),
+					}
 
-	headscale, err := scenario.Headscale()
-	assertNoErrGetHeadscale(t, err)
-	assert.NotNil(t, headscale)
+					if tt.approver == "tag:approve" {
+						tsOpts = append(tsOpts,
+							tsic.WithTags([]string{"tag:approve"}),
+						)
+					}
 
-	route, err := scenario.SubnetOfNetwork("usernet1")
-	require.NoError(t, err)
+					route, err := scenario.SubnetOfNetwork("usernet1")
+					require.NoError(t, err)
 
-	// Set the route of usernet1 to be autoapproved
-	pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
-	err = headscale.SetPolicy(pol)
-	require.NoError(t, err)
+					err = scenario.createHeadscaleEnv(tt.withURL, tsOpts,
+						opts...,
+					)
+					assertNoErrHeadscaleEnv(t, err)
 
-	services, err := scenario.Services("usernet1")
-	require.NoError(t, err)
-	require.Len(t, services, 1)
+					allClients, err := scenario.ListTailscaleClients()
+					assertNoErrListClients(t, err)
 
-	usernet1, err := scenario.Network("usernet1")
-	require.NoError(t, err)
+					err = scenario.WaitForTailscaleSync()
+					assertNoErrSync(t, err)
 
-	web := services[0]
-	webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))
-	weburl := fmt.Sprintf("http://%s/etc/hostname", webip)
-	t.Logf("webservice: %s, %s", webip.String(), weburl)
+					services, err := scenario.Services("usernet1")
+					require.NoError(t, err)
+					require.Len(t, services, 1)
 
-	// Sort nodes by ID
-	sort.SliceStable(allClients, func(i, j int) bool {
-		statusI := allClients[i].MustStatus()
-		statusJ := allClients[j].MustStatus()
+					usernet1, err := scenario.Network("usernet1")
+					require.NoError(t, err)
 
-		return statusI.Self.ID < statusJ.Self.ID
-	})
+					headscale, err := scenario.Headscale()
+					assertNoErrGetHeadscale(t, err)
+					assert.NotNil(t, headscale)
 
-	// This is ok because the scenario makes users in order, so the three first
-	// nodes, which are subnet routes, will be created first, and the last user
-	// will be created with the second.
-	routerUsernet1 := allClients[0]
-	routerSubRoute := allClients[1]
-	routerExitNode := allClients[2]
+					if advertiseDuringUp {
+						tsOpts = append(tsOpts,
+							tsic.WithExtraLoginArgs([]string{"--advertise-routes=" + route.String()}),
+						)
+					}
 
-	client := allClients[3]
+					tsOpts = append(tsOpts, tsic.WithNetwork(usernet1))
 
-	// Advertise the route for the dockersubnet of user1
-	command := []string{
-		"tailscale",
-		"set",
-		"--advertise-routes=" + route.String(),
-	}
-	_, _, err = routerUsernet1.Execute(command)
-	require.NoErrorf(t, err, "failed to advertise route: %s", err)
+					// This whole dance is to add a node _after_ all the other nodes
+					// with an additional tsOpt which advertises the route as part
+					// of the `tailscale up` command. If we do this as part of the
+					// scenario creation, it will be added to all nodes and turn
+					// into an HA node, which isn't something we are testing here.
+					routerUsernet1, err := scenario.CreateTailscaleNode("head", tsOpts...)
+					require.NoError(t, err)
+					defer routerUsernet1.Shutdown()
 
-	time.Sleep(5 * time.Second)
+					if tt.withURL {
+						u, err := routerUsernet1.LoginWithURL(headscale.GetEndpoint())
+						assertNoErr(t, err)
 
-	// These route should auto approve, so the node is expected to have a route
-	// for all counts.
-	nodes, err := headscale.ListNodes()
-	require.NoError(t, err)
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
+						body, err := doLoginURL(routerUsernet1.Hostname(), u)
+						assertNoErr(t, err)
 
-	// Verify that the routes have been sent to the client.
-	status, err := client.Status()
-	require.NoError(t, err)
+						scenario.runHeadscaleRegister("user1", body)
+					} else {
+						pak, err := scenario.CreatePreAuthKey("user1", false, false)
+						assertNoErr(t, err)
 
-	for _, peerKey := range status.Peers() {
-		peerStatus := status.Peer[peerKey]
+						err = routerUsernet1.Login(headscale.GetEndpoint(), pak.Key)
+						assertNoErr(t, err)
+					}
+					// End of the extra node creation.
 
-		if peerStatus.ID == "1" {
-			assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
-		} else {
-			requirePeerSubnetRoutes(t, peerStatus, nil)
-		}
-	}
+					// Set the route of usernet1 to be auto-approved
+					tt.pol.AutoApprovers.Routes[route.String()] = []string{tt.approver}
+					err = headscale.SetPolicy(tt.pol)
+					require.NoError(t, err)
 
-	url := fmt.Sprintf("http://%s/etc/hostname", webip)
-	t.Logf("url from %s to %s", client.Hostname(), url)
+					routerUsernet1ID := routerUsernet1.MustID()
 
-	result, err := client.Curl(url)
-	require.NoError(t, err)
-	assert.Len(t, result, 13)
+					web := services[0]
+					webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))
+					weburl := fmt.Sprintf("http://%s/etc/hostname", webip)
+					t.Logf("webservice: %s, %s", webip.String(), weburl)
 
-	tr, err := client.Traceroute(webip)
-	require.NoError(t, err)
-	assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
+					// Sort nodes by ID
+					sort.SliceStable(allClients, func(i, j int) bool {
+						statusI := allClients[i].MustStatus()
+						statusJ := allClients[j].MustStatus()
 
-	// Remove the auto approval from the policy, any routes already enabled should be allowed.
-	delete(pol.AutoApprovers.Routes, route.String())
-	err = headscale.SetPolicy(pol)
-	require.NoError(t, err)
+						return statusI.Self.ID < statusJ.Self.ID
+					})
 
-	time.Sleep(5 * time.Second)
+					// This is ok because the scenario creates users in order, so the
+					// first three nodes, which are the subnet routers, belong to the
+					// first user, and the client below belongs to the second user.
+					routerSubRoute := allClients[1]
+					routerExitNode := allClients[2]
 
-	// These route should auto approve, so the node is expected to have a route
-	// for all counts.
-	nodes, err = headscale.ListNodes()
-	require.NoError(t, err)
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
+					client := allClients[3]
 
-	// Verify that the routes have been sent to the client.
-	status, err = client.Status()
-	require.NoError(t, err)
+					if !advertiseDuringUp {
+						// Advertise the route for the dockersubnet of user1
+						command := []string{
+							"tailscale",
+							"set",
+							"--advertise-routes=" + route.String(),
+						}
+						_, _, err = routerUsernet1.Execute(command)
+						require.NoErrorf(t, err, "failed to advertise route: %s", err)
+					}
 
-	for _, peerKey := range status.Peers() {
-		peerStatus := status.Peer[peerKey]
+					time.Sleep(5 * time.Second)
 
-		if peerStatus.ID == "1" {
-			assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
-		} else {
-			requirePeerSubnetRoutes(t, peerStatus, nil)
-		}
-	}
+					// These routes should auto-approve, so the node is expected to
+					// have a route for all counts.
+					nodes, err := headscale.ListNodes()
+					require.NoError(t, err)
+					requireNodeRouteCount(t, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)
 
-	url = fmt.Sprintf("http://%s/etc/hostname", webip)
-	t.Logf("url from %s to %s", client.Hostname(), url)
+					// Verify that the routes have been sent to the client.
+					status, err := client.Status()
+					require.NoError(t, err)
 
-	result, err = client.Curl(url)
-	require.NoError(t, err)
-	assert.Len(t, result, 13)
+					for _, peerKey := range status.Peers() {
+						peerStatus := status.Peer[peerKey]
 
-	tr, err = client.Traceroute(webip)
-	require.NoError(t, err)
-	assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
+						if peerStatus.ID == routerUsernet1ID.StableID() {
+							assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
+						} else {
+							requirePeerSubnetRoutes(t, peerStatus, nil)
+						}
+					}
 
-	// Disable the route, making it unavailable since it is no longer auto-approved
-	_, err = headscale.ApproveRoutes(
-		nodes[0].GetId(),
-		[]netip.Prefix{},
-	)
-	require.NoError(t, err)
+					url := fmt.Sprintf("http://%s/etc/hostname", webip)
+					t.Logf("url from %s to %s", client.Hostname(), url)
 
-	time.Sleep(5 * time.Second)
+					result, err := client.Curl(url)
+					require.NoError(t, err)
+					assert.Len(t, result, 13)
 
-	// These route should auto approve, so the node is expected to have a route
-	// for all counts.
-	nodes, err = headscale.ListNodes()
-	require.NoError(t, err)
-	assertNodeRouteCount(t, nodes[0], 1, 0, 0)
+					tr, err := client.Traceroute(webip)
+					require.NoError(t, err)
+					assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
 
-	// Verify that the routes have been sent to the client.
-	status, err = client.Status()
-	require.NoError(t, err)
+					// Remove the auto approval from the policy; any routes already
+					// enabled should still be allowed.
+					delete(tt.pol.AutoApprovers.Routes, route.String())
+					err = headscale.SetPolicy(tt.pol)
+					require.NoError(t, err)
 
-	for _, peerKey := range status.Peers() {
-		peerStatus := status.Peer[peerKey]
-		requirePeerSubnetRoutes(t, peerStatus, nil)
-	}
+					time.Sleep(5 * time.Second)
 
-	// Add the route back to the auto approver in the policy, the route should
-	// now become available again.
-	pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
-	err = headscale.SetPolicy(pol)
-	require.NoError(t, err)
+					// These routes should auto-approve, so the node is expected to
+					// have a route for all counts.
+					nodes, err = headscale.ListNodes()
+					require.NoError(t, err)
+					requireNodeRouteCount(t, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)
 
-	time.Sleep(5 * time.Second)
+					// Verify that the routes have been sent to the client.
+					status, err = client.Status()
+					require.NoError(t, err)
 
-	// These route should auto approve, so the node is expected to have a route
-	// for all counts.
-	nodes, err = headscale.ListNodes()
-	require.NoError(t, err)
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
+					for _, peerKey := range status.Peers() {
+						peerStatus := status.Peer[peerKey]
 
-	// Verify that the routes have been sent to the client.
-	status, err = client.Status()
-	require.NoError(t, err)
+						if peerStatus.ID == routerUsernet1ID.StableID() {
+							assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
+						} else {
+							requirePeerSubnetRoutes(t, peerStatus, nil)
+						}
+					}
 
-	for _, peerKey := range status.Peers() {
-		peerStatus := status.Peer[peerKey]
+					url = fmt.Sprintf("http://%s/etc/hostname", webip)
+					t.Logf("url from %s to %s", client.Hostname(), url)
 
-		if peerStatus.ID == "1" {
-			require.NotNil(t, peerStatus.PrimaryRoutes)
-			assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
-		} else {
-			requirePeerSubnetRoutes(t, peerStatus, nil)
-		}
-	}
+					result, err = client.Curl(url)
+					require.NoError(t, err)
+					assert.Len(t, result, 13)
 
-	url = fmt.Sprintf("http://%s/etc/hostname", webip)
-	t.Logf("url from %s to %s", client.Hostname(), url)
+					tr, err = client.Traceroute(webip)
+					require.NoError(t, err)
+					assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
 
-	result, err = client.Curl(url)
-	require.NoError(t, err)
-	assert.Len(t, result, 13)
+					// Disable the route, making it unavailable since it is no longer auto-approved
+					_, err = headscale.ApproveRoutes(
+						MustFindNode(routerUsernet1.Hostname(), nodes).GetId(),
+						[]netip.Prefix{},
+					)
+					require.NoError(t, err)
 
-	tr, err = client.Traceroute(webip)
-	require.NoError(t, err)
-	assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
+					time.Sleep(5 * time.Second)
 
-	// Advertise and validate a subnet of an auto approved route, /24 inside the
-	// auto approved /16.
-	command = []string{
-		"tailscale",
-		"set",
-		"--advertise-routes=" + subRoute.String(),
-	}
-	_, _, err = routerSubRoute.Execute(command)
-	require.NoErrorf(t, err, "failed to advertise route: %s", err)
+					// The route is no longer approved, so the node keeps only the
+					// announced count.
+					nodes, err = headscale.ListNodes()
+					require.NoError(t, err)
+					requireNodeRouteCount(t, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 0, 0)
 
-	time.Sleep(5 * time.Second)
+					// Verify that the routes have been sent to the client.
+					status, err = client.Status()
+					require.NoError(t, err)
 
-	// These route should auto approve, so the node is expected to have a route
-	// for all counts.
-	nodes, err = headscale.ListNodes()
-	require.NoError(t, err)
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 1)
+					for _, peerKey := range status.Peers() {
+						peerStatus := status.Peer[peerKey]
+						requirePeerSubnetRoutes(t, peerStatus, nil)
+					}
 
-	// Verify that the routes have been sent to the client.
-	status, err = client.Status()
-	require.NoError(t, err)
+					// Add the route back to the auto approver in the policy; the
+					// route should now become available again.
+					tt.pol.AutoApprovers.Routes[route.String()] = []string{tt.approver}
+					err = headscale.SetPolicy(tt.pol)
+					require.NoError(t, err)
 
-	for _, peerKey := range status.Peers() {
-		peerStatus := status.Peer[peerKey]
+					time.Sleep(5 * time.Second)
 
-		if peerStatus.ID == "1" {
-			assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
-		} else if peerStatus.ID == "2" {
-			assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), subRoute)
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{subRoute})
-		} else {
-			requirePeerSubnetRoutes(t, peerStatus, nil)
-		}
-	}
+					// These routes should auto-approve, so the node is expected to
+					// have a route for all counts.
+					nodes, err = headscale.ListNodes()
+					require.NoError(t, err)
+					requireNodeRouteCount(t, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)
 
-	// Advertise a not approved route will not end up anywhere
-	command = []string{
-		"tailscale",
-		"set",
-		"--advertise-routes=" + notApprovedRoute.String(),
-	}
-	_, _, err = routerSubRoute.Execute(command)
-	require.NoErrorf(t, err, "failed to advertise route: %s", err)
+					// Verify that the routes have been sent to the client.
+					status, err = client.Status()
+					require.NoError(t, err)
 
-	time.Sleep(5 * time.Second)
+					for _, peerKey := range status.Peers() {
+						peerStatus := status.Peer[peerKey]
 
-	// These route should auto approve, so the node is expected to have a route
-	// for all counts.
-	nodes, err = headscale.ListNodes()
-	require.NoError(t, err)
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 0)
-	assertNodeRouteCount(t, nodes[2], 0, 0, 0)
+						if peerStatus.ID == routerUsernet1ID.StableID() {
+							require.NotNil(t, peerStatus.PrimaryRoutes)
+							assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
+						} else {
+							requirePeerSubnetRoutes(t, peerStatus, nil)
+						}
+					}
 
-	// Verify that the routes have been sent to the client.
-	status, err = client.Status()
-	require.NoError(t, err)
+					url = fmt.Sprintf("http://%s/etc/hostname", webip)
+					t.Logf("url from %s to %s", client.Hostname(), url)
 
-	for _, peerKey := range status.Peers() {
-		peerStatus := status.Peer[peerKey]
+					result, err = client.Curl(url)
+					require.NoError(t, err)
+					assert.Len(t, result, 13)
 
-		if peerStatus.ID == "1" {
-			assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
-		} else {
-			requirePeerSubnetRoutes(t, peerStatus, nil)
-		}
-	}
+					tr, err = client.Traceroute(webip)
+					require.NoError(t, err)
+					assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
 
-	// Exit routes are also automatically approved
-	command = []string{
-		"tailscale",
-		"set",
-		"--advertise-exit-node",
-	}
-	_, _, err = routerExitNode.Execute(command)
-	require.NoErrorf(t, err, "failed to advertise route: %s", err)
+					// Advertise and validate a subnet of an auto approved route, /24
+					// inside the auto approved /16.
+					command := []string{
+						"tailscale",
+						"set",
+						"--advertise-routes=" + subRoute.String(),
+					}
+					_, _, err = routerSubRoute.Execute(command)
+					require.NoErrorf(t, err, "failed to advertise route: %s", err)
 
-	time.Sleep(5 * time.Second)
+					time.Sleep(5 * time.Second)
 
-	nodes, err = headscale.ListNodes()
-	require.NoError(t, err)
-	assertNodeRouteCount(t, nodes[0], 1, 1, 1)
-	assertNodeRouteCount(t, nodes[1], 1, 1, 0)
-	assertNodeRouteCount(t, nodes[2], 2, 2, 2)
+					// These routes should auto-approve, so the node is expected to
+					// have a route for all counts.
+					nodes, err = headscale.ListNodes()
+					require.NoError(t, err)
+					requireNodeRouteCount(t, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)
+					requireNodeRouteCount(t, nodes[1], 1, 1, 1)
 
-	// Verify that the routes have been sent to the client.
-	status, err = client.Status()
-	require.NoError(t, err)
+					// Verify that the routes have been sent to the client.
+					status, err = client.Status()
+					require.NoError(t, err)
 
-	for _, peerKey := range status.Peers() {
-		peerStatus := status.Peer[peerKey]
+					for _, peerKey := range status.Peers() {
+						peerStatus := status.Peer[peerKey]
 
-		if peerStatus.ID == "1" {
-			assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
-		} else if peerStatus.ID == "3" {
-			requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
-		} else {
-			requirePeerSubnetRoutes(t, peerStatus, nil)
+						if peerStatus.ID == routerUsernet1ID.StableID() {
+							assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
+						} else if peerStatus.ID == "2" {
+							assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), subRoute)
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{subRoute})
+						} else {
+							requirePeerSubnetRoutes(t, peerStatus, nil)
+						}
+					}
+
+					// Advertising a route that is not approved should not end up anywhere
+					command = []string{
+						"tailscale",
+						"set",
+						"--advertise-routes=" + notApprovedRoute.String(),
+					}
+					_, _, err = routerSubRoute.Execute(command)
+					require.NoErrorf(t, err, "failed to advertise route: %s", err)
+
+					time.Sleep(5 * time.Second)
+
+					// Only the already-approved routes should be counted; the new
+					// route is announced but never approved.
+					nodes, err = headscale.ListNodes()
+					require.NoError(t, err)
+					requireNodeRouteCount(t, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)
+					requireNodeRouteCount(t, nodes[1], 1, 1, 0)
+					requireNodeRouteCount(t, nodes[2], 0, 0, 0)
+
+					// Verify that the routes have been sent to the client.
+					status, err = client.Status()
+					require.NoError(t, err)
+
+					for _, peerKey := range status.Peers() {
+						peerStatus := status.Peer[peerKey]
+
+						if peerStatus.ID == routerUsernet1ID.StableID() {
+							assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
+						} else {
+							requirePeerSubnetRoutes(t, peerStatus, nil)
+						}
+					}
+
+					// Exit routes are also automatically approved
+					command = []string{
+						"tailscale",
+						"set",
+						"--advertise-exit-node",
+					}
+					_, _, err = routerExitNode.Execute(command)
+					require.NoErrorf(t, err, "failed to advertise route: %s", err)
+
+					time.Sleep(5 * time.Second)
+
+					nodes, err = headscale.ListNodes()
+					require.NoError(t, err)
+					requireNodeRouteCount(t, MustFindNode(routerUsernet1.Hostname(), nodes), 1, 1, 1)
+					requireNodeRouteCount(t, nodes[1], 1, 1, 0)
+					requireNodeRouteCount(t, nodes[2], 2, 2, 2)
+
+					// Verify that the routes have been sent to the client.
+					status, err = client.Status()
+					require.NoError(t, err)
+
+					for _, peerKey := range status.Peers() {
+						peerStatus := status.Peer[peerKey]
+
+						if peerStatus.ID == routerUsernet1ID.StableID() {
+							assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
+						} else if peerStatus.ID == "3" {
+							requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
+						} else {
+							requirePeerSubnetRoutes(t, peerStatus, nil)
+						}
+					}
+				})
+			}
 		}
 	}
 }
@@ -1757,9 +2009,9 @@ func requirePeerSubnetRoutes(t *testing.T, status *ipnstate.PeerStatus, expected
 	}
 }
 
-func assertNodeRouteCount(t *testing.T, node *v1.Node, announced, approved, subnet int) {
+func requireNodeRouteCount(t *testing.T, node *v1.Node, announced, approved, subnet int) {
 	t.Helper()
-	assert.Len(t, node.GetAvailableRoutes(), announced)
-	assert.Len(t, node.GetApprovedRoutes(), approved)
-	assert.Len(t, node.GetSubnetRoutes(), subnet)
+	require.Lenf(t, node.GetAvailableRoutes(), announced, "expected %q announced routes (%v) to have %d routes, had %d", node.GetName(), node.GetAvailableRoutes(), announced, len(node.GetAvailableRoutes()))
+	require.Lenf(t, node.GetApprovedRoutes(), approved, "expected %q approved routes (%v) to have %d routes, had %d", node.GetName(), node.GetApprovedRoutes(), approved, len(node.GetApprovedRoutes()))
+	require.Lenf(t, node.GetSubnetRoutes(), subnet, "expected %q subnet routes (%v) to have %d routes, had %d", node.GetName(), node.GetSubnetRoutes(), subnet, len(node.GetSubnetRoutes()))
 }
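The assertNodeRouteCount to requireNodeRouteCount rename above follows testify's semantics: assert.* marks the test failed but keeps executing, while require.* stops the test immediately, so later checks never run against state that is already known to be wrong. A stdlib-only sketch of the same distinction (a hypothetical test, not taken from the patch):

package main

import "testing"

func TestRouteCountSketch(t *testing.T) {
	nodes := []string{"router1"} // pretend ListNodes returned too few nodes

	// require-style: fatal, execution stops here on a mismatch...
	if len(nodes) != 2 {
		t.Fatalf("expected 2 nodes, got %d", len(nodes))
	}

	// ...which is what makes this index safe; with a non-fatal
	// assert-style t.Errorf above, this line would panic instead.
	_ = nodes[1]
}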
diff --git a/integration/scenario.go b/integration/scenario.go
index 5ad02708..eef7e1e8 100644
--- a/integration/scenario.go
+++ b/integration/scenario.go
@@ -109,6 +109,9 @@ type Scenario struct {
 	spec ScenarioSpec
 
 	userToNetwork map[string]*dockertest.Network
+
+	testHashPrefix     string
+	testDefaultNetwork string
 }
 
 // ScenarioSpec describes the users, nodes, and network topology to
@@ -150,11 +153,8 @@ type ScenarioSpec struct {
 	MaxWait time.Duration
 }
 
-var TestHashPrefix = "hs-" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength)
-var TestDefaultNetwork = TestHashPrefix + "-default"
-
-func prefixedNetworkName(name string) string {
-	return TestHashPrefix + "-" + name
+func (s *Scenario) prefixedNetworkName(name string) string {
+	return s.testHashPrefix + "-" + name
 }
 
 // NewScenario creates a test Scenario which can be used to bootstraps a ControlServer with
@@ -169,6 +169,7 @@ func NewScenario(spec ScenarioSpec) (*Scenario, error) {
 	// This might be a no op, but it is worth a try as we sometime
 	// dont clean up nicely after ourselves.
 	dockertestutil.CleanUnreferencedNetworks(pool)
+	dockertestutil.CleanImagesInCI(pool)
 
 	if spec.MaxWait == 0 {
 		pool.MaxWait = dockertestMaxWait()
@@ -176,18 +177,22 @@ func NewScenario(spec ScenarioSpec) (*Scenario, error) {
 		pool.MaxWait = spec.MaxWait
 	}
 
+	testHashPrefix := "hs-" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength)
 	s := &Scenario{
 		controlServers: xsync.NewMapOf[string, ControlServer](),
 		users:          make(map[string]*User),
 
 		pool: pool,
 		spec: spec,
+
+		testHashPrefix:     testHashPrefix,
+		testDefaultNetwork: testHashPrefix + "-default",
 	}
 
 	var userToNetwork map[string]*dockertest.Network
 	if spec.Networks != nil || len(spec.Networks) != 0 {
 		for name, users := range s.spec.Networks {
-			networkName := TestHashPrefix + "-" + name
+			networkName := testHashPrefix + "-" + name
 			network, err := s.AddNetwork(networkName)
 			if err != nil {
 				return nil, err
@@ -201,7 +206,7 @@ func NewScenario(spec ScenarioSpec) (*Scenario, error) {
 			}
 		}
 	} else {
-		_, err := s.AddNetwork(TestDefaultNetwork)
+		_, err := s.AddNetwork(s.testDefaultNetwork)
 		if err != nil {
 			return nil, err
 		}
@@ -213,7 +218,7 @@ func NewScenario(spec ScenarioSpec) (*Scenario, error) {
 		if err != nil {
 			return nil, err
 		}
-		mak.Set(&s.extraServices, prefixedNetworkName(network), append(s.extraServices[prefixedNetworkName(network)], svc))
+		mak.Set(&s.extraServices, s.prefixedNetworkName(network), append(s.extraServices[s.prefixedNetworkName(network)], svc))
 	}
 
@@ -261,7 +266,7 @@ func (s *Scenario) Networks() []*dockertest.Network {
 }
 
 func (s *Scenario) Network(name string) (*dockertest.Network, error) {
-	net, ok := s.networks[prefixedNetworkName(name)]
+	net, ok := s.networks[s.prefixedNetworkName(name)]
 	if !ok {
 		return nil, fmt.Errorf("no network named: %s", name)
 	}
@@ -270,7 +275,7 @@ func (s *Scenario) Network(name string) (*dockertest.Network, error) {
 }
 
 func (s *Scenario) SubnetOfNetwork(name string) (*netip.Prefix, error) {
-	net, ok := s.networks[prefixedNetworkName(name)]
+	net, ok := s.networks[s.prefixedNetworkName(name)]
 	if !ok {
 		return nil, fmt.Errorf("no network named: %s", name)
 	}
@@ -288,7 +293,7 @@ func (s *Scenario) SubnetOfNetwork(name string) (*netip.Prefix, error) {
 }
 
 func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {
-	res, ok := s.extraServices[prefixedNetworkName(name)]
+	res, ok := s.extraServices[s.prefixedNetworkName(name)]
 	if !ok {
 		return nil, fmt.Errorf("no network named: %s", name)
 	}
@@ -298,6 +303,7 @@ func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {
 
 func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
 	defer dockertestutil.CleanUnreferencedNetworks(s.pool)
+	defer dockertestutil.CleanImagesInCI(s.pool)
 
 	s.controlServers.Range(func(_ string, control ControlServer) bool {
 		stdoutPath, stderrPath, err := control.Shutdown()
@@ -493,8 +499,7 @@ func (s *Scenario) CreateTailscaleNode(
 	)
 	if err != nil {
 		return nil, fmt.Errorf(
-			"failed to create tailscale (%s) node: %w",
-			tsClient.Hostname(),
+			"failed to create tailscale node: %w",
 			err,
 		)
 	}
@@ -707,7 +712,7 @@ func (s *Scenario) createHeadscaleEnv(
 		if s.userToNetwork != nil {
 			opts = append(tsOpts, tsic.WithNetwork(s.userToNetwork[user]))
 		} else {
-			opts = append(tsOpts, tsic.WithNetwork(s.networks[TestDefaultNetwork]))
+			opts = append(tsOpts, tsic.WithNetwork(s.networks[s.testDefaultNetwork]))
 		}
 
 		err = s.CreateTailscaleNodesInUser(user, "all", s.spec.NodesPerUser, opts...)
@@ -1181,7 +1186,7 @@ func Webservice(s *Scenario, networkName string) (*dockertest.Resource, error)
 
 	hostname := fmt.Sprintf("hs-webservice-%s", hash)
 
-	network, ok := s.networks[prefixedNetworkName(networkName)]
+	network, ok := s.networks[s.prefixedNetworkName(networkName)]
 	if !ok {
 		return nil, fmt.Errorf("network does not exist: %s", networkName)
 	}
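Moving TestHashPrefix and TestDefaultNetwork from package-level variables to per-Scenario fields means every scenario derives its own namespace, so two scenarios running in parallel can both define a network called "usernet1" without colliding in Docker. An illustrative sketch of the naming scheme — randomSuffix is a toy stand-in for util.MustGenerateRandomStringDNSSafe, and the suffix length is invented:

package main

import (
	"fmt"
	"math/rand"
)

// randomSuffix is a simplified, hypothetical substitute for the real
// DNS-safe random-string helper used by the scenario code.
func randomSuffix(n int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
	b := make([]byte, n)
	for i := range b {
		b[i] = alphabet[rand.Intn(len(alphabet))]
	}
	return string(b)
}

func main() {
	// Each scenario gets its own prefix, mirroring prefixedNetworkName.
	scenarioA := "hs-" + randomSuffix(6)
	scenarioB := "hs-" + randomSuffix(6)
	fmt.Println(scenarioA + "-usernet1") // e.g. hs-x3k9q2-usernet1
	fmt.Println(scenarioB + "-usernet1") // a distinct name, no collision
}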
diff --git a/integration/scenario_test.go b/integration/scenario_test.go
index 7f34fa77..c7f606bb 100644
--- a/integration/scenario_test.go
+++ b/integration/scenario_test.go
@@ -111,7 +111,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
 	})
 
 	t.Run("create-tailscale", func(t *testing.T) {
-		err := scenario.CreateTailscaleNodesInUser(user, "unstable", count, tsic.WithNetwork(scenario.networks[TestDefaultNetwork]))
+		err := scenario.CreateTailscaleNodesInUser(user, "unstable", count, tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
 		if err != nil {
 			t.Fatalf("failed to add tailscale nodes: %s", err)
 		}
diff --git a/integration/ssh_test.go b/integration/ssh_test.go
index 20aefdfd..f6e0e66d 100644
--- a/integration/ssh_test.go
+++ b/integration/ssh_test.go
@@ -410,7 +410,7 @@ func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClien
 	result, _, err := doSSH(t, client, peer)
 	assertNoErr(t, err)
 
-	assertContains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
+	assertContains(t, peer.ContainerID(), strings.ReplaceAll(result, "\n", ""))
 }
 
 func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
diff --git a/integration/tailscale.go b/integration/tailscale.go
index 552fc759..94b08364 100644
--- a/integration/tailscale.go
+++ b/integration/tailscale.go
@@ -5,6 +5,7 @@ import (
 	"net/netip"
 	"net/url"
 
+	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/juanfont/headscale/integration/dockertestutil"
 	"github.com/juanfont/headscale/integration/tsic"
@@ -43,7 +44,8 @@ type TailscaleClient interface {
 	Ping(hostnameOrIP string, opts ...tsic.PingOption) error
 	Curl(url string, opts ...tsic.CurlOption) (string, error)
 	Traceroute(netip.Addr) (util.Traceroute, error)
-	ID() string
+	ContainerID() string
+	MustID() types.NodeID
 	ReadFile(path string) ([]byte, error)
 
 	// FailingPeersAsString returns a formatted-ish multi-line-string of peers in the client
diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go
index 0c8ba734..57770d41 100644
--- a/integration/tsic/tsic.go
+++ b/integration/tsic/tsic.go
@@ -18,6 +18,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/juanfont/headscale/hscontrol/types"
 	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/juanfont/headscale/integration/dockertestutil"
 	"github.com/juanfont/headscale/integration/integrationutil"
@@ -194,7 +195,7 @@ func WithBuildTag(tag string) Option {
 // as part of the Login function.
 func WithExtraLoginArgs(args []string) Option {
 	return func(tsic *TailscaleInContainer) {
-		tsic.extraLoginArgs = args
+		tsic.extraLoginArgs = append(tsic.extraLoginArgs, args...)
 	}
 }
@@ -383,7 +384,7 @@ func (t *TailscaleInContainer) Version() string {
 
 // ID returns the Docker container ID of the TailscaleInContainer
 // instance.
-func (t *TailscaleInContainer) ID() string {
+func (t *TailscaleInContainer) ContainerID() string {
 	return t.container.Container.ID
 }
@@ -426,20 +427,21 @@ func (t *TailscaleInContainer) Logs(stdout, stderr io.Writer) error {
 	)
 }
 
-// Up runs the login routine on the given Tailscale instance.
-// This login mechanism uses the authorised key for authentication.
-func (t *TailscaleInContainer) Login(
+func (t *TailscaleInContainer) buildLoginCommand(
 	loginServer, authKey string,
-) error {
+) []string {
 	command := []string{
 		"tailscale",
 		"up",
 		"--login-server=" + loginServer,
-		"--authkey=" + authKey,
 		"--hostname=" + t.hostname,
 		fmt.Sprintf("--accept-routes=%t", t.withAcceptRoutes),
 	}
 
+	if authKey != "" {
+		command = append(command, "--authkey="+authKey)
+	}
+
 	if t.extraLoginArgs != nil {
 		command = append(command, t.extraLoginArgs...)
 	}
@@ -458,6 +460,16 @@ func (t *TailscaleInContainer) Login(
 		)
 	}
 
+	return command
+}
+
+// Login runs the login routine on the given Tailscale instance.
+// This login mechanism uses the authorised key for authentication.
+func (t *TailscaleInContainer) Login(
+	loginServer, authKey string,
+) error {
+	command := t.buildLoginCommand(loginServer, authKey)
+
 	if _, _, err := t.Execute(command, dockertestutil.ExecuteCommandTimeout(dockerExecuteTimeout)); err != nil {
 		return fmt.Errorf(
 			"%s failed to join tailscale client (%s): %w",
@@ -475,17 +487,7 @@ func (t *TailscaleInContainer) Login(
 func (t *TailscaleInContainer) LoginWithURL(
 	loginServer string,
 ) (loginURL *url.URL, err error) {
-	command := []string{
-		"tailscale",
-		"up",
-		"--login-server=" + loginServer,
-		"--hostname=" + t.hostname,
-		"--accept-routes=false",
-	}
-
-	if t.extraLoginArgs != nil {
-		command = append(command, t.extraLoginArgs...)
-	}
+	command := t.buildLoginCommand(loginServer, "")
 
 	stdout, stderr, err := t.Execute(command)
 	if errors.Is(err, errTailscaleNotLoggedIn) {
@@ -646,7 +648,7 @@ func (t *TailscaleInContainer) Status(save ...bool) (*ipnstate.Status, error) {
 	return &status, err
 }
 
-// Status returns the ipnstate.Status of the Tailscale instance.
+// MustStatus returns the ipnstate.Status of the Tailscale instance.
 func (t *TailscaleInContainer) MustStatus() *ipnstate.Status {
 	status, err := t.Status()
 	if err != nil {
@@ -656,6 +658,21 @@ func (t *TailscaleInContainer) MustStatus() *ipnstate.Status {
 	return status
 }
 
+// MustID returns the ID of the Tailscale instance.
+func (t *TailscaleInContainer) MustID() types.NodeID {
+	status, err := t.Status()
+	if err != nil {
+		panic(err)
+	}
+
+	id, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("failed to parse ID: %s", err))
+	}
+
+	return types.NodeID(id)
+}
+
 // Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
 // Only works with Tailscale 1.56 and newer.
 // Panics if version is lower then minimum.
diff --git a/integration/utils.go b/integration/utils.go
index 1fcdf6c7..440fa663 100644
--- a/integration/utils.go
+++ b/integration/utils.go
@@ -5,7 +5,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"os"
 	"strings"
 	"sync"
 	"testing"
@@ -344,22 +343,10 @@ func isSelfClient(client TailscaleClient, addr string) bool {
 	return false
 }
 
-func isCI() bool {
-	if _, ok := os.LookupEnv("CI"); ok {
-		return true
-	}
-
-	if _, ok := os.LookupEnv("GITHUB_RUN_ID"); ok {
-		return true
-	}
-
-	return false
-}
-
 func dockertestMaxWait() time.Duration {
 	wait := 120 * time.Second //nolint
 
-	if isCI() {
+	if util.IsCI() {
 		wait = 300 * time.Second //nolint
 	}