fix auto approver on register and new policy (#2506)

* fix auto-approve route on register bug

This commit fixes an issue where routes were not approved
on a node during registration. This caused the auto approval
to require the node to re-advertise the routes.

Fixes #2497
Fixes #2485

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* hsic: only set db policy if it exists

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

* policy: calculate changed based on policy and filter

v1 is a bit simpler than v2: it does not pre-calculate the auto-approver map,
so we cannot tell whether that map has changed.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>

---------

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby 2025-03-31 15:55:07 +02:00 committed by GitHub
parent e3521be705
commit 5a18e91317
10 changed files with 575 additions and 217 deletions

View File

@@ -66,12 +66,11 @@ jobs:
- Test2118DeletingOnlineNodePanics
- TestEnablingRoutes
- TestHASubnetRouterFailover
- TestEnableDisableAutoApprovedRoute
- TestAutoApprovedSubRoute2068
- TestSubnetRouteACL
- TestEnablingExitRoutes
- TestSubnetRouterMultiNetwork
- TestSubnetRouterMultiNetworkExitNode
- TestAutoApproveMultiNetwork
- TestHeadscale
- TestTailscaleNodesJoiningHeadcale
- TestSSHOneUserToAll

View File

@@ -66,12 +66,11 @@ jobs:
- Test2118DeletingOnlineNodePanics
- TestEnablingRoutes
- TestHASubnetRouterFailover
- TestEnableDisableAutoApprovedRoute
- TestAutoApprovedSubRoute2068
- TestSubnetRouteACL
- TestEnablingExitRoutes
- TestSubnetRouterMultiNetwork
- TestSubnetRouterMultiNetworkExitNode
- TestAutoApproveMultiNetwork
- TestHeadscale
- TestTailscaleNodesJoiningHeadcale
- TestSSHOneUserToAll

View File

@@ -866,6 +866,11 @@ func (h *Headscale) Serve() error {
log.Info().
Msg("ACL policy successfully reloaded, notifying nodes of change")
err = h.autoApproveNodes()
if err != nil {
log.Error().Err(err).Msg("failed to approve routes after new policy")
}
ctx := types.NotifyCtx(context.Background(), "acl-sighup", "na")
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
@@ -1166,3 +1171,36 @@ func (h *Headscale) loadPolicyManager() error {
return errOut
}
// autoApproveNodes mass approves routes on all nodes. It is _only_ intended for
// use when the policy is replaced. It does not send or report any changes or
// updates, as we send full updates after replacing the policy.
// TODO(kradalby): This is kind of messy, maybe this is another +1
// for an event bus. See example comments here.
func (h *Headscale) autoApproveNodes() error {
err := h.db.Write(func(tx *gorm.DB) error {
nodes, err := db.ListNodes(tx)
if err != nil {
return err
}
for _, node := range nodes {
changed := policy.AutoApproveRoutes(h.polMan, node)
if changed {
err = tx.Save(node).Error
if err != nil {
return err
}
h.primaryRoutes.SetRoutes(node.ID, node.SubnetRoutes()...)
}
}
return nil
})
if err != nil {
return fmt.Errorf("auto approving routes for nodes: %w", err)
}
return nil
}
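For orientation, here is a minimal, self-contained sketch of the contract the loop above relies on: policy.AutoApproveRoutes compares a node's announced routes against the policy's auto-approvers, moves matches into the approved set, and reports whether anything changed so the caller knows to save the node. The types, names, and matching rule below are simplified stand-ins, not the actual headscale implementation.

package main

import (
	"fmt"
	"net/netip"
)

// Simplified stand-ins for the real headscale types (hypothetical).
type Node struct {
	AnnouncedRoutes []netip.Prefix // what the node advertises via Hostinfo
	ApprovedRoutes  []netip.Prefix // what the server has approved
}

type PolicyManager struct {
	// Prefixes this node is entitled to have auto-approved; tag/user
	// matching is elided for brevity.
	autoApproved []netip.Prefix
}

// canApprove reports whether r equals, or is contained in, an auto-approved
// prefix (e.g. a /24 inside an approved /16).
func (pm *PolicyManager) canApprove(r netip.Prefix) bool {
	for _, a := range pm.autoApproved {
		if a.Contains(r.Addr()) && r.Bits() >= a.Bits() {
			return true
		}
	}
	return false
}

// autoApproveRoutes mirrors the helper's contract: approve what the policy
// allows and report whether the node changed, so callers (registration and
// the mass re-approval above) know when to persist the node.
func autoApproveRoutes(pm *PolicyManager, node *Node) bool {
	already := make(map[netip.Prefix]bool, len(node.ApprovedRoutes))
	for _, r := range node.ApprovedRoutes {
		already[r] = true
	}
	changed := false
	for _, r := range node.AnnouncedRoutes {
		if !already[r] && pm.canApprove(r) {
			node.ApprovedRoutes = append(node.ApprovedRoutes, r)
			changed = true
		}
	}
	return changed
}

func main() {
	pm := &PolicyManager{autoApproved: []netip.Prefix{netip.MustParsePrefix("10.42.0.0/16")}}
	n := &Node{AnnouncedRoutes: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}}
	fmt.Println(autoApproveRoutes(pm, n), n.ApprovedRoutes) // true [10.42.7.0/24]
}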

View File

@@ -10,6 +10,7 @@ import (
"time"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"gorm.io/gorm"
@@ -212,6 +213,9 @@ func (h *Headscale) handleRegisterWithAuthKey(
nodeToRegister.Expiry = &regReq.Expiry
}
// Ensure any auto approved routes are handled before saving.
policy.AutoApproveRoutes(h.polMan, &nodeToRegister)
ipv4, ipv6, err := h.ipAlloc.Next()
if err != nil {
return nil, fmt.Errorf("allocating IPs: %w", err)
@@ -266,7 +270,7 @@ func (h *Headscale) handleRegisterInteractive(
return nil, fmt.Errorf("generating registration ID: %w", err)
}
newNode := types.RegisterNode{
nodeToRegister := types.RegisterNode{
Node: types.Node{
Hostname: regReq.Hostinfo.Hostname,
MachineKey: machineKey,
@@ -278,12 +282,15 @@
}
if !regReq.Expiry.IsZero() {
newNode.Node.Expiry = &regReq.Expiry
nodeToRegister.Node.Expiry = &regReq.Expiry
}
// Ensure any auto approved routes are handled before saving.
policy.AutoApproveRoutes(h.polMan, &nodeToRegister.Node)
h.registrationCache.Set(
registrationId,
newNode,
nodeToRegister,
)
return &tailcfg.RegisterResponse{
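The important detail in both register paths above is ordering: policy.AutoApproveRoutes mutates the node before it is saved (auth-key path) or put into the registration cache (interactive path), so routes announced in the registration request's Hostinfo are approved without a later re-advertisement. A toy, runnable illustration of why the order matters when the cache stores values (names are hypothetical):

package main

import (
	"fmt"
	"net/netip"
)

// Toy registration cache that stores nodes by value, as the registration
// cache above stores types.RegisterNode.
type node struct{ ApprovedRoutes []netip.Prefix }

func main() {
	cache := map[string]node{}
	n := node{}

	// The fix in miniature: approve first, then store. Storing first would
	// put a copy without the approved routes into the cache.
	n.ApprovedRoutes = append(n.ApprovedRoutes, netip.MustParsePrefix("172.0.0.0/24"))
	cache["registration-id"] = n

	fmt.Println(cache["registration-id"].ApprovedRoutes) // [172.0.0.0/24]
}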

View File

@@ -739,6 +739,11 @@ func (api headscaleV1APIServer) SetPolicy(
// Only send update if the packet filter has changed.
if changed {
err = api.h.autoApproveNodes()
if err != nil {
return nil, err
}
ctx := types.NotifyCtx(context.Background(), "acl-update", "na")
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}

View File

@@ -53,14 +53,15 @@ func NewPolicyManager(polB []byte, users []types.User, nodes types.Nodes) (*Poli
}
type PolicyManager struct {
mu sync.Mutex
pol *ACLPolicy
mu sync.Mutex
pol *ACLPolicy
polHash deephash.Sum
users []types.User
nodes types.Nodes
filterHash deephash.Sum
filter []tailcfg.FilterRule
filterHash deephash.Sum
}
// updateLocked updates the filter rules based on the current policy and nodes.
@@ -71,13 +72,16 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
return false, fmt.Errorf("compiling filter rules: %w", err)
}
polHash := deephash.Hash(pm.pol)
filterHash := deephash.Hash(&filter)
if filterHash == pm.filterHash {
if polHash == pm.polHash && filterHash == pm.filterHash {
return false, nil
}
pm.filter = filter
pm.filterHash = filterHash
pm.polHash = polHash
return true, nil
}
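The change above is the commit-message point in code: in v1 the filter is compiled from the policy, and an auto-approver edit does not necessarily change the compiled filter, so hashing only the filter can report "unchanged" for a policy change that matters. Hashing the policy as well catches it. Below is a self-contained demonstration using the same tailscale.com/util/deephash package; the Policy struct and compileFilter are hypothetical stand-ins.

package main

import (
	"fmt"

	"tailscale.com/util/deephash"
)

// Hypothetical stand-in: a policy whose compiled filter does not depend on
// the auto-approvers.
type Policy struct {
	ACLs        []string
	AutoApprove map[string][]string
}

// compileFilter mimics v1: the filter is derived from the ACLs only.
func compileFilter(p *Policy) []string { return p.ACLs }

func main() {
	p1 := Policy{
		ACLs:        []string{"accept * -> *:*"},
		AutoApprove: map[string][]string{"10.42.0.0/16": {"tag:approve"}},
	}
	p2 := p1
	p2.AutoApprove = nil // auto-approvers removed; ACLs unchanged

	f1, f2 := compileFilter(&p1), compileFilter(&p2)

	// Hashing only the filter misses the change...
	fmt.Println(deephash.Hash(&f1) == deephash.Hash(&f2)) // true
	// ...hashing the policy too catches it, as updateLocked now does.
	fmt.Println(deephash.Hash(&p1) == deephash.Hash(&p2)) // false
}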

View File

@@ -1,7 +1,6 @@
package integration
import (
"encoding/json"
"fmt"
"net/netip"
"strings"
@@ -9,6 +8,7 @@ import (
"github.com/google/go-cmp/cmp"
policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
@@ -1033,9 +1033,7 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
tsic.WithDockerWorkdir("/"),
},
hsic.WithTestName("policyreload"),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_POLICY_MODE": "database",
}),
hsic.WithPolicyMode(types.PolicyModeDB),
)
require.NoError(t, err)
@@ -1086,24 +1084,7 @@
Hosts: policyv1.Hosts{},
}
pBytes, _ := json.Marshal(p)
policyFilePath := "/etc/headscale/policy.json"
err = headscale.WriteFile(policyFilePath, pBytes)
require.NoError(t, err)
// No policy is present at this time.
// Add a new policy from a file.
_, err = headscale.Execute(
[]string{
"headscale",
"policy",
"set",
"-f",
policyFilePath,
},
)
err = headscale.SetPolicy(&p)
require.NoError(t, err)
// Get the current policy and check

View File

@@ -4,6 +4,7 @@ import (
"net/netip"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
"github.com/ory/dockertest/v3"
)
@@ -24,4 +25,5 @@ type ControlServer interface {
ApproveRoutes(uint64, []netip.Prefix) (*v1.Node, error)
GetCert() []byte
GetHostname() string
SetPolicy(*policyv1.ACLPolicy) error
}

View File

@@ -71,6 +71,7 @@ type HeadscaleInContainer struct {
filesInContainer []fileInContainer
postgres bool
policyV2 bool
policyMode types.PolicyMode
}
// Option represent optional settings that can be given to a
@@ -195,6 +196,14 @@ func WithPolicyV2() Option {
}
}
// WithPolicyMode sets the policy mode for headscale
func WithPolicyMode(mode types.PolicyMode) Option {
return func(hsic *HeadscaleInContainer) {
hsic.policyMode = mode
hsic.env["HEADSCALE_POLICY_MODE"] = string(mode)
}
}
// WithIPAllocationStrategy sets the test's IP allocation strategy.
func WithIPAllocationStrategy(strategy types.IPAllocationStrategy) Option {
return func(hsic *HeadscaleInContainer) {
@@ -286,6 +295,7 @@ func New(
env: DefaultConfigEnv(),
filesInContainer: []fileInContainer{},
policyMode: types.PolicyModeFile,
}
for _, opt := range opts {
@@ -412,14 +422,9 @@
}
if hsic.aclPolicy != nil {
data, err := json.Marshal(hsic.aclPolicy)
err = hsic.writePolicy(hsic.aclPolicy)
if err != nil {
return nil, fmt.Errorf("failed to marshal ACL Policy to JSON: %w", err)
}
err = hsic.WriteFile(aclPolicyPath, data)
if err != nil {
return nil, fmt.Errorf("failed to write ACL policy to container: %w", err)
return nil, fmt.Errorf("writing policy: %w", err)
}
}
@@ -441,6 +446,15 @@
}
}
// Load the policy into the database, retrying until it succeeds;
// this is needed because the container sleeps before starting headscale.
if hsic.aclPolicy != nil && hsic.policyMode == types.PolicyModeDB {
err := pool.Retry(hsic.reloadDatabasePolicy)
if err != nil {
return nil, fmt.Errorf("loading database policy on startup: %w", err)
}
}
return hsic, nil
}
@@ -822,6 +836,116 @@ func (t *HeadscaleInContainer) ListUsers() ([]*v1.User, error) {
return users, nil
}
func (h *HeadscaleInContainer) SetPolicy(pol *policyv1.ACLPolicy) error {
err := h.writePolicy(pol)
if err != nil {
return fmt.Errorf("writing policy file: %w", err)
}
switch h.policyMode {
case types.PolicyModeDB:
err := h.reloadDatabasePolicy()
if err != nil {
return fmt.Errorf("reloading database policy: %w", err)
}
case types.PolicyModeFile:
err := h.Reload()
if err != nil {
return fmt.Errorf("reloading policy file: %w", err)
}
default:
panic("policy mode is not valid: " + h.policyMode)
}
return nil
}
func (h *HeadscaleInContainer) reloadDatabasePolicy() error {
_, err := h.Execute(
[]string{
"headscale",
"policy",
"set",
"-f",
aclPolicyPath,
},
)
if err != nil {
return fmt.Errorf("setting policy with db command: %w", err)
}
return nil
}
func (h *HeadscaleInContainer) writePolicy(pol *policyv1.ACLPolicy) error {
pBytes, err := json.Marshal(pol)
if err != nil {
return fmt.Errorf("marshalling pol: %w", err)
}
err = h.WriteFile(aclPolicyPath, pBytes)
if err != nil {
return fmt.Errorf("writing policy to headscale container: %w", err)
}
return nil
}
func (h *HeadscaleInContainer) PID() (int, error) {
cmd := []string{"bash", "-c", `ps aux | grep headscale | grep -v grep | awk '{print $2}'`}
output, err := h.Execute(cmd)
if err != nil {
return 0, fmt.Errorf("failed to execute command: %w", err)
}
lines := strings.TrimSpace(output)
if lines == "" {
return 0, os.ErrNotExist // No output means no process found
}
pids := make([]int, 0, len(lines))
for _, line := range strings.Split(lines, "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
pidInt, err := strconv.Atoi(line)
if err != nil {
return 0, fmt.Errorf("parsing PID: %w", err)
}
// We don't care about the container's root PID (PID 1)
if pidInt == 1 {
continue
}
pids = append(pids, pidInt)
}
switch len(pids) {
case 0:
return 0, os.ErrNotExist
case 1:
return pids[0], nil
default:
return 0, fmt.Errorf("multiple headscale processes running")
}
}
// Reload sends a SIGHUP to the headscale process to reload internals,
// for example the policy from a file.
func (h *HeadscaleInContainer) Reload() error {
pid, err := h.PID()
if err != nil {
return fmt.Errorf("getting headscale PID: %w", err)
}
_, err = h.Execute([]string{"kill", "-HUP", strconv.Itoa(pid)})
if err != nil {
return fmt.Errorf("reloading headscale with HUP: %w", err)
}
return nil
}
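Reload depends on headscale's SIGHUP handling: per the Serve hunk earlier in this diff, the server reloads the ACL policy on SIGHUP and, with this commit, also re-runs auto-approval before notifying nodes. A minimal sketch of that server-side signal pattern (not headscale's actual code):

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	hup := make(chan os.Signal, 1)
	signal.Notify(hup, syscall.SIGHUP)
	for range hup {
		// On SIGHUP: reload the policy, re-run auto-approval so nodes
		// don't need to re-advertise routes, then notify all nodes.
		log.Println("SIGHUP: reloading policy and re-approving routes")
	}
}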
// ApproveRoutes approves routes for a node.
func (t *HeadscaleInContainer) ApproveRoutes(id uint64, routes []netip.Prefix) (*v1.Node, error) {
command := []string{

View File

@@ -13,6 +13,7 @@ import (
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
@@ -768,178 +769,6 @@ func TestHASubnetRouterFailover(t *testing.T) {
assertTracerouteViaIP(t, tr, subRouter2.MustIPv4())
}
func TestEnableDisableAutoApprovedRoute(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
expectedRoutes := "172.0.0.0/24"
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{"user1"},
}
scenario, err := NewScenario(spec)
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{
tsic.WithTags([]string{"tag:approve"}),
tsic.WithAcceptRoutes(),
}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
&policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
TagOwners: map[string][]string{
"tag:approve": {"user1@"},
},
AutoApprovers: policyv1.AutoApprovers{
Routes: map[string][]string{
expectedRoutes: {"tag:approve"},
},
},
},
))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
subRouter1 := allClients[0]
// Initially advertise route
command := []string{
"tailscale",
"set",
"--advertise-routes=" + expectedRoutes,
}
_, _, err = subRouter1.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(10 * time.Second)
nodes, err := headscale.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 1)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
// Stop advertising route
command = []string{
"tailscale",
"set",
`--advertise-routes=`,
}
_, _, err = subRouter1.Execute(command)
require.NoErrorf(t, err, "failed to remove advertised route: %s", err)
time.Sleep(10 * time.Second)
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 1)
assertNodeRouteCount(t, nodes[0], 0, 1, 0)
// Advertise route again
command = []string{
"tailscale",
"set",
"--advertise-routes=" + expectedRoutes,
}
_, _, err = subRouter1.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(10 * time.Second)
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 1)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
}
func TestAutoApprovedSubRoute2068(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
expectedRoutes := "10.42.7.0/24"
user := "user1"
spec := ScenarioSpec{
NodesPerUser: 1,
Users: []string{user},
}
scenario, err := NewScenario(spec)
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
err = scenario.CreateHeadscaleEnv([]tsic.Option{
tsic.WithTags([]string{"tag:approve"}),
tsic.WithAcceptRoutes(),
},
hsic.WithTestName("clienableroute"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithACLPolicy(
&policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
TagOwners: map[string][]string{
"tag:approve": {user + "@"},
},
AutoApprovers: policyv1.AutoApprovers{
Routes: map[string][]string{
"10.42.0.0/16": {"tag:approve"},
},
},
},
))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
subRouter1 := allClients[0]
// Initially advertise route
command := []string{
"tailscale",
"set",
"--advertise-routes=" + expectedRoutes,
}
_, _, err = subRouter1.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(10 * time.Second)
nodes, err := headscale.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 1)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
}
// TestSubnetRouteACL verifies that Subnet routes are distributed
// as expected when ACLs are activated.
// It implements the issue from
@@ -1390,7 +1219,6 @@ func TestSubnetRouterMultiNetwork(t *testing.T) {
assertTracerouteViaIP(t, tr, user1c.MustIPv4())
}
// TestSubnetRouterMultiNetworkExitNode
func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
@@ -1469,10 +1297,7 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
}
// Enable route
_, err = headscale.ApproveRoutes(
nodes[0].Id,
[]netip.Prefix{tsaddr.AllIPv4()},
)
_, err = headscale.ApproveRoutes(nodes[0].Id, []netip.Prefix{tsaddr.AllIPv4()})
require.NoError(t, err)
time.Sleep(5 * time.Second)
@@ -1524,6 +1349,380 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
require.NoError(t, err)
}
// TestAutoApproveMultiNetwork tests auto-approving of routes
// by setting up two networks where network1 has three subnet
// routers:
// - routerUsernet1: advertising the docker network
// - routerSubRoute: advertising a subroute, a /24 inside an auto-approved /16
// - routerExitNode: advertising an exit node
//
// Each router is tested step by step through the following scenarios:
// - Policy is set to auto-approve the node's route
// - Node advertises the route and it is verified that it is auto-approved and sent to peers
// - Policy is changed to _not_ auto-approve the route
// - Verify that peers can still see the node
// - Disable the route, making it unavailable
// - Verify that peers can no longer use the node
// - Policy is changed back to auto-approve the route; check that already-existing routes are approved
// - Verify that the routes can now be seen by peers.
func TestAutoApproveMultiNetwork(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
spec := ScenarioSpec{
NodesPerUser: 3,
Users: []string{"user1", "user2"},
Networks: map[string][]string{
"usernet1": {"user1"},
"usernet2": {"user2"},
},
ExtraService: map[string][]extraServiceFunc{
"usernet1": {Webservice},
},
// We build the head image with curl and traceroute, so only use
// that for this test.
Versions: []string{"head"},
}
rootRoute := netip.MustParsePrefix("10.42.0.0/16")
subRoute := netip.MustParsePrefix("10.42.7.0/24")
notApprovedRoute := netip.MustParsePrefix("192.168.0.0/24")
scenario, err := NewScenario(spec)
require.NoErrorf(t, err, "failed to create scenario: %s", err)
defer scenario.ShutdownAssertNoPanics(t)
pol := &policyv1.ACLPolicy{
ACLs: []policyv1.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
TagOwners: map[string][]string{
"tag:approve": {"user1@"},
},
AutoApprovers: policyv1.AutoApprovers{
Routes: map[string][]string{
rootRoute.String(): {"tag:approve"},
},
ExitNode: []string{"tag:approve"},
},
}
err = scenario.CreateHeadscaleEnv([]tsic.Option{
tsic.WithAcceptRoutes(),
tsic.WithTags([]string{"tag:approve"}),
},
hsic.WithTestName("clienableroute"),
hsic.WithEmbeddedDERPServerOnly(),
hsic.WithTLS(),
hsic.WithACLPolicy(pol),
hsic.WithPolicyMode(types.PolicyModeDB),
)
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
assert.NotNil(t, headscale)
route, err := scenario.SubnetOfNetwork("usernet1")
require.NoError(t, err)
// Set the route of usernet1 to be auto-approved
pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
err = headscale.SetPolicy(pol)
require.NoError(t, err)
services, err := scenario.Services("usernet1")
require.NoError(t, err)
require.Len(t, services, 1)
usernet1, err := scenario.Network("usernet1")
require.NoError(t, err)
web := services[0]
webip := netip.MustParseAddr(web.GetIPInNetwork(usernet1))
weburl := fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("webservice: %s, %s", webip.String(), weburl)
// Sort nodes by ID
sort.SliceStable(allClients, func(i, j int) bool {
statusI := allClients[i].MustStatus()
statusJ := allClients[j].MustStatus()
return statusI.Self.ID < statusJ.Self.ID
})
// This is OK because the scenario creates users in order, so the first
// three nodes, which are the subnet routers, belong to the first user, and
// the client node belongs to the second.
routerUsernet1 := allClients[0]
routerSubRoute := allClients[1]
routerExitNode := allClients[2]
client := allClients[3]
// Advertise the route for the dockersubnet of user1
command := []string{
"tailscale",
"set",
"--advertise-routes=" + route.String(),
}
_, _, err = routerUsernet1.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(5 * time.Second)
// This route should be auto-approved, so the node is expected to show
// the route in all counts.
nodes, err := headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
// Verify that the routes have been sent to the client.
status, err := client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
url := fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)
result, err := client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)
tr, err := client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
// Remove the auto approval from the policy, any routes already enabled should be allowed.
delete(pol.AutoApprovers.Routes, route.String())
err = headscale.SetPolicy(pol)
require.NoError(t, err)
time.Sleep(5 * time.Second)
// The route was already approved, so removing the auto-approver must not
// revoke it; all counts remain.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
url = fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)
result, err = client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)
tr, err = client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
// Disable the route, making it unavailable since it is no longer auto-approved
_, err = headscale.ApproveRoutes(
nodes[0].GetId(),
[]netip.Prefix{},
)
require.NoError(t, err)
time.Sleep(5 * time.Second)
// The route was disabled and is no longer auto-approved, so only the
// announced count remains.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 0, 0)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
requirePeerSubnetRoutes(t, peerStatus, nil)
}
// Add the route back to the auto approver in the policy, the route should
// now become available again.
pol.AutoApprovers.Routes[route.String()] = []string{"tag:approve"}
err = headscale.SetPolicy(pol)
require.NoError(t, err)
time.Sleep(5 * time.Second)
// The route should be auto-approved again, so all counts are restored.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
require.NotNil(t, peerStatus.PrimaryRoutes)
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
url = fmt.Sprintf("http://%s/etc/hostname", webip)
t.Logf("url from %s to %s", client.Hostname(), url)
result, err = client.Curl(url)
require.NoError(t, err)
assert.Len(t, result, 13)
tr, err = client.Traceroute(webip)
require.NoError(t, err)
assertTracerouteViaIP(t, tr, routerUsernet1.MustIPv4())
// Advertise and validate a subnet of an auto approved route, /24 inside the
// auto approved /16.
command = []string{
"tailscale",
"set",
"--advertise-routes=" + subRoute.String(),
}
_, _, err = routerSubRoute.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(5 * time.Second)
// The sub-route lies inside the auto-approved /16, so it should be approved
// and both routers show full counts.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 1)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else if peerStatus.ID == "2" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), subRoute)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{subRoute})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
// Advertising a route that is not auto-approved should not end up anywhere
command = []string{
"tailscale",
"set",
"--advertise-routes=" + notApprovedRoute.String(),
}
_, _, err = routerSubRoute.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(5 * time.Second)
// The new route is not auto-approved: it shows as announced on the
// sub-router, but it is not served.
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 0)
assertNodeRouteCount(t, nodes[2], 0, 0, 0)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
// Exit routes are also automatically approved
command = []string{
"tailscale",
"set",
"--advertise-exit-node",
}
_, _, err = routerExitNode.Execute(command)
require.NoErrorf(t, err, "failed to advertise route: %s", err)
time.Sleep(5 * time.Second)
nodes, err = headscale.ListNodes()
require.NoError(t, err)
assertNodeRouteCount(t, nodes[0], 1, 1, 1)
assertNodeRouteCount(t, nodes[1], 1, 1, 0)
assertNodeRouteCount(t, nodes[2], 2, 2, 2)
// Verify that the routes have been sent to the client.
status, err = client.Status()
require.NoError(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
if peerStatus.ID == "1" {
assert.Contains(t, peerStatus.PrimaryRoutes.AsSlice(), *route)
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{*route})
} else if peerStatus.ID == "3" {
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
} else {
requirePeerSubnetRoutes(t, peerStatus, nil)
}
}
}
func assertTracerouteViaIP(t *testing.T, tr util.Traceroute, ip netip.Addr) {
t.Helper()