chore: fix autogroup:self with other acl rules (#2842)

Vitalij Dovhanyc
2025-11-02 11:48:27 +01:00
committed by GitHub
parent 02c7c1a0e7
commit af2de35b6c
4 changed files with 568 additions and 184 deletions
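
The core of the fix, in sketch form: instead of flagging an ACL as "has autogroup:self" and then applying same-user source filtering to every destination in the rule, the compiler now splits each ACL's destinations into two buckets and emits an independent filter rule per bucket. The following is a minimal, self-contained illustration of that splitting step only; the types here are simplified stand-ins, not headscale's real ACL/AliasWithPorts/tailcfg.FilterRule structs.

    package main

    import "fmt"

    // Simplified stand-ins for the real policy types; an illustration,
    // not headscale's API.
    type dest struct {
    	name            string
    	isAutogroupSelf bool
    }

    type acl struct {
    	sources []string
    	dests   []dest
    }

    // splitDestinations mirrors the shape of the fix: a single ACL that
    // mixes autogroup:self with other destinations is compiled into two
    // independent rule sets, each with its own source-filtering behavior.
    func splitDestinations(a acl) (selfDests, otherDests []dest) {
    	for _, d := range a.dests {
    		if d.isAutogroupSelf {
    			selfDests = append(selfDests, d)
    		} else {
    			otherDests = append(otherDests, d)
    		}
    	}
    	return selfDests, otherDests
    }

    func main() {
    	a := acl{
    		sources: []string{"autogroup:member"},
    		dests:   []dest{{"autogroup:self", true}, {"tag:router", false}},
    	}
    	self, other := splitDestinations(a)
    	// Previously, one autogroup:self destination switched the whole rule
    	// to same-user source filtering, starving every other destination.
    	fmt.Printf("%d dest(s) get same-user-filtered sources\n", len(self))
    	fmt.Printf("%d dest(s) keep the fully resolved sources\n", len(other))
    }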

View File

@@ -99,14 +99,16 @@ func (pol *Policy) compileFilterRulesForNode(
 			return nil, ErrInvalidAction
 		}

-		rule, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
+		aclRules, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
 		if err != nil {
 			log.Trace().Err(err).Msgf("compiling ACL")
 			continue
 		}

-		if rule != nil {
-			rules = append(rules, *rule)
+		for _, rule := range aclRules {
+			if rule != nil {
+				rules = append(rules, *rule)
+			}
 		}
 	}
@@ -115,27 +117,32 @@ func (pol *Policy) compileFilterRulesForNode(
 // compileACLWithAutogroupSelf compiles a single ACL rule, handling
 // autogroup:self per-node while supporting all other alias types normally.
+// It returns a slice of filter rules because when an ACL has both autogroup:self
+// and other destinations, they need to be split into separate rules with different
+// source filtering logic.
 func (pol *Policy) compileACLWithAutogroupSelf(
 	acl ACL,
 	users types.Users,
 	node types.NodeView,
 	nodes views.Slice[types.NodeView],
-) (*tailcfg.FilterRule, error) {
-	// Check if any destination uses autogroup:self
-	hasAutogroupSelfInDst := false
+) ([]*tailcfg.FilterRule, error) {
+	var autogroupSelfDests []AliasWithPorts
+	var otherDests []AliasWithPorts
 	for _, dest := range acl.Destinations {
 		if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
-			hasAutogroupSelfInDst = true
-			break
+			autogroupSelfDests = append(autogroupSelfDests, dest)
+		} else {
+			otherDests = append(otherDests, dest)
 		}
 	}

-	var srcIPs netipx.IPSetBuilder
+	protocols, _ := acl.Protocol.parseProtocol()
+	var rules []*tailcfg.FilterRule
+	var resolvedSrcIPs []*netipx.IPSet

+	// Resolve sources to only include devices from the same user as the target node.
 	for _, src := range acl.Sources {
-		// autogroup:self is not allowed in sources
 		if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
 			return nil, fmt.Errorf("autogroup:self cannot be used in sources")
 		}
@@ -147,92 +154,121 @@ func (pol *Policy) compileACLWithAutogroupSelf(
 		}

 		if ips != nil {
-			if hasAutogroupSelfInDst {
-				// Instead of iterating all addresses (which could be millions),
-				// check each node's IPs against the source set
-				for _, n := range nodes.All() {
-					if n.User().ID == node.User().ID && !n.IsTagged() {
-						// Check if any of this node's IPs are in the source set
-						for _, nodeIP := range n.IPs() {
-							if ips.Contains(nodeIP) {
-								n.AppendToIPSet(&srcIPs)
-								break // Found this node, move to next
-							}
-						}
-					}
-				}
-			} else {
-				// No autogroup:self in destination, use all resolved sources
-				srcIPs.AddSet(ips)
-			}
+			resolvedSrcIPs = append(resolvedSrcIPs, ips)
 		}
 	}

-	srcSet, err := srcIPs.IPSet()
-	if err != nil {
-		return nil, err
+	if len(resolvedSrcIPs) == 0 {
+		return rules, nil
 	}

-	if srcSet == nil || len(srcSet.Prefixes()) == 0 {
-		// No sources resolved, skip this rule
-		return nil, nil //nolint:nilnil
-	}
+	// Handle autogroup:self destinations (if any)
+	if len(autogroupSelfDests) > 0 {
+		// Pre-filter to same-user untagged devices once - reuse for both sources and destinations
+		sameUserNodes := make([]types.NodeView, 0)
+		for _, n := range nodes.All() {
+			if n.User().ID == node.User().ID && !n.IsTagged() {
+				sameUserNodes = append(sameUserNodes, n)
+			}
+		}

-	protocols, _ := acl.Protocol.parseProtocol()
+		if len(sameUserNodes) > 0 {
+			// Filter sources to only same-user untagged devices
+			var srcIPs netipx.IPSetBuilder
+			for _, ips := range resolvedSrcIPs {
+				for _, n := range sameUserNodes {
+					// Check if any of this node's IPs are in the source set
+					for _, nodeIP := range n.IPs() {
+						if ips.Contains(nodeIP) {
+							n.AppendToIPSet(&srcIPs)
+							break
+						}
+					}
+				}
+			}

-	var destPorts []tailcfg.NetPortRange
+			srcSet, err := srcIPs.IPSet()
+			if err != nil {
+				return nil, err
+			}

-	for _, dest := range acl.Destinations {
-		if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
-			for _, n := range nodes.All() {
-				if n.User().ID == node.User().ID && !n.IsTagged() {
-					for _, port := range dest.Ports {
-						for _, ip := range n.IPs() {
-							pr := tailcfg.NetPortRange{
-								IP:    ip.String(),
-								Ports: port,
-							}
-							destPorts = append(destPorts, pr)
+			if srcSet != nil && len(srcSet.Prefixes()) > 0 {
+				var destPorts []tailcfg.NetPortRange
+				for _, dest := range autogroupSelfDests {
+					for _, n := range sameUserNodes {
+						for _, port := range dest.Ports {
+							for _, ip := range n.IPs() {
+								destPorts = append(destPorts, tailcfg.NetPortRange{
+									IP:    ip.String(),
+									Ports: port,
+								})
+							}
 						}
 					}
 				}
+
+				if len(destPorts) > 0 {
+					rules = append(rules, &tailcfg.FilterRule{
+						SrcIPs:   ipSetToPrefixStringList(srcSet),
+						DstPorts: destPorts,
+						IPProto:  protocols,
+					})
+				}
 			}
-		} else {
-			ips, err := dest.Resolve(pol, users, nodes)
-			if err != nil {
-				log.Trace().Err(err).Msgf("resolving destination ips")
-				continue
-			}
-			if ips == nil {
-				log.Debug().Msgf("destination resolved to nil ips: %v", dest)
-				continue
-			}
+		}
+	}

-			prefixes := ips.Prefixes()
+	if len(otherDests) > 0 {
+		var srcIPs netipx.IPSetBuilder

-			for _, pref := range prefixes {
-				for _, port := range dest.Ports {
-					pr := tailcfg.NetPortRange{
-						IP:    pref.String(),
-						Ports: port,
-					}
-					destPorts = append(destPorts, pr)
-				}
-			}
+		for _, ips := range resolvedSrcIPs {
+			srcIPs.AddSet(ips)
+		}
+
+		srcSet, err := srcIPs.IPSet()
+		if err != nil {
+			return nil, err
+		}
+
+		if srcSet != nil && len(srcSet.Prefixes()) > 0 {
+			var destPorts []tailcfg.NetPortRange
+
+			for _, dest := range otherDests {
+				ips, err := dest.Resolve(pol, users, nodes)
+				if err != nil {
+					log.Trace().Err(err).Msgf("resolving destination ips")
+					continue
+				}
+				if ips == nil {
+					log.Debug().Msgf("destination resolved to nil ips: %v", dest)
+					continue
+				}
+
+				prefixes := ips.Prefixes()
+				for _, pref := range prefixes {
+					for _, port := range dest.Ports {
+						pr := tailcfg.NetPortRange{
+							IP:    pref.String(),
+							Ports: port,
+						}
+						destPorts = append(destPorts, pr)
+					}
+				}
+			}
+
+			if len(destPorts) > 0 {
+				rules = append(rules, &tailcfg.FilterRule{
+					SrcIPs:   ipSetToPrefixStringList(srcSet),
+					DstPorts: destPorts,
+					IPProto:  protocols,
+				})
+			}
 		}
 	}

-	if len(destPorts) == 0 {
-		// No destinations resolved, skip this rule
-		return nil, nil //nolint:nilnil
-	}
-
-	return &tailcfg.FilterRule{
-		SrcIPs:   ipSetToPrefixStringList(srcSet),
-		DstPorts: destPorts,
-		IPProto:  protocols,
-	}, nil
+	return rules, nil
 }

 func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
@@ -260,46 +296,30 @@ func (pol *Policy) compileSSHPolicy(
 	var rules []*tailcfg.SSHRule

 	for index, rule := range pol.SSHs {
-		// Check if any destination uses autogroup:self
-		hasAutogroupSelfInDst := false
+		// Separate destinations into autogroup:self and others
+		// This is needed because autogroup:self requires filtering sources to same-user only,
+		// while other destinations should use all resolved sources
+		var autogroupSelfDests []Alias
+		var otherDests []Alias
 		for _, dst := range rule.Destinations {
 			if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
-				hasAutogroupSelfInDst = true
-				break
-			}
-		}
-
-		// If autogroup:self is used, skip tagged nodes
-		if hasAutogroupSelfInDst && node.IsTagged() {
-			continue
-		}
-
-		var dest netipx.IPSetBuilder
-		for _, src := range rule.Destinations {
-			// Handle autogroup:self specially
-			if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
-				// For autogroup:self, only include the target user's untagged devices
-				for _, n := range nodes.All() {
-					if n.User().ID == node.User().ID && !n.IsTagged() {
-						n.AppendToIPSet(&dest)
-					}
-				}
+				autogroupSelfDests = append(autogroupSelfDests, dst)
 			} else {
-				ips, err := src.Resolve(pol, users, nodes)
-				if err != nil {
-					log.Trace().Caller().Err(err).Msgf("resolving destination ips")
-					continue
-				}
-				dest.AddSet(ips)
+				otherDests = append(otherDests, dst)
 			}
 		}

-		destSet, err := dest.IPSet()
+		// Note: Tagged nodes can't match autogroup:self destinations, but can still match other destinations
+
+		// Resolve sources once - we'll use them differently for each destination type
+		srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
 		if err != nil {
-			return nil, err
+			log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
+			continue // Skip this rule if we can't resolve sources
 		}

-		if !node.InIPSet(destSet) {
+		if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
 			continue
 		}
@@ -313,50 +333,9 @@ func (pol *Policy) compileSSHPolicy(
 			return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d: %w", rule.Action, index, err)
 		}

-		var principals []*tailcfg.SSHPrincipal
-		srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
-		if err != nil {
-			log.Trace().Caller().Err(err).Msgf("SSH policy compilation failed resolving source ips for rule %+v", rule)
-			continue // Skip this rule if we can't resolve sources
-		}
-
-		// If autogroup:self is in destinations, filter sources to same user only
-		if hasAutogroupSelfInDst {
-			var filteredSrcIPs netipx.IPSetBuilder
-			// Instead of iterating all addresses, check each node's IPs
-			for _, n := range nodes.All() {
-				if n.User().ID == node.User().ID && !n.IsTagged() {
-					// Check if any of this node's IPs are in the source set
-					for _, nodeIP := range n.IPs() {
-						if srcIPs.Contains(nodeIP) {
-							n.AppendToIPSet(&filteredSrcIPs)
-							break // Found this node, move to next
-						}
-					}
-				}
-			}
-
-			srcIPs, err = filteredSrcIPs.IPSet()
-			if err != nil {
-				return nil, err
-			}
-
-			if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
-				// No valid sources after filtering, skip this rule
-				continue
-			}
-		}
-
-		for addr := range util.IPSetAddrIter(srcIPs) {
-			principals = append(principals, &tailcfg.SSHPrincipal{
-				NodeIP: addr.String(),
-			})
-		}
-
 		userMap := make(map[string]string, len(rule.Users))
 		if rule.Users.ContainsNonRoot() {
 			userMap["*"] = "="
 			// by default, we do not allow root unless explicitly stated
 			userMap["root"] = ""
 		}
@@ -366,11 +345,108 @@ func (pol *Policy) compileSSHPolicy(
 		for _, u := range rule.Users.NormalUsers() {
 			userMap[u.String()] = u.String()
 		}

-		rules = append(rules, &tailcfg.SSHRule{
-			Principals: principals,
-			SSHUsers:   userMap,
-			Action:     &action,
-		})
+		// Handle autogroup:self destinations (if any)
+		// Note: Tagged nodes can't match autogroup:self, so skip this block for tagged nodes
+		if len(autogroupSelfDests) > 0 && !node.IsTagged() {
+			// Build destination set for autogroup:self (same-user untagged devices only)
+			var dest netipx.IPSetBuilder
+			for _, n := range nodes.All() {
+				if n.User().ID == node.User().ID && !n.IsTagged() {
+					n.AppendToIPSet(&dest)
+				}
+			}
+
+			destSet, err := dest.IPSet()
+			if err != nil {
+				return nil, err
+			}
+
+			// Only create rule if this node is in the destination set
+			if node.InIPSet(destSet) {
+				// Filter sources to only same-user untagged devices
+				// Pre-filter to same-user untagged devices for efficiency
+				sameUserNodes := make([]types.NodeView, 0)
+				for _, n := range nodes.All() {
+					if n.User().ID == node.User().ID && !n.IsTagged() {
+						sameUserNodes = append(sameUserNodes, n)
+					}
+				}
+
+				var filteredSrcIPs netipx.IPSetBuilder
+				for _, n := range sameUserNodes {
+					// Check if any of this node's IPs are in the source set
+					for _, nodeIP := range n.IPs() {
+						if srcIPs.Contains(nodeIP) {
+							n.AppendToIPSet(&filteredSrcIPs)
+							break // Found this node, move to next
+						}
+					}
+				}
+
+				filteredSrcSet, err := filteredSrcIPs.IPSet()
+				if err != nil {
+					return nil, err
+				}
+
+				if filteredSrcSet != nil && len(filteredSrcSet.Prefixes()) > 0 {
+					var principals []*tailcfg.SSHPrincipal
+					for addr := range util.IPSetAddrIter(filteredSrcSet) {
+						principals = append(principals, &tailcfg.SSHPrincipal{
+							NodeIP: addr.String(),
+						})
+					}
+
+					if len(principals) > 0 {
+						rules = append(rules, &tailcfg.SSHRule{
+							Principals: principals,
+							SSHUsers:   userMap,
+							Action:     &action,
+						})
+					}
+				}
+			}
+		}
+
+		// Handle other destinations (if any)
+		if len(otherDests) > 0 {
+			// Build destination set for other destinations
+			var dest netipx.IPSetBuilder
+			for _, dst := range otherDests {
+				ips, err := dst.Resolve(pol, users, nodes)
+				if err != nil {
+					log.Trace().Caller().Err(err).Msgf("resolving destination ips")
+					continue
+				}
+				if ips != nil {
+					dest.AddSet(ips)
+				}
+			}
+
+			destSet, err := dest.IPSet()
+			if err != nil {
+				return nil, err
+			}
+
+			// Only create rule if this node is in the destination set
+			if node.InIPSet(destSet) {
+				// For non-autogroup:self destinations, use all resolved sources (no filtering)
+				var principals []*tailcfg.SSHPrincipal
+				for addr := range util.IPSetAddrIter(srcIPs) {
+					principals = append(principals, &tailcfg.SSHPrincipal{
+						NodeIP: addr.String(),
+					})
+				}
+
+				if len(principals) > 0 {
+					rules = append(rules, &tailcfg.SSHRule{
+						Principals: principals,
+						SSHUsers:   userMap,
+						Action:     &action,
+					})
+				}
+			}
+		}
 	}

 	return &tailcfg.SSHPolicy{
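
Both the ACL and SSH paths above rely on the same membership trick for autogroup:self: rather than enumerating every address in the resolved source set (which can be huge for CIDR sources), they walk the candidate same-user untagged nodes and keep a node only if one of its IPs is already in the set; a hit admits all of that node's addresses. Below is a rough, self-contained sketch of that check using go4.org/netipx, the set library the diff uses; the node type and the filterSameUser helper are hypothetical simplifications, not headscale's types.NodeView API.

    package main

    import (
    	"fmt"
    	"net/netip"

    	"go4.org/netipx"
    )

    // Toy stand-in for headscale's types.NodeView.
    type node struct {
    	userID int
    	tagged bool
    	ips    []netip.Addr
    }

    // filterSameUser keeps only same-user, untagged nodes whose IPs already
    // appear in the resolved source set, and returns their addresses as a set.
    func filterSameUser(src *netipx.IPSet, nodes []node, userID int) (*netipx.IPSet, error) {
    	var b netipx.IPSetBuilder
    	for _, n := range nodes {
    		if n.userID != userID || n.tagged {
    			continue
    		}
    		for _, ip := range n.ips {
    			if src.Contains(ip) {
    				// One hit admits all of this node's addresses.
    				for _, a := range n.ips {
    					b.Add(a)
    				}
    				break
    			}
    		}
    	}
    	return b.IPSet()
    }

    func main() {
    	var b netipx.IPSetBuilder
    	b.AddPrefix(netip.MustParsePrefix("100.64.0.0/24")) // resolved sources
    	src, _ := b.IPSet()

    	nodes := []node{
    		{userID: 1, ips: []netip.Addr{netip.MustParseAddr("100.64.0.1")}},
    		{userID: 2, ips: []netip.Addr{netip.MustParseAddr("100.64.0.3")}}, // other user, dropped
    	}
    	filtered, _ := filterSameUser(src, nodes, 1)
    	fmt.Println(filtered.Prefixes()) // only user 1's addresses survive
    }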

View File

@@ -1339,3 +1339,70 @@ func TestSSHWithAutogroupSelfExcludesTaggedDevices(t *testing.T) {
 		assert.Empty(t, sshPolicy2.Rules, "tagged node should get no SSH rules with autogroup:self")
 	}
 }
+
+// TestSSHWithAutogroupSelfAndMixedDestinations tests that SSH rules can have both
+// autogroup:self and other destinations (like tag:router) in the same rule, and that
+// autogroup:self filtering only applies to autogroup:self destinations, not others.
+func TestSSHWithAutogroupSelfAndMixedDestinations(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+		{Model: gorm.Model{ID: 2}, Name: "user2"},
+	}
+
+	nodes := types.Nodes{
+		{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "user1-device"},
+		{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "user1-device2"},
+		{User: users[1], IPv4: ap("100.64.0.3"), Hostname: "user2-device"},
+		{User: users[1], IPv4: ap("100.64.0.4"), Hostname: "user2-router", ForcedTags: []string{"tag:router"}},
+	}
+
+	policy := &Policy{
+		TagOwners: TagOwners{
+			Tag("tag:router"): Owners{up("user2@")},
+		},
+		SSHs: []SSH{
+			{
+				Action:       "accept",
+				Sources:      SSHSrcAliases{agp("autogroup:member")},
+				Destinations: SSHDstAliases{agp("autogroup:self"), tp("tag:router")},
+				Users:        []SSHUser{"admin"},
+			},
+		},
+	}
+
+	err := policy.validate()
+	require.NoError(t, err)
+
+	// Test 1: Compile for user1's device (should only match autogroup:self destination)
+	node1 := nodes[0].View()
+	sshPolicy1, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.NotNil(t, sshPolicy1)
+	require.Len(t, sshPolicy1.Rules, 1, "user1's device should have 1 SSH rule (autogroup:self)")
+
+	// Verify autogroup:self rule has filtered sources (only same-user devices)
+	selfRule := sshPolicy1.Rules[0]
+	require.Len(t, selfRule.Principals, 2, "autogroup:self rule should only have user1's devices")
+	selfPrincipals := make([]string, len(selfRule.Principals))
+	for i, p := range selfRule.Principals {
+		selfPrincipals[i] = p.NodeIP
+	}
+	require.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, selfPrincipals,
+		"autogroup:self rule should only include same-user untagged devices")
+
+	// Test 2: Compile for router (should only match tag:router destination)
+	routerNode := nodes[3].View() // user2-router
+	sshPolicyRouter, err := policy.compileSSHPolicy(users, routerNode, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.NotNil(t, sshPolicyRouter)
+	require.Len(t, sshPolicyRouter.Rules, 1, "router should have 1 SSH rule (tag:router)")
+
+	routerRule := sshPolicyRouter.Rules[0]
+	routerPrincipals := make([]string, len(routerRule.Principals))
+	for i, p := range routerRule.Principals {
+		routerPrincipals[i] = p.NodeIP
+	}
+	require.Contains(t, routerPrincipals, "100.64.0.1", "router rule should include user1's device (unfiltered sources)")
+	require.Contains(t, routerPrincipals, "100.64.0.2", "router rule should include user1's other device (unfiltered sources)")
+	require.Contains(t, routerPrincipals, "100.64.0.3", "router rule should include user2's device (unfiltered sources)")
+}

View File

@@ -2,6 +2,7 @@ package v2
 import (
 	"net/netip"
+	"slices"
 	"testing"

 	"github.com/google/go-cmp/cmp"
@@ -439,3 +440,82 @@ func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
 	require.Empty(t, peerMap[node1.ID], "node1 should have no peers (can only reach itself)")
 	require.Empty(t, peerMap[node2.ID], "node2 should have no peers")
 }
+
+// When separate ACL rules exist (one with autogroup:self, one with tag:router),
+// the autogroup:self rule should not prevent the tag:router rule from working.
+// This ensures that autogroup:self doesn't interfere with other ACL rules.
+func TestAutogroupSelfWithOtherRules(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "test-1", Email: "test-1@example.com"},
+		{Model: gorm.Model{ID: 2}, Name: "test-2", Email: "test-2@example.com"},
+	}
+
+	// test-1 has a regular device
+	test1Node := &types.Node{
+		ID:       1,
+		Hostname: "test-1-device",
+		IPv4:     ap("100.64.0.1"),
+		IPv6:     ap("fd7a:115c:a1e0::1"),
+		User:     users[0],
+		UserID:   users[0].ID,
+		Hostinfo: &tailcfg.Hostinfo{},
+	}
+
+	// test-2 has a router device with tag:node-router
+	test2RouterNode := &types.Node{
+		ID:         2,
+		Hostname:   "test-2-router",
+		IPv4:       ap("100.64.0.2"),
+		IPv6:       ap("fd7a:115c:a1e0::2"),
+		User:       users[1],
+		UserID:     users[1].ID,
+		ForcedTags: []string{"tag:node-router"},
+		Hostinfo:   &tailcfg.Hostinfo{},
+	}
+
+	nodes := types.Nodes{test1Node, test2RouterNode}
+
+	// This matches the exact policy from issue #2838:
+	// - First rule: autogroup:member -> autogroup:self (allows users to see their own devices)
+	// - Second rule: group:home -> tag:node-router (should allow group members to see router)
+	policy := `{
+		"groups": {
+			"group:home": ["test-1@example.com", "test-2@example.com"]
+		},
+		"tagOwners": {
+			"tag:node-router": ["group:home"]
+		},
+		"acls": [
+			{
+				"action": "accept",
+				"src": ["autogroup:member"],
+				"dst": ["autogroup:self:*"]
+			},
+			{
+				"action": "accept",
+				"src": ["group:home"],
+				"dst": ["tag:node-router:*"]
+			}
+		]
+	}`
+
+	pm, err := NewPolicyManager([]byte(policy), users, nodes.ViewSlice())
+	require.NoError(t, err)
+
+	peerMap := pm.BuildPeerMap(nodes.ViewSlice())
+
+	// test-1 (in group:home) should see:
+	// 1. Their own node (from autogroup:self rule)
+	// 2. The router node (from group:home -> tag:node-router rule)
+	test1Peers := peerMap[test1Node.ID]
+
+	// Verify test-1 can see the router (group:home -> tag:node-router rule)
+	require.True(t, slices.ContainsFunc(test1Peers, func(n types.NodeView) bool {
+		return n.ID() == test2RouterNode.ID
+	}), "test-1 should see test-2's router via group:home -> tag:node-router rule, even when autogroup:self rule exists (issue #2838)")
+
+	// Verify that test-1 has filter rules (including autogroup:self and tag:node-router access)
+	rules, err := pm.FilterForNode(test1Node.View())
+	require.NoError(t, err)
+	require.NotEmpty(t, rules, "test-1 should have filter rules from both ACL rules")
+}

View File

@@ -1611,37 +1611,170 @@ func TestACLAutogroupTagged(t *testing.T) {
 }

 // Test that only devices owned by the same user can access each other and cannot access devices of other users
+// Test structure:
+// - user1: 2 regular nodes (tests autogroup:self for same-user access)
+// - user2: 2 regular nodes (tests autogroup:self for same-user access and cross-user isolation)
+// - user-router: 1 node with tag:router-node (tests that autogroup:self doesn't interfere with other rules)
 func TestACLAutogroupSelf(t *testing.T) {
 	IntegrationSkip(t)

-	scenario := aclScenario(t,
-		&policyv2.Policy{
-			ACLs: []policyv2.ACL{
-				{
-					Action:  "accept",
-					Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
-					Destinations: []policyv2.AliasWithPorts{
-						aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
-					},
-				},
-			},
-		},
-		2,
-	)
+	// Policy with TWO separate ACL rules:
+	// 1. autogroup:member -> autogroup:self (same-user access)
+	// 2. group:home -> tag:router-node (router access)
+	// This tests that autogroup:self doesn't prevent other rules from working
+	policy := &policyv2.Policy{
+		Groups: policyv2.Groups{
+			policyv2.Group("group:home"): []policyv2.Username{
+				policyv2.Username("user1@"),
+				policyv2.Username("user2@"),
+			},
+		},
+		TagOwners: policyv2.TagOwners{
+			policyv2.Tag("tag:router-node"): policyv2.Owners{
+				usernameOwner("user-router@"),
+			},
+		},
+		ACLs: []policyv2.ACL{
+			{
+				Action:  "accept",
+				Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
+				Destinations: []policyv2.AliasWithPorts{
+					aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
+				},
+			},
+			{
+				Action:  "accept",
+				Sources: []policyv2.Alias{groupp("group:home")},
+				Destinations: []policyv2.AliasWithPorts{
+					aliasWithPorts(tagp("tag:router-node"), tailcfg.PortRangeAny),
+				},
+			},
+			{
+				Action:  "accept",
+				Sources: []policyv2.Alias{tagp("tag:router-node")},
+				Destinations: []policyv2.AliasWithPorts{
+					aliasWithPorts(groupp("group:home"), tailcfg.PortRangeAny),
+				},
+			},
+		},
+	}
+
+	// Create custom scenario: user1 and user2 with regular nodes, plus user-router with tagged node
+	spec := ScenarioSpec{
+		NodesPerUser: 2,
+		Users:        []string{"user1", "user2"},
+	}
+
+	scenario, err := NewScenario(spec)
+	require.NoError(t, err)
 	defer scenario.ShutdownAssertNoPanics(t)

-	err := scenario.WaitForTailscaleSyncWithPeerCount(1, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
+	err = scenario.CreateHeadscaleEnv(
+		[]tsic.Option{
+			tsic.WithNetfilter("off"),
+			tsic.WithDockerEntrypoint([]string{
+				"/bin/sh",
+				"-c",
+				"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
+			}),
+			tsic.WithDockerWorkdir("/"),
+		},
+		hsic.WithACLPolicy(policy),
+		hsic.WithTestName("acl-autogroup-self"),
+		hsic.WithEmbeddedDERPServerOnly(),
+		hsic.WithTLS(),
+	)
 	require.NoError(t, err)

+	// Add router node for user-router (single shared router node)
+	networks := scenario.Networks()
+	var network *dockertest.Network
+	if len(networks) > 0 {
+		network = networks[0]
+	}
+
+	headscale, err := scenario.Headscale()
+	require.NoError(t, err)
+
+	routerUser, err := scenario.CreateUser("user-router")
+	require.NoError(t, err)
+
+	authKey, err := scenario.CreatePreAuthKey(routerUser.GetId(), true, false)
+	require.NoError(t, err)
+
+	// Create router node (tagged with tag:router-node)
+	routerClient, err := tsic.New(
+		scenario.Pool(),
+		"unstable",
+		tsic.WithCACert(headscale.GetCert()),
+		tsic.WithHeadscaleName(headscale.GetHostname()),
+		tsic.WithNetwork(network),
+		tsic.WithTags([]string{"tag:router-node"}),
+		tsic.WithNetfilter("off"),
+		tsic.WithDockerEntrypoint([]string{
+			"/bin/sh",
+			"-c",
+			"/bin/sleep 3 ; apk add python3 curl ; update-ca-certificates ; python3 -m http.server --bind :: 80 & tailscaled --tun=tsdev",
+		}),
+		tsic.WithDockerWorkdir("/"),
+	)
+	require.NoError(t, err)
+
+	err = routerClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
+	require.NoError(t, err)
+
+	err = routerClient.Login(headscale.GetEndpoint(), authKey.GetKey())
+	require.NoError(t, err)
+
+	err = routerClient.WaitForRunning(integrationutil.PeerSyncTimeout())
+	require.NoError(t, err)
+
+	userRouterObj := scenario.GetOrCreateUser("user-router")
+	userRouterObj.Clients[routerClient.Hostname()] = routerClient
+
 	user1Clients, err := scenario.GetClients("user1")
 	require.NoError(t, err)

 	user2Clients, err := scenario.GetClients("user2")
 	require.NoError(t, err)

-	// Test that user1's devices can access each other
-	for _, client := range user1Clients {
-		for _, peer := range user1Clients {
+	var user1Regular, user2Regular []TailscaleClient
+	for _, client := range user1Clients {
+		status, err := client.Status()
+		require.NoError(t, err)
+		if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
+			user1Regular = append(user1Regular, client)
+		}
+	}
+	for _, client := range user2Clients {
+		status, err := client.Status()
+		require.NoError(t, err)
+		if status.Self != nil && (status.Self.Tags == nil || status.Self.Tags.Len() == 0) {
+			user2Regular = append(user2Regular, client)
+		}
+	}
+
+	require.NotEmpty(t, user1Regular, "user1 should have regular (untagged) devices")
+	require.NotEmpty(t, user2Regular, "user2 should have regular (untagged) devices")
+	require.NotNil(t, routerClient, "router node should exist")
+
+	// Wait for all nodes to sync with their expected peer counts
+	// With our ACL policy:
+	// - Regular nodes (user1/user2): 1 same-user regular peer + 1 router-node = 2 peers
+	// - Router node: 2 user1 regular + 2 user2 regular = 4 peers
+	for _, client := range user1Regular {
+		err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
+		require.NoError(t, err, "user1 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
+	}
+	for _, client := range user2Regular {
+		err := client.WaitForPeers(2, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
+		require.NoError(t, err, "user2 regular device %s should see 2 peers (1 same-user peer + 1 router)", client.Hostname())
+	}
+	err = routerClient.WaitForPeers(4, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
+	require.NoError(t, err, "router should see 4 peers (all group:home regular nodes)")
+
+	// Test that user1's regular devices can access each other
+	for _, client := range user1Regular {
+		for _, peer := range user1Regular {
 			if client.Hostname() == peer.Hostname() {
 				continue
 			}
@@ -1656,13 +1789,13 @@ func TestACLAutogroupSelf(t *testing.T) {
 				result, err := client.Curl(url)
 				assert.NoError(c, err)
 				assert.Len(c, result, 13)
-			}, 10*time.Second, 200*time.Millisecond, "user1 device should reach other user1 device")
+			}, 10*time.Second, 200*time.Millisecond, "user1 device should reach other user1 device via autogroup:self")
 		}
 	}

-	// Test that user2's devices can access each other
-	for _, client := range user2Clients {
-		for _, peer := range user2Clients {
+	// Test that user2's regular devices can access each other
+	for _, client := range user2Regular {
+		for _, peer := range user2Regular {
 			if client.Hostname() == peer.Hostname() {
 				continue
 			}
@@ -1677,36 +1810,64 @@ func TestACLAutogroupSelf(t *testing.T) {
 				result, err := client.Curl(url)
 				assert.NoError(c, err)
 				assert.Len(c, result, 13)
-			}, 10*time.Second, 200*time.Millisecond, "user2 device should reach other user2 device")
+			}, 10*time.Second, 200*time.Millisecond, "user2 device should reach other user2 device via autogroup:self")
 		}
 	}

-	// Test that devices from different users cannot access each other
-	for _, client := range user1Clients {
-		for _, peer := range user2Clients {
+	// Test that user1's regular devices can access router-node
+	for _, client := range user1Regular {
+		fqdn, err := routerClient.FQDN()
+		require.NoError(t, err)
+
+		url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
+		t.Logf("url from %s (user1) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)
+
+		assert.EventuallyWithT(t, func(c *assert.CollectT) {
+			result, err := client.Curl(url)
+			assert.NoError(c, err)
+			assert.NotEmpty(c, result, "user1 should be able to access router-node via group:home -> tag:router-node rule")
+		}, 10*time.Second, 200*time.Millisecond, "user1 device should reach router-node (proves autogroup:self doesn't interfere)")
+	}
+
+	// Test that user2's regular devices can access router-node
+	for _, client := range user2Regular {
+		fqdn, err := routerClient.FQDN()
+		require.NoError(t, err)
+
+		url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
+		t.Logf("url from %s (user2) to %s (router-node) - should SUCCEED", client.Hostname(), fqdn)
+
+		assert.EventuallyWithT(t, func(c *assert.CollectT) {
+			result, err := client.Curl(url)
+			assert.NoError(c, err)
+			assert.NotEmpty(c, result, "user2 should be able to access router-node via group:home -> tag:router-node rule")
+		}, 10*time.Second, 200*time.Millisecond, "user2 device should reach router-node (proves autogroup:self doesn't interfere)")
+	}
+
+	// Test that devices from different users cannot access each other's regular devices
+	for _, client := range user1Regular {
+		for _, peer := range user2Regular {
 			fqdn, err := peer.FQDN()
 			require.NoError(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
-			t.Logf("url from %s (user1) to %s (user2) - should FAIL", client.Hostname(), fqdn)
+			t.Logf("url from %s (user1) to %s (user2 regular) - should FAIL", client.Hostname(), fqdn)

 			result, err := client.Curl(url)
-			assert.Empty(t, result, "user1 should not be able to access user2's devices with autogroup:self")
-			assert.Error(t, err, "connection from user1 to user2 should fail")
+			assert.Empty(t, result, "user1 should not be able to access user2's regular devices (autogroup:self isolation)")
+			assert.Error(t, err, "connection from user1 to user2 regular device should fail")
 		}
 	}

-	for _, client := range user2Clients {
-		for _, peer := range user1Clients {
+	for _, client := range user2Regular {
+		for _, peer := range user1Regular {
 			fqdn, err := peer.FQDN()
 			require.NoError(t, err)

 			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
-			t.Logf("url from %s (user2) to %s (user1) - should FAIL", client.Hostname(), fqdn)
+			t.Logf("url from %s (user2) to %s (user1 regular) - should FAIL", client.Hostname(), fqdn)

 			result, err := client.Curl(url)
-			assert.Empty(t, result, "user2 should not be able to access user1's devices with autogroup:self")
-			assert.Error(t, err, "connection from user2 to user1 should fail")
+			assert.Empty(t, result, "user2 should not be able to access user1's regular devices (autogroup:self isolation)")
+			assert.Error(t, err, "connection from user2 to user1 regular device should fail")
 		}
 	}
 }