policy: fix autogroup:self propagation and optimize cache invalidation (#2807)

This commit is contained in:
Kristoffer Dalby
2025-10-23 17:57:41 +02:00
committed by GitHub
parent 66826232ff
commit 2bf1200483
32 changed files with 3318 additions and 1770 deletions

View File

@@ -1,6 +1,6 @@
package capver
//Generated DO NOT EDIT
// Generated DO NOT EDIT
import "tailscale.com/tailcfg"
@@ -37,16 +37,15 @@ var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
"v1.84.2": 116,
}
var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
90: "v1.64.0",
95: "v1.66.0",
97: "v1.68.0",
102: "v1.70.0",
104: "v1.72.0",
106: "v1.74.0",
109: "v1.78.0",
113: "v1.80.0",
115: "v1.82.0",
116: "v1.84.0",
90: "v1.64.0",
95: "v1.66.0",
97: "v1.68.0",
102: "v1.70.0",
104: "v1.72.0",
106: "v1.74.0",
109: "v1.78.0",
113: "v1.80.0",
115: "v1.82.0",
116: "v1.84.0",
}

View File

@@ -185,7 +185,6 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
}
})
}
}
func TestShuffleDERPMapEdgeCases(t *testing.T) {

View File

@@ -73,7 +73,6 @@ func (b *LockFreeBatcher) AddNode(id types.NodeID, c chan<- *tailcfg.MapResponse
// Use the worker pool for controlled concurrency instead of direct generation
initialMap, err := b.MapResponseFromChange(id, change.FullSelf(id))
if err != nil {
log.Error().Uint64("node.id", id.Uint64()).Err(err).Msg("Initial map generation failed")
nodeConn.removeConnectionByChannel(c)

View File

@@ -7,7 +7,6 @@ import (
"time"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/types"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
@@ -181,6 +180,9 @@ func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
return b
}
// FilterForNode returns rules already reduced to only those relevant for this node.
// For autogroup:self policies, it returns per-node compiled rules.
// For global policies, it returns the global filter reduced for this node.
filter, err := b.mapper.state.FilterForNode(node)
if err != nil {
b.addError(err)
@@ -192,7 +194,7 @@ func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
// new PacketFilters field and "base" allows us to send a full update when we
// have to send an empty list, avoiding the hack in the else block.
b.resp.PacketFilters = map[string][]tailcfg.FilterRule{
"base": policy.ReduceFilterRules(node, filter),
"base": filter,
}
return b
@@ -231,18 +233,19 @@ func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) (
return nil, errors.New("node not found")
}
// Use per-node filter to handle autogroup:self
filter, err := b.mapper.state.FilterForNode(node)
// Get unreduced matchers for peer relationship determination.
// MatchersForNode returns unreduced matchers that include all rules where the node
// could be either source or destination. This is different from FilterForNode which
// returns reduced rules for packet filtering (only rules where node is destination).
matchers, err := b.mapper.state.MatchersForNode(node)
if err != nil {
return nil, err
}
matchers := matcher.MatchesFromFilterRules(filter)
// If there are filter rules present, see if there are any nodes that cannot
// access each-other at all and remove them from the peers.
var changedViews views.Slice[types.NodeView]
if len(filter) > 0 {
if len(matchers) > 0 {
changedViews = policy.ReduceNodes(node, peers, matchers)
} else {
changedViews = peers
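An illustrative sketch (not part of this commit) of how the two per-node accessors above are meant to be combined: FilterForNode is already reduced and goes straight into the packet filter, while MatchersForNode is unreduced and only drives peer visibility. The package name, the helper name, and the assumption that PolicyManager is the exported interface from hscontrol/policy are hypothetical.

package example

import (
	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"
	"tailscale.com/types/views"
)

// filterAndPeersForNode is a hypothetical helper mirroring the builder logic above.
func filterAndPeersForNode(
	pm policy.PolicyManager,
	node types.NodeView,
	peers views.Slice[types.NodeView],
) ([]tailcfg.FilterRule, views.Slice[types.NodeView], error) {
	filter, err := pm.FilterForNode(node) // reduced rules, used directly as PacketFilters["base"]
	if err != nil {
		return nil, peers, err
	}

	matchers, err := pm.MatchersForNode(node) // unreduced matchers, only for peer visibility
	if err != nil {
		return nil, peers, err
	}

	visible := peers
	if len(matchers) > 0 {
		visible = policy.ReduceNodes(node, peers, matchers)
	}

	return filter, visible, nil
}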

View File

@@ -15,6 +15,10 @@ type PolicyManager interface {
Filter() ([]tailcfg.FilterRule, []matcher.Match)
// FilterForNode returns filter rules for a specific node, handling autogroup:self
FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)
// MatchersForNode returns matchers for peer relationship determination (unreduced)
MatchersForNode(node types.NodeView) ([]matcher.Match, error)
// BuildPeerMap constructs peer relationship maps for the given nodes
BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView
SSHPolicy(types.NodeView) (*tailcfg.SSHPolicy, error)
SetPolicy([]byte) (bool, error)
SetUsers(users []types.User) (bool, error)
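A minimal consumption sketch for BuildPeerMap (illustrative, not part of this commit; pm and nodes are assumed to come from the caller). For global policies a peer appears in a node's list if access is allowed in either direction; for autogroup:self the relationships are recorded per direction.

peerMap := pm.BuildPeerMap(nodes)
for _, n := range nodes.All() {
	visible := peerMap[n.ID()] // peers this node is allowed to learn about
	_ = visible
}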

View File

@@ -10,7 +10,6 @@ import (
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
)
@@ -79,66 +78,6 @@ func BuildPeerMap(
return ret
}
// ReduceFilterRules takes a node and a set of rules and removes all rules and destinations
// that are not relevant to that particular node.
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
ret := []tailcfg.FilterRule{}
for _, rule := range rules {
// record if the rule is actually relevant for the given node.
var dests []tailcfg.NetPortRange
DEST_LOOP:
for _, dest := range rule.DstPorts {
expanded, err := util.ParseIPSet(dest.IP, nil)
// Fail closed, if we can't parse it, then we should not allow
// access.
if err != nil {
continue DEST_LOOP
}
if node.InIPSet(expanded) {
dests = append(dests, dest)
continue DEST_LOOP
}
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
if node.Hostinfo().Valid() {
routableIPs := node.Hostinfo().RoutableIPs()
if routableIPs.Len() > 0 {
for _, routableIP := range routableIPs.All() {
if expanded.OverlapsPrefix(routableIP) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
}
// Also check approved subnet routes - nodes should have access
// to subnets they're approved to route traffic for.
subnetRoutes := node.SubnetRoutes()
for _, subnetRoute := range subnetRoutes {
if expanded.OverlapsPrefix(subnetRoute) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
if len(dests) > 0 {
ret = append(ret, tailcfg.FilterRule{
SrcIPs: rule.SrcIPs,
DstPorts: dests,
IPProto: rule.IPProto,
})
}
}
return ret
}
// ApproveRoutesWithPolicy checks if the node can approve the announced routes
// and returns the new list of approved routes.
// The approved routes will include:

View File

@@ -1,7 +1,6 @@
package policy
import (
"encoding/json"
"fmt"
"net/netip"
"testing"
@@ -11,12 +10,9 @@ import (
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/util/must"
)
var ap = func(ipStr string) *netip.Addr {
@@ -29,817 +25,6 @@ var p = func(prefStr string) netip.Prefix {
return ip
}
// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when
// we use headscale "autogroup:internet".
var hsExitNodeDestForTest = []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "2000::/3", Ports: tailcfg.PortRangeAny},
}
func TestTheInternet(t *testing.T) {
internetSet := util.TheInternet()
internetPrefs := internetSet.Prefixes()
for i := range internetPrefs {
if internetPrefs[i].String() != hsExitNodeDestForTest[i].IP {
t.Errorf(
"prefix from internet set %q != hsExit list %q",
internetPrefs[i].String(),
hsExitNodeDestForTest[i].IP,
)
}
}
if len(internetPrefs) != len(hsExitNodeDestForTest) {
t.Fatalf(
"expected same length of prefixes, internet: %d, hsExit: %d",
len(internetPrefs),
len(hsExitNodeDestForTest),
)
}
}
func TestReduceFilterRules(t *testing.T) {
users := types.Users{
types.User{Model: gorm.Model{ID: 1}, Name: "mickael"},
types.User{Model: gorm.Model{ID: 2}, Name: "user1"},
types.User{Model: gorm.Model{ID: 3}, Name: "user2"},
types.User{Model: gorm.Model{ID: 4}, Name: "user100"},
types.User{Model: gorm.Model{ID: 5}, Name: "user3"},
}
tests := []struct {
name string
node *types.Node
peers types.Nodes
pol string
want []tailcfg.FilterRule
}{
{
name: "host1-can-reach-host2-no-rules",
pol: `
{
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"100.64.0.1"
],
"dst": [
"100.64.0.2:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
User: users[0],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
User: users[0],
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1604-subnet-routers-are-preserved",
pol: `
{
"groups": {
"group:admins": [
"user1@"
]
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"group:admins:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"10.33.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.1/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::1/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "10.33.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-client",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
// "internal" exit node
&types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1786-reducing-breaks-exit-nodes-the-exit",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: hsExitNodeDestForTest,
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-example-from-issue",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"0.0.0.0/5:*",
"8.0.0.0/7:*",
"11.0.0.0/8:*",
"12.0.0.0/6:*",
"16.0.0.0/4:*",
"32.0.0.0/3:*",
"64.0.0.0/2:*",
"128.0.0.0/3:*",
"160.0.0.0/5:*",
"168.0.0.0/6:*",
"172.0.0.0/12:*",
"172.32.0.0/11:*",
"172.64.0.0/10:*",
"172.128.0.0/9:*",
"173.0.0.0/8:*",
"174.0.0.0/7:*",
"176.0.0.0/4:*",
"192.0.0.0/9:*",
"192.128.0.0/11:*",
"192.160.0.0/13:*",
"192.169.0.0/16:*",
"192.170.0.0/15:*",
"192.172.0.0/14:*",
"192.176.0.0/12:*",
"192.192.0.0/10:*",
"193.0.0.0/8:*",
"194.0.0.0/7:*",
"196.0.0.0/6:*",
"200.0.0.0/5:*",
"208.0.0.0/4:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/8:*",
"16.0.0.0/8:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like2",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/16:*",
"16.0.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1817-reduce-breaks-32-mask",
pol: `
{
"tagOwners": {
"tag:access-servers": ["user100@"],
},
"groups": {
"group:access": [
"user1@"
]
},
"hosts": {
"dns1": "172.16.0.21/32",
"vlan1": "172.16.0.0/24"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:access"
],
"dst": [
"tag:access-servers:*",
"dns1:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
},
ForcedTags: []string{"tag:access-servers"},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
{
IP: "172.16.0.21/32",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "2365-only-route-policy",
pol: `
{
"hosts": {
"router": "100.64.0.1/32",
"node": "100.64.0.2/32"
},
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"router:8000"
]
},
{
"action": "accept",
"src": [
"node"
],
"dst": [
"172.26.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[3],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
ApprovedRoutes: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
},
want: []tailcfg.FilterRule{},
},
}
for _, tt := range tests {
for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.pol)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm PolicyManager
var err error
pm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())
require.NoError(t, err)
got, _ := pm.Filter()
t.Logf("full filter:\n%s", must.Get(json.MarshalIndent(got, "", " ")))
got = ReduceFilterRules(tt.node.View(), got)
if diff := cmp.Diff(tt.want, got); diff != "" {
log.Trace().Interface("got", got).Msg("result")
t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff)
}
})
}
}
}
func TestReduceNodes(t *testing.T) {
type args struct {
nodes types.Nodes

View File

@@ -0,0 +1,71 @@
package policyutil
import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"tailscale.com/tailcfg"
)
// ReduceFilterRules takes a node and a set of global filter rules and removes all rules
// and destinations that are not relevant to that particular node.
//
// IMPORTANT: This function is designed for global filters only. Per-node filters
// (from autogroup:self policies) are already node-specific and should not be passed
// to this function. Use PolicyManager.FilterForNode() instead, which handles both cases.
func ReduceFilterRules(node types.NodeView, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
ret := []tailcfg.FilterRule{}
for _, rule := range rules {
// record if the rule is actually relevant for the given node.
var dests []tailcfg.NetPortRange
DEST_LOOP:
for _, dest := range rule.DstPorts {
expanded, err := util.ParseIPSet(dest.IP, nil)
// Fail closed: if we can't parse it, we should not allow
// access.
if err != nil {
continue DEST_LOOP
}
if node.InIPSet(expanded) {
dests = append(dests, dest)
continue DEST_LOOP
}
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
if node.Hostinfo().Valid() {
routableIPs := node.Hostinfo().RoutableIPs()
if routableIPs.Len() > 0 {
for _, routableIP := range routableIPs.All() {
if expanded.OverlapsPrefix(routableIP) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
}
// Also check approved subnet routes - nodes should have access
// to subnets they're approved to route traffic for.
subnetRoutes := node.SubnetRoutes()
for _, subnetRoute := range subnetRoutes {
if expanded.OverlapsPrefix(subnetRoute) {
dests = append(dests, dest)
continue DEST_LOOP
}
}
}
if len(dests) > 0 {
ret = append(ret, tailcfg.FilterRule{
SrcIPs: rule.SrcIPs,
DstPorts: dests,
IPProto: rule.IPProto,
})
}
}
return ret
}
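A hedged usage sketch mirroring the tests added in this commit: the global filter from the policy manager is reduced for a single node; per-node autogroup:self rules should not be passed through here (pm and node are assumed to exist in the caller).

globalRules, _ := pm.Filter() // global rules; the matchers are not needed for reduction
reduced := policyutil.ReduceFilterRules(node, globalRules)
// reduced keeps only destinations overlapping the node's own IPs, its advertised
// RoutableIPs, or its approved subnet routes; all other destinations are dropped.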

View File

@@ -0,0 +1,841 @@
package policyutil_test
import (
"encoding/json"
"fmt"
"net/netip"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/util/must"
)
var ap = func(ipStr string) *netip.Addr {
ip := netip.MustParseAddr(ipStr)
return &ip
}
var p = func(prefStr string) netip.Prefix {
ip := netip.MustParsePrefix(prefStr)
return ip
}
// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when
// we use headscale "autogroup:internet".
var hsExitNodeDestForTest = []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "2000::/3", Ports: tailcfg.PortRangeAny},
}
func TestTheInternet(t *testing.T) {
internetSet := util.TheInternet()
internetPrefs := internetSet.Prefixes()
for i := range internetPrefs {
if internetPrefs[i].String() != hsExitNodeDestForTest[i].IP {
t.Errorf(
"prefix from internet set %q != hsExit list %q",
internetPrefs[i].String(),
hsExitNodeDestForTest[i].IP,
)
}
}
if len(internetPrefs) != len(hsExitNodeDestForTest) {
t.Fatalf(
"expected same length of prefixes, internet: %d, hsExit: %d",
len(internetPrefs),
len(hsExitNodeDestForTest),
)
}
}
func TestReduceFilterRules(t *testing.T) {
users := types.Users{
types.User{Model: gorm.Model{ID: 1}, Name: "mickael"},
types.User{Model: gorm.Model{ID: 2}, Name: "user1"},
types.User{Model: gorm.Model{ID: 3}, Name: "user2"},
types.User{Model: gorm.Model{ID: 4}, Name: "user100"},
types.User{Model: gorm.Model{ID: 5}, Name: "user3"},
}
tests := []struct {
name string
node *types.Node
peers types.Nodes
pol string
want []tailcfg.FilterRule
}{
{
name: "host1-can-reach-host2-no-rules",
pol: `
{
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"100.64.0.1"
],
"dst": [
"100.64.0.2:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"),
User: users[0],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
User: users[0],
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1604-subnet-routers-are-preserved",
pol: `
{
"groups": {
"group:admins": [
"user1@"
]
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"group:admins:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:admins"
],
"dst": [
"10.33.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.1/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::1/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "10.33.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-client",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
// "internal" exit node
&types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
},
want: []tailcfg.FilterRule{},
},
{
name: "1786-reducing-breaks-exit-nodes-the-exit",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"autogroup:internet:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: hsExitNodeDestForTest,
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-the-example-from-issue",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"0.0.0.0/5:*",
"8.0.0.0/7:*",
"11.0.0.0/8:*",
"12.0.0.0/6:*",
"16.0.0.0/4:*",
"32.0.0.0/3:*",
"64.0.0.0/2:*",
"128.0.0.0/3:*",
"160.0.0.0/5:*",
"168.0.0.0/6:*",
"172.0.0.0/12:*",
"172.32.0.0/11:*",
"172.64.0.0/10:*",
"172.128.0.0/9:*",
"173.0.0.0/8:*",
"174.0.0.0/7:*",
"176.0.0.0/4:*",
"192.0.0.0/9:*",
"192.128.0.0/11:*",
"192.160.0.0/13:*",
"192.169.0.0/16:*",
"192.170.0.0/15:*",
"192.172.0.0/14:*",
"192.176.0.0/12:*",
"192.192.0.0/10:*",
"193.0.0.0/8:*",
"194.0.0.0/7:*",
"196.0.0.0/6:*",
"200.0.0.0/5:*",
"208.0.0.0/4:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tsaddr.ExitRoutes(),
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny},
{IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny},
{IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny},
{IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny},
{IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny},
{IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny},
{IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny},
{IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny},
{IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny},
{IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny},
{IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny},
{IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny},
{IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny},
{IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny},
{IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny},
{IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/8:*",
"16.0.0.0/8:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/8",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1786-reducing-breaks-exit-nodes-app-connector-like2",
pol: `
{
"groups": {
"group:team": [
"user3@",
"user2@",
"user1@"
]
},
"hosts": {
"internal": "100.64.0.100/32"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"internal:*"
]
},
{
"action": "accept",
"proto": "",
"src": [
"group:team"
],
"dst": [
"8.0.0.0/16:*",
"16.0.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")},
},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[2],
},
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
{
SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "8.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
{
IP: "16.0.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "1817-reduce-breaks-32-mask",
pol: `
{
"tagOwners": {
"tag:access-servers": ["user100@"],
},
"groups": {
"group:access": [
"user1@"
]
},
"hosts": {
"dns1": "172.16.0.21/32",
"vlan1": "172.16.0.0/24"
},
"acls": [
{
"action": "accept",
"proto": "",
"src": [
"group:access"
],
"dst": [
"tag:access-servers:*",
"dns1:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.100"),
IPv6: ap("fd7a:115c:a1e0::100"),
User: users[3],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")},
},
ForcedTags: []string{"tag:access-servers"},
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.100/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::100/128",
Ports: tailcfg.PortRangeAny,
},
{
IP: "172.16.0.21/32",
Ports: tailcfg.PortRangeAny,
},
},
IPProto: []int{6, 17},
},
},
},
{
name: "2365-only-route-policy",
pol: `
{
"hosts": {
"router": "100.64.0.1/32",
"node": "100.64.0.2/32"
},
"acls": [
{
"action": "accept",
"src": [
"*"
],
"dst": [
"router:8000"
]
},
{
"action": "accept",
"src": [
"node"
],
"dst": [
"172.26.0.0/16:*"
]
}
],
}
`,
node: &types.Node{
IPv4: ap("100.64.0.2"),
IPv6: ap("fd7a:115c:a1e0::2"),
User: users[3],
},
peers: types.Nodes{
&types.Node{
IPv4: ap("100.64.0.1"),
IPv6: ap("fd7a:115c:a1e0::1"),
User: users[1],
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
ApprovedRoutes: []netip.Prefix{p("172.16.0.0/24"), p("10.10.11.0/24"), p("10.10.12.0/24")},
},
},
want: []tailcfg.FilterRule{},
},
}
for _, tt := range tests {
for idx, pmf := range policy.PolicyManagerFuncsForTest([]byte(tt.pol)) {
t.Run(fmt.Sprintf("%s-index%d", tt.name, idx), func(t *testing.T) {
var pm policy.PolicyManager
var err error
pm, err = pmf(users, append(tt.peers, tt.node).ViewSlice())
require.NoError(t, err)
got, _ := pm.Filter()
t.Logf("full filter:\n%s", must.Get(json.MarshalIndent(got, "", " ")))
got = policyutil.ReduceFilterRules(tt.node.View(), got)
if diff := cmp.Diff(tt.want, got); diff != "" {
log.Trace().Interface("got", got).Msg("result")
t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff)
}
})
}
}
}

View File

@@ -854,7 +854,6 @@ func TestCompileFilterRulesForNodeWithAutogroupSelf(t *testing.T) {
node1 := nodes[0].View()
rules, err := policy2.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}

View File

@@ -9,6 +9,7 @@ import (
"sync"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/policy/policyutil"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"go4.org/netipx"
@@ -39,7 +40,9 @@ type PolicyManager struct {
// Lazy map of SSH policies
sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy
// Lazy map of per-node filter rules (when autogroup:self is used)
// Lazy map of per-node compiled filter rules (unreduced, for autogroup:self)
compiledFilterRulesMap map[types.NodeID][]tailcfg.FilterRule
// Lazy map of per-node filter rules (reduced, for packet filters)
filterRulesMap map[types.NodeID][]tailcfg.FilterRule
usesAutogroupSelf bool
}
@@ -54,12 +57,13 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node
}
pm := PolicyManager{
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
usesAutogroupSelf: policy.usesAutogroupSelf(),
pol: policy,
users: users,
nodes: nodes,
sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()),
compiledFilterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
filterRulesMap: make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()),
usesAutogroupSelf: policy.usesAutogroupSelf(),
}
_, err = pm.updateLocked()
@@ -78,6 +82,7 @@ func (pm *PolicyManager) updateLocked() (bool, error) {
// policies for nodes that have changed. Particularly if the only difference is
// that nodes have been added or removed.
clear(pm.sshPolicyMap)
clear(pm.compiledFilterRulesMap)
clear(pm.filterRulesMap)
// Check if policy uses autogroup:self
@@ -233,9 +238,157 @@ func (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
return pm.filter, pm.matchers
}
// FilterForNode returns the filter rules for a specific node.
// If the policy uses autogroup:self, this returns node-specific rules for security.
// Otherwise, it returns the global filter rules for efficiency.
// BuildPeerMap constructs peer relationship maps for the given nodes.
// For global filters, it uses the global filter matchers for all nodes.
// For autogroup:self policies (empty global filter), it builds per-node
// peer maps using each node's specific filter rules.
func (pm *PolicyManager) BuildPeerMap(nodes views.Slice[types.NodeView]) map[types.NodeID][]types.NodeView {
if pm == nil {
return nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
// If we have a global filter, use it for all nodes (normal case)
if !pm.usesAutogroupSelf {
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
// Build the map of all peers according to the matchers.
// Compared to ReduceNodes, which recomputes the peer list per node and does the
// full O(n^2) work, this visits each unordered pair once while building the map,
// roughly halving the comparisons and doing less work per node.
for i := range nodes.Len() {
for j := i + 1; j < nodes.Len(); j++ {
if nodes.At(i).ID() == nodes.At(j).ID() {
continue
}
if nodes.At(i).CanAccess(pm.matchers, nodes.At(j)) || nodes.At(j).CanAccess(pm.matchers, nodes.At(i)) {
ret[nodes.At(i).ID()] = append(ret[nodes.At(i).ID()], nodes.At(j))
ret[nodes.At(j).ID()] = append(ret[nodes.At(j).ID()], nodes.At(i))
}
}
}
return ret
}
// For autogroup:self (empty global filter), build per-node peer relationships
ret := make(map[types.NodeID][]types.NodeView, nodes.Len())
// Pre-compute per-node matchers using unreduced compiled rules
// We need unreduced rules to determine peer relationships correctly.
// Reduced rules only show destinations where the node is the target,
// but peer relationships require the full bidirectional access rules.
nodeMatchers := make(map[types.NodeID][]matcher.Match, nodes.Len())
for _, node := range nodes.All() {
filter, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil || len(filter) == 0 {
continue
}
nodeMatchers[node.ID()] = matcher.MatchesFromFilterRules(filter)
}
// Check each node pair for peer relationships.
// Start j at i+1 to avoid checking the same pair twice and creating duplicates.
// We check both directions (i->j and j->i) since ACLs can be asymmetric.
for i := range nodes.Len() {
nodeI := nodes.At(i)
matchersI, hasFilterI := nodeMatchers[nodeI.ID()]
for j := i + 1; j < nodes.Len(); j++ {
nodeJ := nodes.At(j)
matchersJ, hasFilterJ := nodeMatchers[nodeJ.ID()]
// Check if nodeI can access nodeJ
if hasFilterI && nodeI.CanAccess(matchersI, nodeJ) {
ret[nodeI.ID()] = append(ret[nodeI.ID()], nodeJ)
}
// Check if nodeJ can access nodeI
if hasFilterJ && nodeJ.CanAccess(matchersJ, nodeI) {
ret[nodeJ.ID()] = append(ret[nodeJ.ID()], nodeI)
}
}
}
return ret
}
// compileFilterRulesForNodeLocked returns the unreduced compiled filter rules for a node
// when using autogroup:self. This is used by BuildPeerMap to determine peer relationships.
// For packet filters sent to nodes, use filterForNodeLocked which returns reduced rules.
func (pm *PolicyManager) compileFilterRulesForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
// Check if we have cached compiled rules
if rules, ok := pm.compiledFilterRulesMap[node.ID()]; ok {
return rules, nil
}
// Compile per-node rules with autogroup:self expanded
rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)
if err != nil {
return nil, fmt.Errorf("compiling filter rules for node: %w", err)
}
// Cache the unreduced compiled rules
pm.compiledFilterRulesMap[node.ID()] = rules
return rules, nil
}
// filterForNodeLocked returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// This is the internal variant of FilterForNode for use when pm.mu is already held.
// BuildPeerMap already holds the lock, so it needs a version that doesn't re-acquire it.
func (pm *PolicyManager) filterForNodeLocked(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
}
if !pm.usesAutogroupSelf {
// For global filters, reduce to only rules relevant to this node.
// Cache the reduced filter per node for efficiency.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
// Use policyutil.ReduceFilterRules for global filter reduction.
reducedFilter := policyutil.ReduceFilterRules(node, pm.filter)
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
}
// For autogroup:self, compile per-node rules then reduce them.
// Check if we have cached reduced rules for this node.
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
// Get unreduced compiled rules
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, err
}
// Reduce the compiled rules to only destinations relevant to this node
reducedFilter := policyutil.ReduceFilterRules(node, compiledRules)
// Cache the reduced filter
pm.filterRulesMap[node.ID()] = reducedFilter
return reducedFilter, nil
}
// FilterForNode returns the filter rules for a specific node, already reduced
// to only include rules relevant to that node.
// If the policy uses autogroup:self, this returns node-specific compiled rules.
// Otherwise, it returns the global filter reduced for this node.
func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {
if pm == nil {
return nil, nil
@@ -244,22 +397,36 @@ func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRul
pm.mu.Lock()
defer pm.mu.Unlock()
return pm.filterForNodeLocked(node)
}
// MatchersForNode returns the matchers for peer relationship determination for a specific node.
// These are UNREDUCED matchers - they include all rules where the node could be either source or destination.
// This is different from FilterForNode which returns REDUCED rules for packet filtering.
//
// For global policies: returns the global matchers (same for all nodes)
// For autogroup:self: returns node-specific matchers from unreduced compiled rules
func (pm *PolicyManager) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {
if pm == nil {
return nil, nil
}
pm.mu.Lock()
defer pm.mu.Unlock()
// For global policies, return the shared global matchers
if !pm.usesAutogroupSelf {
return pm.filter, nil
return pm.matchers, nil
}
if rules, ok := pm.filterRulesMap[node.ID()]; ok {
return rules, nil
}
rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes)
// For autogroup:self, get unreduced compiled rules and create matchers
compiledRules, err := pm.compileFilterRulesForNodeLocked(node)
if err != nil {
return nil, fmt.Errorf("compiling filter rules for node: %w", err)
return nil, err
}
pm.filterRulesMap[node.ID()] = rules
return rules, nil
// Create matchers from unreduced rules for peer relationship determination
return matcher.MatchesFromFilterRules(compiledRules), nil
}
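A worked example of the reduced/unreduced distinction (illustrative only; node1View stands for a view of the 100.64.0.1 node from the test data elsewhere in this commit, which has no routable or approved routes).

rule := tailcfg.FilterRule{
	SrcIPs:   []string{"100.64.0.1/32"},
	DstPorts: []tailcfg.NetPortRange{{IP: "100.64.0.2/32", Ports: tailcfg.PortRangeAny}},
}

// Reduced view for 100.64.0.1 (the source): no destination is its own IP or a
// route it serves, so the packet filter it receives from this rule is empty.
reduced := policyutil.ReduceFilterRules(node1View, []tailcfg.FilterRule{rule})

// Unreduced matchers still record that 100.64.0.1 may reach 100.64.0.2, which is
// what keeps 100.64.0.2 in node1's peer list.
matchers := matcher.MatchesFromFilterRules([]tailcfg.FilterRule{rule})
_, _ = reduced, matchers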
// SetUsers updates the users in the policy manager and updates the filter rules.
@@ -300,22 +467,40 @@ func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, erro
pm.mu.Lock()
defer pm.mu.Unlock()
// Clear cache based on what actually changed
if pm.usesAutogroupSelf {
// For autogroup:self, we need granular invalidation since rules depend on:
// - User ownership (node.User().ID)
// - Tag status (node.IsTagged())
// - IP addresses (node.IPs())
// - Node existence (added/removed)
pm.invalidateAutogroupSelfCache(pm.nodes, nodes)
} else {
// For non-autogroup:self policies, we can clear everything
clear(pm.filterRulesMap)
}
oldNodeCount := pm.nodes.Len()
newNodeCount := nodes.Len()
// Invalidate cache entries for nodes that changed.
// For autogroup:self: invalidate all nodes belonging to affected users (peer changes).
// For global policies: invalidate only nodes whose properties changed (IPs, routes).
pm.invalidateNodeCache(nodes)
pm.nodes = nodes
return pm.updateLocked()
nodesChanged := oldNodeCount != newNodeCount
// When nodes are added/removed, we must recompile filters because:
// 1. User/group aliases (like "user1@") resolve to node IPs
// 2. Filter compilation needs nodes to generate rules
// 3. Without nodes, filters compile to empty (0 rules)
//
// For autogroup:self: return true when nodes change even if the global filter
// hash didn't change. The global filter is empty for autogroup:self (each node
// has its own filter), so the hash never changes. But peer relationships DO
// change when nodes are added/removed, so we must signal this to trigger updates.
// For global policies: the filter must be recompiled to include the new nodes.
if nodesChanged {
// Recompile filter with the new node list
_, err := pm.updateLocked()
if err != nil {
return false, err
}
// Always return true when nodes changed, even if filter hash didn't change
// (can happen with autogroup:self or when nodes are added but don't affect rules)
return true, nil
}
return false, nil
}
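A hedged sketch of the caller contract for SetNodes (the state-layer wiring is assumed, not shown in this diff):

changed, err := pm.SetNodes(newNodes)
if err != nil {
	return err
}
if changed {
	// Trigger map updates for connected nodes. With autogroup:self this fires
	// whenever nodes are added or removed, even though the global filter hash is
	// unchanged, because per-node peer relationships have changed.
}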
func (pm *PolicyManager) NodeCanHaveTag(node types.NodeView, tag string) bool {
@@ -552,10 +737,12 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
// If we found the user and they're affected, clear this cache entry
if found {
if _, affected := affectedUsers[nodeUserID]; affected {
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
}
} else {
// Node not found in either old or new list, clear it
delete(pm.compiledFilterRulesMap, nodeID)
delete(pm.filterRulesMap, nodeID)
}
}
@@ -567,3 +754,50 @@ func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.S
Msg("Selectively cleared autogroup:self cache for affected users")
}
}
// invalidateNodeCache invalidates cache entries based on what changed.
func (pm *PolicyManager) invalidateNodeCache(newNodes views.Slice[types.NodeView]) {
if pm.usesAutogroupSelf {
// For autogroup:self, a node's filter depends on its peers (same user).
// When any node in a user changes, all nodes for that user need invalidation.
pm.invalidateAutogroupSelfCache(pm.nodes, newNodes)
} else {
// For global policies, a node's filter depends only on its own properties.
// Only invalidate nodes whose properties actually changed.
pm.invalidateGlobalPolicyCache(newNodes)
}
}
// invalidateGlobalPolicyCache invalidates only nodes whose properties affecting
// ReduceFilterRules changed. For global policies, each node's filter is independent.
func (pm *PolicyManager) invalidateGlobalPolicyCache(newNodes views.Slice[types.NodeView]) {
oldNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range pm.nodes.All() {
oldNodeMap[node.ID()] = node
}
newNodeMap := make(map[types.NodeID]types.NodeView)
for _, node := range newNodes.All() {
newNodeMap[node.ID()] = node
}
// Invalidate nodes whose properties changed
for nodeID, newNode := range newNodeMap {
oldNode, existed := oldNodeMap[nodeID]
if !existed {
// New node - no cache entry yet, will be lazily calculated
continue
}
if newNode.HasNetworkChanges(oldNode) {
delete(pm.filterRulesMap, nodeID)
}
}
// Remove deleted nodes from cache
for nodeID := range pm.filterRulesMap {
if _, exists := newNodeMap[nodeID]; !exists {
delete(pm.filterRulesMap, nodeID)
}
}
}

View File

@@ -1,6 +1,7 @@
package v2
import (
"net/netip"
"testing"
"github.com/google/go-cmp/cmp"
@@ -204,3 +205,237 @@ func TestInvalidateAutogroupSelfCache(t *testing.T) {
})
}
}
// TestInvalidateGlobalPolicyCache tests the cache invalidation logic for global policies.
func TestInvalidateGlobalPolicyCache(t *testing.T) {
mustIPPtr := func(s string) *netip.Addr {
ip := netip.MustParseAddr(s)
return &ip
}
tests := []struct {
name string
oldNodes types.Nodes
newNodes types.Nodes
initialCache map[types.NodeID][]tailcfg.FilterRule
expectedCacheAfter map[types.NodeID]bool // true = should exist, false = should not exist
}{
{
name: "node property changed - invalidates only that node",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
},
},
{
name: "multiple nodes changed",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.3")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.99")}, // Changed
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // Unchanged
&types.Node{ID: 3, IPv4: mustIPPtr("100.64.0.88")}, // Changed
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
3: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
3: false, // Invalidated
},
},
{
name: "node deleted - removes from cache",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Deleted
2: true, // Preserved
},
},
{
name: "node added - no cache invalidation needed",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")}, // New
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: true, // Preserved
2: false, // Not in cache (new node)
},
},
{
name: "no changes - preserves all cache",
oldNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{ID: 1, IPv4: mustIPPtr("100.64.0.1")},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: true,
2: true,
},
},
{
name: "routes changed - invalidates that node only",
oldNodes: types.Nodes{
&types.Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
newNodes: types.Nodes{
&types.Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, // Changed
},
&types.Node{ID: 2, IPv4: mustIPPtr("100.64.0.2")},
},
initialCache: map[types.NodeID][]tailcfg.FilterRule{
1: {},
2: {},
},
expectedCacheAfter: map[types.NodeID]bool{
1: false, // Invalidated
2: true, // Preserved
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pm := &PolicyManager{
nodes: tt.oldNodes.ViewSlice(),
filterRulesMap: tt.initialCache,
usesAutogroupSelf: false,
}
pm.invalidateGlobalPolicyCache(tt.newNodes.ViewSlice())
// Verify cache state
for nodeID, shouldExist := range tt.expectedCacheAfter {
_, exists := pm.filterRulesMap[nodeID]
require.Equal(t, shouldExist, exists, "node %d cache existence mismatch", nodeID)
}
})
}
}
// TestAutogroupSelfReducedVsUnreducedRules verifies that:
// 1. BuildPeerMap uses unreduced compiled rules for determining peer relationships
// 2. FilterForNode returns reduced compiled rules for packet filters
func TestAutogroupSelfReducedVsUnreducedRules(t *testing.T) {
user1 := types.User{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"}
user2 := types.User{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"}
users := types.Users{user1, user2}
// Create two nodes
node1 := node("node1", "100.64.0.1", "fd7a:115c:a1e0::1", user1, nil)
node1.ID = 1
node2 := node("node2", "100.64.0.2", "fd7a:115c:a1e0::2", user2, nil)
node2.ID = 2
nodes := types.Nodes{node1, node2}
// Policy with autogroup:self - all members can reach their own devices
policyStr := `{
"acls": [
{
"action": "accept",
"src": ["autogroup:member"],
"dst": ["autogroup:self:*"]
}
]
}`
pm, err := NewPolicyManager([]byte(policyStr), users, nodes.ViewSlice())
require.NoError(t, err)
require.True(t, pm.usesAutogroupSelf, "policy should use autogroup:self")
// Test FilterForNode returns reduced rules
// For node1: should have rules where node1 is in destinations (its own IP)
filterNode1, err := pm.FilterForNode(nodes[0].View())
require.NoError(t, err)
// For node2: should have rules where node2 is in destinations (its own IP)
filterNode2, err := pm.FilterForNode(nodes[1].View())
require.NoError(t, err)
// FilterForNode should return reduced rules - verify they only contain the node's own IPs as destinations
// For node1, destinations should only be node1's IPs
node1IPs := []string{"100.64.0.1/32", "100.64.0.1", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::1"}
for _, rule := range filterNode1 {
for _, dst := range rule.DstPorts {
require.Contains(t, node1IPs, dst.IP,
"node1 filter should only contain node1's IPs as destinations")
}
}
// For node2, destinations should only be node2's IPs
node2IPs := []string{"100.64.0.2/32", "100.64.0.2", "fd7a:115c:a1e0::2/128", "fd7a:115c:a1e0::2"}
for _, rule := range filterNode2 {
for _, dst := range rule.DstPorts {
require.Contains(t, node2IPs, dst.IP,
"node2 filter should only contain node2's IPs as destinations")
}
}
// Test BuildPeerMap uses unreduced rules
peerMap := pm.BuildPeerMap(nodes.ViewSlice())
// According to the policy, user1 can reach autogroup:self (which expands to node1's own IPs for node1)
// So node1 should be able to reach itself, but since we're looking at peer relationships,
// node1 should NOT have itself in the peer map (nodes don't peer with themselves)
// node2 should also not have any peers since user2 has no rules allowing it to reach anyone
// Verify peer relationships based on unreduced rules
// With unreduced rules, BuildPeerMap can properly determine that:
// - node1 can access autogroup:self (its own IPs)
// - node2 cannot access node1
require.Empty(t, peerMap[node1.ID], "node1 should have no peers (can only reach itself)")
require.Empty(t, peerMap[node2.ID], "node2 should have no peers")
}

View File

@@ -20,9 +20,10 @@ const (
)
const (
put = 1
del = 2
update = 3
rebuildPeerMaps = 4
)
const prometheusNamespace = "headscale"
@@ -142,6 +143,8 @@ type work struct {
updateFn UpdateNodeFunc
result chan struct{}
nodeResult chan types.NodeView // Channel to return the resulting node after batch application
// For rebuildPeerMaps operation
rebuildResult chan struct{}
}
// PutNode adds or updates a node in the store.
@@ -298,6 +301,9 @@ func (s *NodeStore) applyBatch(batch []work) {
// Track which work items need node results
nodeResultRequests := make(map[types.NodeID][]*work)
// Track rebuildPeerMaps operations
var rebuildOps []*work
for i := range batch {
w := &batch[i]
switch w.op {
@@ -321,6 +327,10 @@ func (s *NodeStore) applyBatch(batch []work) {
if w.nodeResult != nil {
nodeResultRequests[w.nodeID] = append(nodeResultRequests[w.nodeID], w)
}
case rebuildPeerMaps:
// rebuildPeerMaps doesn't modify nodes, it just forces the snapshot rebuild
// below to recalculate peer relationships using the current peersFunc
rebuildOps = append(rebuildOps, w)
}
}
@@ -347,9 +357,16 @@ func (s *NodeStore) applyBatch(batch []work) {
}
}
// Signal completion for rebuildPeerMaps operations
for _, w := range rebuildOps {
close(w.rebuildResult)
}
// Signal completion for all other work items
for _, w := range batch {
if w.op != rebuildPeerMaps {
close(w.result)
}
}
}
@@ -546,6 +563,22 @@ func (s *NodeStore) ListPeers(id types.NodeID) views.Slice[types.NodeView] {
return views.SliceOf(s.data.Load().peersByNode[id])
}
// RebuildPeerMaps rebuilds the peer relationship map using the current peersFunc.
// This must be called after policy changes because peersFunc uses PolicyManager's
// filters to determine which nodes can see each other. Without rebuilding, the
// peer map would use stale filter data until the next node add/delete.
func (s *NodeStore) RebuildPeerMaps() {
result := make(chan struct{})
w := work{
op: rebuildPeerMaps,
rebuildResult: result,
}
s.writeQueue <- w
<-result
}
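
RebuildPeerMaps follows the same enqueue-and-wait shape as the other NodeStore operations: push a work item onto the write queue, then block until the batcher closes the completion channel. A stripped-down sketch of that synchronization pattern, with hypothetical names rather than the real queue:

package main

import "fmt"

type work struct {
	op   int
	done chan struct{}
}

func main() {
	queue := make(chan work)

	// Batcher goroutine: applies each work item, then signals completion
	// by closing its channel (closing a channel unblocks all waiters).
	go func() {
		for w := range queue {
			// ... apply the operation, rebuild the snapshot ...
			close(w.done)
		}
	}()

	// Caller side, as in RebuildPeerMaps: enqueue and block until applied.
	w := work{op: 4, done: make(chan struct{})}
	queue <- w
	<-w.done
	fmt.Println("peer maps rebuilt")
}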
// ListNodesByUser returns a slice of all nodes for a given user ID.
func (s *NodeStore) ListNodesByUser(uid types.UserID) views.Slice[types.NodeView] {
timer := prometheus.NewTimer(nodeStoreOperationDuration.WithLabelValues("list_by_user"))

View File

@@ -132,9 +132,10 @@ func NewState(cfg *types.Config) (*State, error) {
return nil, fmt.Errorf("init policy manager: %w", err)
}
// PolicyManager.BuildPeerMap handles both global and per-node filter complexity.
// This moves the complex peer relationship logic into the policy package where it belongs.
nodeStore := NewNodeStore(nodes, func(nodes []types.NodeView) map[types.NodeID][]types.NodeView {
return polMan.BuildPeerMap(views.SliceOf(nodes))
})
nodeStore.Start()
@@ -225,6 +226,12 @@ func (s *State) ReloadPolicy() ([]change.ChangeSet, error) {
return nil, fmt.Errorf("setting policy: %w", err)
}
// Rebuild peer maps after policy changes because the peersFunc in NodeStore
// uses the PolicyManager's filters. Without this, nodes won't see newly allowed
// peers until a node is added/removed, causing autogroup:self policies to not
// propagate correctly when switching between policy types.
s.nodeStore.RebuildPeerMaps()
cs := []change.ChangeSet{change.PolicyChange()}
// Always call autoApproveNodes during policy reload, regardless of whether
@@ -797,6 +804,11 @@ func (s *State) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)
return s.polMan.FilterForNode(node)
}
// MatchersForNode returns matchers for peer relationship determination (unreduced).
func (s *State) MatchersForNode(node types.NodeView) ([]matcher.Match, error) {
return s.polMan.MatchersForNode(node)
}
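
The ReloadPolicy comment above pins down an ordering requirement: the new policy must be installed before RebuildPeerMaps runs, since peersFunc reads the PolicyManager's filters. A toy sketch of why the order matters, using stub types with hypothetical names (not the real State):

package main

import "fmt"

// Stubs standing in for PolicyManager and NodeStore.
type policyManager struct{ allowAll bool }

type store struct {
	pm    *policyManager
	peers int // derived from pm when the peer map is (re)built
}

func (s *store) RebuildPeerMaps() {
	if s.pm.allowAll {
		s.peers = 10
	} else {
		s.peers = 0
	}
}

func main() {
	pm := &policyManager{allowAll: false}
	s := &store{pm: pm}
	s.RebuildPeerMaps()
	fmt.Println(s.peers) // 0: snapshot built against the old policy

	// Correct order: swap the policy in first, then rebuild, so the
	// snapshot reflects the new filters instead of stale ones.
	pm.allowAll = true
	s.RebuildPeerMaps()
	fmt.Println(s.peers) // 10
}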
// NodeCanHaveTag checks if a node is allowed to have a specific tag.
func (s *State) NodeCanHaveTag(node types.NodeView, tag string) bool {
return s.polMan.NodeCanHaveTag(node, tag)

View File

@@ -340,11 +340,11 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("prefixes.allocation", string(IPAllocationStrategySequential))
if err := viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
log.Warn().Msg("No config file found, using defaults")
return nil
}
return fmt.Errorf("fatal error reading config file: %w", err)
}

View File

@@ -855,3 +855,22 @@ func (v NodeView) IPsAsString() []string {
}
return v.ж.IPsAsString()
}
// HasNetworkChanges checks if the node has network-related changes.
// Returns true if IPs, announced routes, or subnet routes changed.
// This is primarily used for policy cache invalidation.
func (v NodeView) HasNetworkChanges(other NodeView) bool {
if !slices.Equal(v.IPs(), other.IPs()) {
return true
}
if !slices.Equal(v.AnnouncedRoutes(), other.AnnouncedRoutes()) {
return true
}
if !slices.Equal(v.SubnetRoutes(), other.SubnetRoutes()) {
return true
}
return false
}
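
Because slices.Equal is order-sensitive, HasNetworkChanges relies on the IP and route slices being produced in a stable order; a reordered-but-equal set would read as a change and merely cost an unnecessary cache rebuild. A self-contained sketch of the comparison it performs (standalone helper, not the real method):

package main

import (
	"fmt"
	"net/netip"
	"slices"
)

// networkChanged mirrors the shape of HasNetworkChanges: order-sensitive
// equality over the network-relevant slices, nothing else compared.
func networkChanged(oldIPs, newIPs []netip.Addr, oldRoutes, newRoutes []netip.Prefix) bool {
	return !slices.Equal(oldIPs, newIPs) || !slices.Equal(oldRoutes, newRoutes)
}

func main() {
	a := []netip.Addr{netip.MustParseAddr("100.64.0.1")}
	b := []netip.Addr{netip.MustParseAddr("100.64.0.2")}
	r := []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}
	fmt.Println(networkChanged(a, a, r, r)) // false: nothing moved
	fmt.Println(networkChanged(a, b, r, r)) // true: an IP changed
}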

View File

@@ -793,3 +793,179 @@ func TestNodeRegisterMethodToV1Enum(t *testing.T) {
})
}
}
// TestHasNetworkChanges tests the NodeView method for detecting
// when a node's network properties have changed.
func TestHasNetworkChanges(t *testing.T) {
mustIPPtr := func(s string) *netip.Addr {
ip := netip.MustParseAddr(s)
return &ip
}
tests := []struct {
name string
old *Node
new *Node
changed bool
}{
{
name: "no changes",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
changed: false,
},
{
name: "IPv4 changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.2"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
},
changed: true,
},
{
name: "IPv6 changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::1"),
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
IPv6: mustIPPtr("fd7a:115c:a1e0::2"),
},
changed: true,
},
{
name: "RoutableIPs added",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
},
changed: true,
},
{
name: "RoutableIPs removed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{},
},
changed: true,
},
{
name: "RoutableIPs changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
},
changed: true,
},
{
name: "SubnetRoutes added",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
changed: true,
},
{
name: "SubnetRoutes removed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{},
},
changed: true,
},
{
name: "SubnetRoutes changed",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")},
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostinfo: &tailcfg.Hostinfo{RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24"), netip.MustParsePrefix("192.168.0.0/24")}},
ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
},
changed: true,
},
{
name: "irrelevant property changed (Hostname)",
old: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostname: "old-name",
},
new: &Node{
ID: 1,
IPv4: mustIPPtr("100.64.0.1"),
Hostname: "new-name",
},
changed: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := tt.new.View().HasNetworkChanges(tt.old.View())
if got != tt.changed {
t.Errorf("HasNetworkChanges() = %v, want %v", got, tt.changed)
}
})
}
}