From 87326f5c4f3a3a58e1a461156fd5abe43fe3e810 Mon Sep 17 00:00:00 2001
From: Kristoffer Dalby
Date: Mon, 10 Mar 2025 16:20:29 +0100
Subject: [PATCH] Experimental implementation of Policy v2 (#2214)

* utility iterator for ipset

Signed-off-by: Kristoffer Dalby

* split policy -> policy and v1

This commit splits out the common policy logic and the policy
implementation into separate packages.

policy contains functions that are independent of the policy
implementation; this typically means logic that works on tailcfg types
and generic formats. In addition, it defines the PolicyManager
interface, which v1 implements.

v1 is a subpackage which implements the PolicyManager using the
"original" policy implementation.

Signed-off-by: Kristoffer Dalby

* use policyv1 definitions in integration tests

These can be marshalled back into JSON, which the new format might not
be able to. Also, just don't change it all to JSON strings for now.

Signed-off-by: Kristoffer Dalby

* formatter: breaks lines

Signed-off-by: Kristoffer Dalby

* remove compareprefix, use tsaddr version

Signed-off-by: Kristoffer Dalby

* remove getacl test, add back autoapprover

Signed-off-by: Kristoffer Dalby

* use policy manager tag handling

Signed-off-by: Kristoffer Dalby

* rename display helper for user

Signed-off-by: Kristoffer Dalby

* introduce policy v2 package

policy v2 is built from the ground up to be stricter and follow the
same pattern for all types of resolvers.

TODO: introduce alias resolver

Signed-off-by: Kristoffer Dalby

* wire up policyv2 in integration testing

Signed-off-by: Kristoffer Dalby

* split policy v2 tests into separate workflow to work around GitHub limit

Signed-off-by: Kristoffer Dalby

* add policy manager output to /debug

Signed-off-by: Kristoffer Dalby

* update changelog

Signed-off-by: Kristoffer Dalby

---------

Signed-off-by: Kristoffer Dalby
---
 .../gh-action-integration-generator.go        |   10 +-
 .../workflows/test-integration-policyv2.yaml  |  159 ++
 .github/workflows/test-integration.yaml       |    5 +-
 CHANGELOG.md                                  |   64 +-
 hscontrol/app.go                              |   21 +-
 hscontrol/db/db.go                            |    3 +-
 hscontrol/db/node_test.go                     |  348 ++--
 hscontrol/debug.go                            |    5 +
 hscontrol/grpcv1.go                           |    9 +-
 hscontrol/mapper/mapper_test.go               |   53 +-
 hscontrol/mapper/tail.go                      |    7 +-
 hscontrol/mapper/tail_test.go                 |   10 +-
 hscontrol/oidc.go                             |    2 +-
 hscontrol/policy/pm.go                        |  236 +--
 hscontrol/policy/policy.go                    |  109 ++
 hscontrol/policy/policy_test.go               | 1455 +++++++++++++++++
 hscontrol/policy/{ => v1}/acls.go             |  121 +-
 hscontrol/policy/{ => v1}/acls_test.go        | 1384 +---------------
 hscontrol/policy/{ => v1}/acls_types.go       |    2 +-
 hscontrol/policy/v1/policy.go                 |  187 +++
 .../policy/{pm_test.go => v1/policy_test.go}  |    2 +-
 hscontrol/policy/v2/filter.go                 |  169 ++
 hscontrol/policy/v2/filter_test.go            |  378 +++++
 hscontrol/policy/v2/policy.go                 |  283 ++++
 hscontrol/policy/v2/policy_test.go            |   58 +
 hscontrol/policy/v2/types.go                  | 1005 ++++++++++++
 hscontrol/policy/v2/types_test.go             | 1162 +++++++++++++
 hscontrol/policy/v2/utils.go                  |  164 ++
 hscontrol/policy/v2/utils_test.go             |  102 ++
 hscontrol/poll.go                             |   26 +-
 hscontrol/types/node.go                       |   76 +-
 hscontrol/types/users.go                      |   30 +-
 hscontrol/util/addr.go                        |   14 +
 hscontrol/util/net.go                         |   49 +-
 integration/acl_test.go                       |  105 +-
 integration/cli_test.go                       |   48 +-
 integration/general_test.go                   |    6 +-
 integration/hsic/hsic.go                      |   67 +-
 integration/route_test.go                     |   28 +-
 integration/scenario.go                       |    5 +
 integration/ssh_test.go                       |   34 +-
 41 files changed, 5883 insertions(+), 2118 deletions(-)
 create mode 100644 .github/workflows/test-integration-policyv2.yaml
 create mode 100644 hscontrol/policy/policy.go
 create mode 100644 hscontrol/policy/policy_test.go
 rename hscontrol/policy/{ => v1}/acls.go (88%)
 rename hscontrol/policy/{ => v1}/acls_test.go (66%)
 rename hscontrol/policy/{ => v1}/acls_types.go (99%)
 create mode 100644 hscontrol/policy/v1/policy.go
 rename hscontrol/policy/{pm_test.go => v1/policy_test.go} (99%)
 create mode 100644 hscontrol/policy/v2/filter.go
 create mode 100644 hscontrol/policy/v2/filter_test.go
 create mode 100644 hscontrol/policy/v2/policy.go
 create mode 100644 hscontrol/policy/v2/policy_test.go
 create mode 100644 hscontrol/policy/v2/types.go
 create mode 100644 hscontrol/policy/v2/types_test.go
 create mode 100644 hscontrol/policy/v2/utils.go
 create mode 100644 hscontrol/policy/v2/utils_test.go

diff --git a/.github/workflows/gh-action-integration-generator.go b/.github/workflows/gh-action-integration-generator.go
index 48d96716..471e3589 100644
--- a/.github/workflows/gh-action-integration-generator.go
+++ b/.github/workflows/gh-action-integration-generator.go
@@ -38,12 +38,13 @@ func findTests() []string {
 	return tests
 }
-func updateYAML(tests []string) {
+func updateYAML(tests []string, testPath string) {
 	testsForYq := fmt.Sprintf("[%s]", strings.Join(tests, ", "))
 	yqCommand := fmt.Sprintf(
-		"yq eval '.jobs.integration-test.strategy.matrix.test = %s' ./test-integration.yaml -i",
+		"yq eval '.jobs.integration-test.strategy.matrix.test = %s' %s -i",
 		testsForYq,
+		testPath,
 	)
 	cmd := exec.Command("bash", "-c", yqCommand)
@@ -58,7 +59,7 @@ func updateYAML(tests []string) {
 		log.Fatalf("failed to run yq command: %s", err)
 	}
-	fmt.Println("YAML file updated successfully")
+	fmt.Printf("YAML file (%s) updated successfully\n", testPath)
 }
 func main() {
@@ -69,5 +70,6 @@ func main() {
 		quotedTests[i] = fmt.Sprintf("\"%s\"", test)
 	}
-	updateYAML(quotedTests)
+	updateYAML(quotedTests, "./test-integration.yaml")
+	updateYAML(quotedTests, "./test-integration-policyv2.yaml")
 }
diff --git a/.github/workflows/test-integration-policyv2.yaml b/.github/workflows/test-integration-policyv2.yaml
new file mode 100644
index 00000000..73015603
--- /dev/null
+++ b/.github/workflows/test-integration-policyv2.yaml
@@ -0,0 +1,159 @@
+name: Integration Tests (policy v2)
+# To debug locally on a branch, and when needing secrets
+# change this to include `push` so the build is run on
+# the main repository.
+on: [pull_request]
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+jobs:
+  integration-test:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        test:
+          - TestACLHostsInNetMapTable
+          - TestACLAllowUser80Dst
+          - TestACLDenyAllPort80
+          - TestACLAllowUserDst
+          - TestACLAllowStarDst
+          - TestACLNamedHostsCanReachBySubnet
+          - TestACLNamedHostsCanReach
+          - TestACLDevice1CanAccessDevice2
+          - TestPolicyUpdateWhileRunningWithCLIInDatabase
+          - TestAuthKeyLogoutAndReloginSameUser
+          - TestAuthKeyLogoutAndReloginNewUser
+          - TestAuthKeyLogoutAndReloginSameUserExpiredKey
+          - TestOIDCAuthenticationPingAll
+          - TestOIDCExpireNodesBasedOnTokenExpiry
+          - TestOIDC024UserCreation
+          - TestOIDCAuthenticationWithPKCE
+          - TestOIDCReloginSameNodeNewUser
+          - TestAuthWebFlowAuthenticationPingAll
+          - TestAuthWebFlowLogoutAndRelogin
+          - TestUserCommand
+          - TestPreAuthKeyCommand
+          - TestPreAuthKeyCommandWithoutExpiry
+          - TestPreAuthKeyCommandReusableEphemeral
+          - TestPreAuthKeyCorrectUserLoggedInCommand
+          - TestApiKeyCommand
+          - TestNodeTagCommand
+          - TestNodeAdvertiseTagCommand
+          - TestNodeCommand
+          - TestNodeExpireCommand
+          - TestNodeRenameCommand
+          - TestNodeMoveCommand
+          - TestPolicyCommand
+          - TestPolicyBrokenConfigCommand
+          - TestDERPVerifyEndpoint
+          - TestResolveMagicDNS
+          - TestResolveMagicDNSExtraRecordsPath
+          - TestValidateResolvConf
+          - TestDERPServerScenario
+          - TestDERPServerWebsocketScenario
+          - TestPingAllByIP
+          - TestPingAllByIPPublicDERP
+          - TestEphemeral
+          - TestEphemeralInAlternateTimezone
+          - TestEphemeral2006DeletedTooQuickly
+          - TestPingAllByHostname
+          - TestTaildrop
+          - TestUpdateHostnameFromClient
+          - TestExpireNode
+          - TestNodeOnlineStatus
+          - TestPingAllByIPManyUpDown
+          - Test2118DeletingOnlineNodePanics
+          - TestEnablingRoutes
+          - TestHASubnetRouterFailover
+          - TestEnableDisableAutoApprovedRoute
+          - TestAutoApprovedSubRoute2068
+          - TestSubnetRouteACL
+          - TestEnablingExitRoutes
+          - TestHeadscale
+          - TestCreateTailscale
+          - TestTailscaleNodesJoiningHeadcale
+          - TestSSHOneUserToAll
+          - TestSSHMultipleUsersAllToAll
+          - TestSSHNoSSHConfigured
+          - TestSSHIsBlockedInACL
+          - TestSSHUserOnlyIsolation
+        database: [postgres, sqlite]
+    env:
+      # GitHub does not allow us to access secrets in pull requests,
+      # so this env var is used to check if we have the secret or not.
+      # If we have the secrets, meaning we are running on push in a fork,
+      # there might be secrets available for more debugging.
+      # If TS_OAUTH_CLIENT_ID and TS_OAUTH_SECRET are set, then the job
+      # will join a debug tailscale network, set up SSH and a tmux session.
+      # The SSH will be configured to use the SSH key of the GitHub user
+      # that triggered the build.
+ HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }} + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 2 + - name: Get changed files + id: changed-files + uses: dorny/paths-filter@v3 + with: + filters: | + files: + - '*.nix' + - 'go.*' + - '**/*.go' + - 'integration_test/' + - 'config-example.yaml' + - name: Tailscale + if: ${{ env.HAS_TAILSCALE_SECRET }} + uses: tailscale/github-action@v2 + with: + oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }} + oauth-secret: ${{ secrets.TS_OAUTH_SECRET }} + tags: tag:gh + - name: Setup SSH server for Actor + if: ${{ env.HAS_TAILSCALE_SECRET }} + uses: alexellis/setup-sshd-actor@master + - uses: DeterminateSystems/nix-installer-action@main + if: steps.changed-files.outputs.files == 'true' + - uses: DeterminateSystems/magic-nix-cache-action@main + if: steps.changed-files.outputs.files == 'true' + - uses: satackey/action-docker-layer-caching@main + if: steps.changed-files.outputs.files == 'true' + continue-on-error: true + - name: Run Integration Test + uses: Wandalen/wretry.action@master + if: steps.changed-files.outputs.files == 'true' + env: + USE_POSTGRES: ${{ matrix.database == 'postgres' && '1' || '0' }} + with: + attempt_limit: 5 + command: | + nix develop --command -- docker run \ + --tty --rm \ + --volume ~/.cache/hs-integration-go:/go \ + --name headscale-test-suite \ + --volume $PWD:$PWD -w $PWD/integration \ + --volume /var/run/docker.sock:/var/run/docker.sock \ + --volume $PWD/control_logs:/tmp/control \ + --env HEADSCALE_INTEGRATION_POSTGRES=${{env.USE_POSTGRES}} \ + --env HEADSCALE_EXPERIMENTAL_POLICY_V2=1 \ + golang:1 \ + go run gotest.tools/gotestsum@latest -- ./... \ + -failfast \ + -timeout 120m \ + -parallel 1 \ + -run "^${{ matrix.test }}$" + - uses: actions/upload-artifact@v4 + if: always() && steps.changed-files.outputs.files == 'true' + with: + name: ${{ matrix.test }}-${{matrix.database}}-${{matrix.policy}}-logs + path: "control_logs/*.log" + - uses: actions/upload-artifact@v4 + if: always() && steps.changed-files.outputs.files == 'true' + with: + name: ${{ matrix.test }}-${{matrix.database}}-${{matrix.policy}}-pprof + path: "control_logs/*.pprof.tar" + - name: Setup a blocking tmux session + if: ${{ env.HAS_TAILSCALE_SECRET }} + uses: alexellis/block-with-tmux-action@master diff --git a/.github/workflows/test-integration.yaml b/.github/workflows/test-integration.yaml index f2e2ee17..2898b4ba 100644 --- a/.github/workflows/test-integration.yaml +++ b/.github/workflows/test-integration.yaml @@ -137,6 +137,7 @@ jobs: --volume /var/run/docker.sock:/var/run/docker.sock \ --volume $PWD/control_logs:/tmp/control \ --env HEADSCALE_INTEGRATION_POSTGRES=${{env.USE_POSTGRES}} \ + --env HEADSCALE_EXPERIMENTAL_POLICY_V2=0 \ golang:1 \ go run gotest.tools/gotestsum@latest -- ./... 
\
            -failfast
@@ -146,12 +147,12 @@
       - uses: actions/upload-artifact@v4
         if: always() && steps.changed-files.outputs.files == 'true'
         with:
-          name: ${{ matrix.test }}-${{matrix.database}}-logs
+          name: ${{ matrix.test }}-${{matrix.database}}-${{matrix.policy}}-logs
           path: "control_logs/*.log"
       - uses: actions/upload-artifact@v4
         if: always() && steps.changed-files.outputs.files == 'true'
         with:
-          name: ${{ matrix.test }}-${{matrix.database}}-pprof
+          name: ${{ matrix.test }}-${{matrix.database}}-${{matrix.policy}}-pprof
           path: "control_logs/*.pprof.tar"
       - name: Setup a blocking tmux session
         if: ${{ env.HAS_TAILSCALE_SECRET }}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d0571150..6bda04ed 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,13 +4,13 @@
 ### BREAKING
-Route internals have been rewritten, removing the dedicated route table in the database.
-This was done to simplify the codebase, which had grown unnecessarily complex after
-the routes were split into separate tables. The overhead of having to go via the database
-and keeping the state in sync made the code very hard to reason about and prone to errors.
-The majority of the route state is only relevant when headscale is running, and is now only
-kept in memory.
-As part of this, the CLI and API has been simplified to reflect the changes;
+Route internals have been rewritten, removing the dedicated route table in the
+database. This was done to simplify the codebase, which had grown unnecessarily
+complex after the routes were split into separate tables. The overhead of having
+to go via the database and keeping the state in sync made the code very hard to
+reason about and prone to errors. The majority of the route state is only
+relevant when headscale is running, and is now only kept in memory. As part of
+this, the CLI and API have been simplified to reflect the changes;

 ```console
 $ headscale nodes list-routes
@@ -27,15 +27,55 @@ ID | Hostname | Approved | Available | Serving
 2 | ts-unstable-fq7ob4 | | 0.0.0.0/0, ::/0 |
 ```

-Note that if an exit route is approved (0.0.0.0/0 or ::/0), both IPv4 and IPv6 will be approved.
+Note that if an exit route is approved (0.0.0.0/0 or ::/0), both IPv4 and IPv6
+will be approved.

-- Route API and CLI has been removed [#2422](https://github.com/juanfont/headscale/pull/2422)
-- Routes are now managed via the Node API [#2422](https://github.com/juanfont/headscale/pull/2422)
+- Route API and CLI have been removed
+  [#2422](https://github.com/juanfont/headscale/pull/2422)
+- Routes are now managed via the Node API
+  [#2422](https://github.com/juanfont/headscale/pull/2422)
+
+### Experimental Policy v2
+
+This release introduces a new experimental version of Headscale's policy
+implementation. In this context, experimental means that the feature is not yet
+fully tested, may contain bugs or unexpected behavior, and that we are still
+experimenting with what the final interface/behavior will be.
+
+#### Breaking changes
+
+- The policy is validated and "resolved" when loading, providing errors for
+  invalid rules and conditions.
+  - Previously this was done as a mix between load and runtime (when it was
+    applied to a node).
+  - This means that when you convert the first time, what was previously a
+    policy that loaded, but failed at runtime, will now fail at load time.
+- Error messages should be more descriptive and informative.
+  - There is still work to be done here, but it is already improved with
+    "typing" (e.g. only Users can be put in Groups).
+- All users must contain an `@` character.
+  - If your user naturally contains an `@`, like an email, this will just work.
+  - If it's based on usernames, or other identifiers not containing an `@`, an
+    `@` should be appended at the end. For example, if your user is `john`, it
+    must be written as `john@` in the policy.
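+
+For example, a minimal sketch of a group in the new form (the `group:admins`
+name is illustrative):
+
+```json
+{
+  "groups": {
+    "group:admins": ["john@"]
+  }
+}
+```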
+
+#### Current state
+
+The new policy is passing all tests, both integration and unit tests. This does
+not mean it is perfect, but it is a good start. Corner cases that are currently
+working in v1 and not tested might be broken in v2 (and vice versa).
+
+**We do need help testing this code**, and we think that most of the
+user-facing API will not really change. We are not sure yet when this code will
+replace v1, but we are confident that it will, and all new changes and fixes
+will be made towards this code.
+
+The new policy can be used by setting the environment variable
+`HEADSCALE_EXPERIMENTAL_POLICY_V2` to `1`.
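+
+For example (a sketch; adapt to however you run headscale):
+
+```console
+$ HEADSCALE_EXPERIMENTAL_POLICY_V2=1 headscale serve
+```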
 ### Changes
-- Use Go 1.24
-  [#2427](https://github.com/juanfont/headscale/pull/2427)
+- Use Go 1.24 [#2427](https://github.com/juanfont/headscale/pull/2427)
 - `oidc.map_legacy_users` and `oidc.strip_email_domain` has been removed
   [#2411](https://github.com/juanfont/headscale/pull/2411)
 - Add more information to `/debug` endpoint
diff --git a/hscontrol/app.go b/hscontrol/app.go
index c37e1e89..ee1587ad 100644
--- a/hscontrol/app.go
+++ b/hscontrol/app.go
@@ -194,10 +194,14 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
 	var magicDNSDomains []dnsname.FQDN
 	if cfg.PrefixV4 != nil {
-		magicDNSDomains = append(magicDNSDomains, util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
+		magicDNSDomains = append(
+			magicDNSDomains,
+			util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
 	}
 	if cfg.PrefixV6 != nil {
-		magicDNSDomains = append(magicDNSDomains, util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
+		magicDNSDomains = append(
+			magicDNSDomains,
+			util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
 	}
 	// we might have routes already from Split DNS
@@ -459,11 +463,13 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
 	router := mux.NewRouter()
 	router.Use(prometheusMiddleware)
-	router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost, http.MethodGet)
+	router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).
+		Methods(http.MethodPost, http.MethodGet)
 	router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
 	router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
-	router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).Methods(http.MethodGet)
+	router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
+		Methods(http.MethodGet)
 	if provider, ok := h.authProvider.(*AuthProviderOIDC); ok {
 		router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet)
@@ -523,7 +529,11 @@ func usersChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) (bool, error) {
 	// Maybe we should attempt a new in memory state and not go via the DB?
 	// Maybe this should be implemented as an event bus?
 	// A bool is returned indicating if a full update was sent to all nodes
-func nodesChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) (bool, error) {
+func nodesChangedHook(
+	db *db.HSDatabase,
+	polMan policy.PolicyManager,
+	notif *notifier.Notifier,
+) (bool, error) {
 	nodes, err := db.ListNodes()
 	if err != nil {
 		return false, err
 	}
@@ -1143,6 +1153,7 @@ func (h *Headscale) loadPolicyManager() error {
 		errOut = fmt.Errorf("creating policy manager: %w", err)
 		return
 	}
+	log.Info().Msgf("Using policy manager version: %d", h.polMan.Version())
 	if len(nodes) > 0 {
 		_, err = h.polMan.SSHPolicy(nodes[0])
diff --git a/hscontrol/db/db.go b/hscontrol/db/db.go
index a130f876..7d0c3144 100644
--- a/hscontrol/db/db.go
+++ b/hscontrol/db/db.go
@@ -22,6 +22,7 @@ import (
 	"gorm.io/gorm"
 	"gorm.io/gorm/logger"
 	"gorm.io/gorm/schema"
+	"tailscale.com/net/tsaddr"
 	"tailscale.com/util/set"
 	"zgo.at/zcache/v2"
 )
@@ -655,7 +656,7 @@ AND auth_key_id NOT IN (
 	}
 	for nodeID, routes := range nodeRoutes {
-		slices.SortFunc(routes, util.ComparePrefix)
+		tsaddr.SortPrefixes(routes)
 		slices.Compact(routes)
 		data, err := json.Marshal(routes)
diff --git a/hscontrol/db/node_test.go b/hscontrol/db/node_test.go
index c3924bbe..c92a4497 100644
--- a/hscontrol/db/node_test.go
+++ b/hscontrol/db/node_test.go
@@ -19,6 +19,7 @@ import (
 	"github.com/stretchr/testify/require"
 	"gopkg.in/check.v1"
 	"gorm.io/gorm"
+	"tailscale.com/net/tsaddr"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/key"
 	"tailscale.com/types/ptr"
@@ -146,105 +147,6 @@ func (s *Suite) TestListPeers(c *check.C) {
 	c.Assert(peersOfNode0[8].Hostname, check.Equals, "testnode10")
 }
-func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
-	type base struct {
-		user *types.User
-		key  *types.PreAuthKey
-	}
-	stor := make([]base, 0)
-	for _, name := range []string{"test", "admin"} {
-		user, err := db.CreateUser(types.User{Name: name})
-		c.Assert(err, check.IsNil)
-		pak, err := db.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil)
-		c.Assert(err, check.IsNil)
-		stor = append(stor, base{user, pak})
-	}
-	_, err := db.GetNodeByID(0)
-	c.Assert(err, check.NotNil)
-	for index := 0; index <= 10; index++ {
-		nodeKey := key.NewNode()
-		machineKey := key.NewMachine()
-		v4 := netip.MustParseAddr(fmt.Sprintf("100.64.0.%d", index+1))
-		node := types.Node{
-			ID:             types.NodeID(index),
-			MachineKey:     machineKey.Public(),
-			NodeKey:        nodeKey.Public(),
-			IPv4:           &v4,
-			Hostname:       "testnode" + strconv.Itoa(index),
-			UserID:         stor[index%2].user.ID,
-			RegisterMethod: util.RegisterMethodAuthKey,
-			AuthKeyID:      ptr.To(stor[index%2].key.ID),
-		}
-		trx := db.DB.Save(&node)
-		c.Assert(trx.Error, check.IsNil)
-	}
-	aclPolicy := &policy.ACLPolicy{
-		Groups: map[string][]string{
-			"group:test": {"admin"},
-		},
-		Hosts:     map[string]netip.Prefix{},
-		TagOwners: map[string][]string{},
-		ACLs: []policy.ACL{
-			{
-				Action:       "accept",
-				Sources:      []string{"admin"},
-				Destinations: []string{"*:*"},
-			},
-			{
-				Action:       "accept",
-				Sources:      []string{"test"},
-				Destinations: []string{"test:*"},
-			},
-		},
-		Tests: []policy.ACLTest{},
-	}
-	adminNode, err := db.GetNodeByID(1)
-	c.Logf("Node(%v), user: %v", adminNode.Hostname, adminNode.User)
-	c.Assert(adminNode.IPv4, check.NotNil)
-	c.Assert(adminNode.IPv6, check.IsNil)
-	c.Assert(err, check.IsNil)
-	testNode, err := db.GetNodeByID(2)
-	c.Logf("Node(%v), user: %v", testNode.Hostname, testNode.User)
-	c.Assert(err, check.IsNil)
-	adminPeers, err := db.ListPeers(adminNode.ID)
-	c.Assert(err, check.IsNil)
-
c.Assert(len(adminPeers), check.Equals, 9) - - testPeers, err := db.ListPeers(testNode.ID) - c.Assert(err, check.IsNil) - c.Assert(len(testPeers), check.Equals, 9) - - adminRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, adminNode, adminPeers, []types.User{*stor[0].user, *stor[1].user}) - c.Assert(err, check.IsNil) - - testRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, testNode, testPeers, []types.User{*stor[0].user, *stor[1].user}) - c.Assert(err, check.IsNil) - - peersOfAdminNode := policy.FilterNodesByACL(adminNode, adminPeers, adminRules) - peersOfTestNode := policy.FilterNodesByACL(testNode, testPeers, testRules) - c.Log(peersOfAdminNode) - c.Log(peersOfTestNode) - - c.Assert(len(peersOfTestNode), check.Equals, 9) - c.Assert(peersOfTestNode[0].Hostname, check.Equals, "testnode1") - c.Assert(peersOfTestNode[1].Hostname, check.Equals, "testnode3") - c.Assert(peersOfTestNode[3].Hostname, check.Equals, "testnode5") - - c.Assert(len(peersOfAdminNode), check.Equals, 9) - c.Assert(peersOfAdminNode[0].Hostname, check.Equals, "testnode2") - c.Assert(peersOfAdminNode[2].Hostname, check.Equals, "testnode4") - c.Assert(peersOfAdminNode[5].Hostname, check.Equals, "testnode7") -} - func (s *Suite) TestExpireNode(c *check.C) { user, err := db.CreateUser(types.User{Name: "test"}) c.Assert(err, check.IsNil) @@ -456,143 +358,171 @@ func TestHeadscale_generateGivenName(t *testing.T) { } } -// TODO(kradalby): replace this test -// func TestAutoApproveRoutes(t *testing.T) { -// tests := []struct { -// name string -// acl string -// routes []netip.Prefix -// want []netip.Prefix -// }{ -// { -// name: "2068-approve-issue-sub", -// acl: ` -// { -// "groups": { -// "group:k8s": ["test"] -// }, +func TestAutoApproveRoutes(t *testing.T) { + tests := []struct { + name string + acl string + routes []netip.Prefix + want []netip.Prefix + want2 []netip.Prefix + }{ + { + name: "2068-approve-issue-sub-kube", + acl: ` +{ + "groups": { + "group:k8s": ["test@"] + }, // "acls": [ // {"action": "accept", "users": ["*"], "ports": ["*:*"]}, // ], -// "autoApprovers": { -// "routes": { -// "10.42.0.0/16": ["test"], -// } -// } -// }`, -// routes: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, -// want: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, -// }, -// { -// name: "2068-approve-issue-sub", -// acl: ` -// { -// "tagOwners": { -// "tag:exit": ["test"], -// }, + "autoApprovers": { + "routes": { + "10.42.0.0/16": ["test@"], + } + } +}`, + routes: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, + want: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")}, + }, + { + name: "2068-approve-issue-sub-exit-tag", + acl: ` +{ + "tagOwners": { + "tag:exit": ["test@"], + }, -// "groups": { -// "group:test": ["test"] -// }, + "groups": { + "group:test": ["test@"] + }, // "acls": [ // {"action": "accept", "users": ["*"], "ports": ["*:*"]}, // ], -// "autoApprovers": { -// "exitNode": ["tag:exit"], -// "routes": { -// "10.10.0.0/16": ["group:test"], -// "10.11.0.0/16": ["test"], -// } -// } -// }`, -// routes: []netip.Prefix{ -// tsaddr.AllIPv4(), -// tsaddr.AllIPv6(), -// netip.MustParsePrefix("10.10.0.0/16"), -// netip.MustParsePrefix("10.11.0.0/24"), -// }, -// want: []netip.Prefix{ -// tsaddr.AllIPv4(), -// netip.MustParsePrefix("10.10.0.0/16"), -// netip.MustParsePrefix("10.11.0.0/24"), -// tsaddr.AllIPv6(), -// }, -// }, -// } + "autoApprovers": { + "exitNode": ["tag:exit"], + "routes": { + "10.10.0.0/16": ["group:test"], + "10.11.0.0/16": ["test@"], + 
"8.11.0.0/24": ["test2@"], // No nodes + } + } +}`, + routes: []netip.Prefix{ + tsaddr.AllIPv4(), + tsaddr.AllIPv6(), + netip.MustParsePrefix("10.10.0.0/16"), + netip.MustParsePrefix("10.11.0.0/24"), -// for _, tt := range tests { -// t.Run(tt.name, func(t *testing.T) { -// adb, err := newSQLiteTestDB() -// require.NoError(t, err) -// pol, err := policy.LoadACLPolicyFromBytes([]byte(tt.acl)) + // Not approved + netip.MustParsePrefix("8.11.0.0/24"), + }, + want: []netip.Prefix{ + netip.MustParsePrefix("10.10.0.0/16"), + netip.MustParsePrefix("10.11.0.0/24"), + }, + want2: []netip.Prefix{ + tsaddr.AllIPv4(), + tsaddr.AllIPv6(), + }, + }, + } -// require.NoError(t, err) -// require.NotNil(t, pol) + for _, tt := range tests { + pmfs := policy.PolicyManagerFuncsForTest([]byte(tt.acl)) + for i, pmf := range pmfs { + version := i + 1 + t.Run(fmt.Sprintf("%s-policyv%d", tt.name, version), func(t *testing.T) { + adb, err := newSQLiteTestDB() + require.NoError(t, err) -// user, err := adb.CreateUser(types.User{Name: "test"}) -// require.NoError(t, err) + suffix := "" + if version == 1 { + suffix = "@" + } -// pak, err := adb.CreatePreAuthKey(types.UserID(user.ID), false, nil, nil) -// require.NoError(t, err) + user, err := adb.CreateUser(types.User{Name: "test" + suffix}) + require.NoError(t, err) + _, err = adb.CreateUser(types.User{Name: "test2" + suffix}) + require.NoError(t, err) + taggedUser, err := adb.CreateUser(types.User{Name: "tagged" + suffix}) + require.NoError(t, err) -// nodeKey := key.NewNode() -// machineKey := key.NewMachine() + node := types.Node{ + ID: 1, + MachineKey: key.NewMachine().Public(), + NodeKey: key.NewNode().Public(), + Hostname: "testnode", + UserID: user.ID, + RegisterMethod: util.RegisterMethodAuthKey, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: tt.routes, + }, + IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")), + } -// v4 := netip.MustParseAddr("100.64.0.1") -// node := types.Node{ -// ID: 0, -// MachineKey: machineKey.Public(), -// NodeKey: nodeKey.Public(), -// Hostname: "test", -// UserID: user.ID, -// RegisterMethod: util.RegisterMethodAuthKey, -// AuthKeyID: ptr.To(pak.ID), -// Hostinfo: &tailcfg.Hostinfo{ -// RequestTags: []string{"tag:exit"}, -// RoutableIPs: tt.routes, -// }, -// IPv4: &v4, -// } + err = adb.DB.Save(&node).Error + require.NoError(t, err) -// trx := adb.DB.Save(&node) -// require.NoError(t, trx.Error) + nodeTagged := types.Node{ + ID: 2, + MachineKey: key.NewMachine().Public(), + NodeKey: key.NewNode().Public(), + Hostname: "taggednode", + UserID: taggedUser.ID, + RegisterMethod: util.RegisterMethodAuthKey, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: tt.routes, + }, + ForcedTags: []string{"tag:exit"}, + IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")), + } -// sendUpdate, err := adb.SaveNodeRoutes(&node) -// require.NoError(t, err) -// assert.False(t, sendUpdate) + err = adb.DB.Save(&nodeTagged).Error + require.NoError(t, err) -// node0ByID, err := adb.GetNodeByID(0) -// require.NoError(t, err) + users, err := adb.ListUsers() + assert.NoError(t, err) -// users, err := adb.ListUsers() -// assert.NoError(t, err) + nodes, err := adb.ListNodes() + assert.NoError(t, err) -// nodes, err := adb.ListNodes() -// assert.NoError(t, err) + pm, err := pmf(users, nodes) + require.NoError(t, err) + require.NotNil(t, pm) -// pm, err := policy.NewPolicyManager([]byte(tt.acl), users, nodes) -// assert.NoError(t, err) + changed1 := policy.AutoApproveRoutes(pm, &node) + assert.True(t, changed1) -// // TODO(kradalby): Check state update -// err = 
adb.EnableAutoApprovedRoutes(pm, node0ByID) -// require.NoError(t, err) + err = adb.DB.Save(&node).Error + require.NoError(t, err) -// enabledRoutes, err := adb.GetEnabledRoutes(node0ByID) -// require.NoError(t, err) -// assert.Len(t, enabledRoutes, len(tt.want)) + _ = policy.AutoApproveRoutes(pm, &nodeTagged) -// tsaddr.SortPrefixes(enabledRoutes) + err = adb.DB.Save(&nodeTagged).Error + require.NoError(t, err) -// if diff := cmp.Diff(tt.want, enabledRoutes, util.Comparers...); diff != "" { -// t.Errorf("unexpected enabled routes (-want +got):\n%s", diff) -// } -// }) -// } -// } + node1ByID, err := adb.GetNodeByID(1) + require.NoError(t, err) + + if diff := cmp.Diff(tt.want, node1ByID.SubnetRoutes(), util.Comparers...); diff != "" { + t.Errorf("unexpected enabled routes (-want +got):\n%s", diff) + } + + node2ByID, err := adb.GetNodeByID(2) + require.NoError(t, err) + + if diff := cmp.Diff(tt.want2, node2ByID.SubnetRoutes(), util.Comparers...); diff != "" { + t.Errorf("unexpected enabled routes (-want +got):\n%s", diff) + } + }) + } + } +} func TestEphemeralGarbageCollectorOrder(t *testing.T) { want := []types.NodeID{1, 3} diff --git a/hscontrol/debug.go b/hscontrol/debug.go index d60aadbf..0d20ddf9 100644 --- a/hscontrol/debug.go +++ b/hscontrol/debug.go @@ -105,6 +105,11 @@ func (h *Headscale) debugHTTPServer() *http.Server { w.WriteHeader(http.StatusOK) w.Write([]byte(h.primaryRoutes.String())) })) + debug.Handle("policy-manager", "Policy Manager", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + w.Write([]byte(h.polMan.DebugString())) + })) err := statsviz.Register(debugMux) if err == nil { diff --git a/hscontrol/grpcv1.go b/hscontrol/grpcv1.go index 57b46889..66f2b02f 100644 --- a/hscontrol/grpcv1.go +++ b/hscontrol/grpcv1.go @@ -348,7 +348,7 @@ func (api headscaleV1APIServer) SetApprovedRoutes( routes = append(routes, prefix) } } - slices.SortFunc(routes, util.ComparePrefix) + tsaddr.SortPrefixes(routes) slices.Compact(routes) node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) { @@ -525,7 +525,12 @@ func nodesToProto(polMan policy.PolicyManager, isLikelyConnected *xsync.MapOf[ty resp.Online = true } - tags := polMan.Tags(node) + var tags []string + for _, tag := range node.RequestTags() { + if polMan.NodeCanHaveTag(node, tag) { + tags = append(tags, tag) + } + } resp.ValidTags = lo.Uniq(append(tags, node.ForcedTags...)) response[index] = resp } diff --git a/hscontrol/mapper/mapper_test.go b/hscontrol/mapper/mapper_test.go index 51c09411..6dd3387d 100644 --- a/hscontrol/mapper/mapper_test.go +++ b/hscontrol/mapper/mapper_test.go @@ -11,6 +11,7 @@ import ( "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/routes" "github.com/juanfont/headscale/hscontrol/types" + "github.com/stretchr/testify/require" "gorm.io/gorm" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" @@ -246,7 +247,7 @@ func Test_fullMapResponse(t *testing.T) { tests := []struct { name string - pol *policy.ACLPolicy + pol []byte node *types.Node peers types.Nodes @@ -258,7 +259,7 @@ func Test_fullMapResponse(t *testing.T) { // { // name: "empty-node", // node: types.Node{}, - // pol: &policy.ACLPolicy{}, + // pol: &policyv1.ACLPolicy{}, // dnsConfig: &tailcfg.DNSConfig{}, // baseDomain: "", // want: nil, @@ -266,7 +267,6 @@ func Test_fullMapResponse(t *testing.T) { // }, { name: "no-pol-no-peers-map-response", - pol: &policy.ACLPolicy{}, node: mini, peers: 
types.Nodes{}, derpMap: &tailcfg.DERPMap{}, @@ -284,10 +284,15 @@ func Test_fullMapResponse(t *testing.T) { DNSConfig: &tailcfg.DNSConfig{}, Domain: "", CollectServices: "false", - PacketFilter: []tailcfg.FilterRule{}, - UserProfiles: []tailcfg.UserProfile{{ID: tailcfg.UserID(user1.ID), LoginName: "user1", DisplayName: "user1"}}, - SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, - ControlTime: &time.Time{}, + UserProfiles: []tailcfg.UserProfile{ + { + ID: tailcfg.UserID(user1.ID), + LoginName: "user1", + DisplayName: "user1", + }, + }, + PacketFilter: tailcfg.FilterAllowAll, + ControlTime: &time.Time{}, Debug: &tailcfg.Debug{ DisableLogTail: true, }, @@ -296,7 +301,6 @@ func Test_fullMapResponse(t *testing.T) { }, { name: "no-pol-with-peer-map-response", - pol: &policy.ACLPolicy{}, node: mini, peers: types.Nodes{ peer1, @@ -318,13 +322,12 @@ func Test_fullMapResponse(t *testing.T) { DNSConfig: &tailcfg.DNSConfig{}, Domain: "", CollectServices: "false", - PacketFilter: []tailcfg.FilterRule{}, UserProfiles: []tailcfg.UserProfile{ {ID: tailcfg.UserID(user1.ID), LoginName: "user1", DisplayName: "user1"}, {ID: tailcfg.UserID(user2.ID), LoginName: "user2", DisplayName: "user2"}, }, - SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, - ControlTime: &time.Time{}, + PacketFilter: tailcfg.FilterAllowAll, + ControlTime: &time.Time{}, Debug: &tailcfg.Debug{ DisableLogTail: true, }, @@ -333,18 +336,17 @@ func Test_fullMapResponse(t *testing.T) { }, { name: "with-pol-map-response", - pol: &policy.ACLPolicy{ - Hosts: policy.Hosts{ - "mini": netip.MustParsePrefix("100.64.0.1/32"), - }, - ACLs: []policy.ACL{ - { - Action: "accept", - Sources: []string{"100.64.0.2"}, - Destinations: []string{"mini:*"}, - }, - }, - }, + pol: []byte(` + { + "acls": [ + { + "action": "accept", + "src": ["100.64.0.2"], + "dst": ["user1:*"], + }, + ], + } + `), node: mini, peers: types.Nodes{ peer1, @@ -374,11 +376,11 @@ func Test_fullMapResponse(t *testing.T) { }, }, }, + SSHPolicy: &tailcfg.SSHPolicy{}, UserProfiles: []tailcfg.UserProfile{ {ID: tailcfg.UserID(user1.ID), LoginName: "user1", DisplayName: "user1"}, {ID: tailcfg.UserID(user2.ID), LoginName: "user2", DisplayName: "user2"}, }, - SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, ControlTime: &time.Time{}, Debug: &tailcfg.Debug{ DisableLogTail: true, @@ -390,7 +392,8 @@ func Test_fullMapResponse(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - polMan, _ := policy.NewPolicyManagerForTest(tt.pol, []types.User{user1, user2}, append(tt.peers, tt.node)) + polMan, err := policy.NewPolicyManager(tt.pol, []types.User{user1, user2}, append(tt.peers, tt.node)) + require.NoError(t, err) primary := routes.New() primary.SetRoutes(tt.node.ID, tt.node.SubnetRoutes()...) 
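Both the grpcv1.go hunk above and the tail.go hunk below replace the removed
`polMan.Tags(node)` helper with the same inline idiom: filter the node's
requested tags through `PolicyManager.NodeCanHaveTag`, then merge in forced
tags. A minimal, self-contained sketch of that idiom (the `node` struct and
`canHaveTag` callback are trimmed stand-ins, not the types from this patch):

```go
package main

import (
	"fmt"
	"slices"
)

type node struct {
	requestTags []string // tags the node asked for via Hostinfo
	forcedTags  []string // tags forced onto the node via the CLI/API
}

// validTags mirrors the pattern from grpcv1.go and tail.go: keep only the
// requested tags the policy authorizes, always keep forced tags, de-dupe.
func validTags(canHaveTag func(tag string) bool, n node) []string {
	var tags []string
	for _, tag := range n.requestTags {
		if canHaveTag(tag) {
			tags = append(tags, tag)
		}
	}
	tags = append(tags, n.forcedTags...)
	// The patch uses lo.Uniq; sort+compact is a stdlib way to de-duplicate.
	slices.Sort(tags)
	return slices.Compact(tags)
}

func main() {
	n := node{
		requestTags: []string{"tag:web", "tag:db"},
		forcedTags:  []string{"tag:exit"},
	}
	// Pretend the policy only lets this node hold tag:web.
	owned := map[string]bool{"tag:web": true}
	fmt.Println(validTags(func(t string) bool { return owned[t] }, n))
	// Output: [tag:exit tag:web]
}
```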
diff --git a/hscontrol/mapper/tail.go b/hscontrol/mapper/tail.go index 4a285290..9e3ff4cf 100644 --- a/hscontrol/mapper/tail.go +++ b/hscontrol/mapper/tail.go @@ -81,7 +81,12 @@ func tailNode( return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err) } - tags := polMan.Tags(node) + var tags []string + for _, tag := range node.RequestTags() { + if polMan.NodeCanHaveTag(node, tag) { + tags = append(tags, tag) + } + } tags = lo.Uniq(append(tags, node.ForcedTags...)) tNode := tailcfg.Node{ diff --git a/hscontrol/mapper/tail_test.go b/hscontrol/mapper/tail_test.go index 6a620467..919ea43c 100644 --- a/hscontrol/mapper/tail_test.go +++ b/hscontrol/mapper/tail_test.go @@ -11,6 +11,7 @@ import ( "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/routes" "github.com/juanfont/headscale/hscontrol/types" + "github.com/stretchr/testify/require" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -49,7 +50,7 @@ func TestTailNode(t *testing.T) { tests := []struct { name string node *types.Node - pol *policy.ACLPolicy + pol []byte dnsConfig *tailcfg.DNSConfig baseDomain string want *tailcfg.Node @@ -61,7 +62,6 @@ func TestTailNode(t *testing.T) { GivenName: "empty", Hostinfo: &tailcfg.Hostinfo{}, }, - pol: &policy.ACLPolicy{}, dnsConfig: &tailcfg.DNSConfig{}, baseDomain: "", want: &tailcfg.Node{ @@ -117,7 +117,6 @@ func TestTailNode(t *testing.T) { ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), netip.MustParsePrefix("192.168.0.0/24")}, CreatedAt: created, }, - pol: &policy.ACLPolicy{}, dnsConfig: &tailcfg.DNSConfig{}, baseDomain: "", want: &tailcfg.Node{ @@ -179,7 +178,8 @@ func TestTailNode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - polMan, _ := policy.NewPolicyManagerForTest(tt.pol, []types.User{}, types.Nodes{tt.node}) + polMan, err := policy.NewPolicyManager(tt.pol, []types.User{}, types.Nodes{tt.node}) + require.NoError(t, err) primary := routes.New() cfg := &types.Config{ BaseDomain: tt.baseDomain, @@ -248,7 +248,7 @@ func TestNodeExpiry(t *testing.T) { tn, err := tailNode( node, 0, - &policy.PolicyManagerV1{}, + nil, // TODO(kradalby): removed in merge but error? 
 	nil,
 	&types.Config{},
 )
diff --git a/hscontrol/oidc.go b/hscontrol/oidc.go
index d7a46a87..a1807717 100644
--- a/hscontrol/oidc.go
+++ b/hscontrol/oidc.go
@@ -513,7 +513,7 @@ func renderOIDCCallbackTemplate(
 ) (*bytes.Buffer, error) {
 	var content bytes.Buffer
 	if err := oidcCallbackTemplate.Execute(&content, oidcCallbackTemplateConfig{
-		User: user.DisplayNameOrUsername(),
+		User: user.Display(),
 		Verb: verb,
 	}); err != nil {
 		return nil, fmt.Errorf("rendering OIDC callback template: %w", err)
diff --git a/hscontrol/policy/pm.go b/hscontrol/policy/pm.go
index 980dc5aa..24f68ca1 100644
--- a/hscontrol/policy/pm.go
+++ b/hscontrol/policy/pm.go
@@ -1,219 +1,81 @@
 package policy
 import (
-	"fmt"
-	"io"
 	"net/netip"
-	"os"
-	"sync"
+	policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1"
+	policyv2 "github.com/juanfont/headscale/hscontrol/policy/v2"
 	"github.com/juanfont/headscale/hscontrol/types"
-	"github.com/rs/zerolog/log"
-	"go4.org/netipx"
+	"tailscale.com/envknob"
 	"tailscale.com/tailcfg"
-	"tailscale.com/util/deephash"
+)
+
+var (
+	polv2 = envknob.Bool("HEADSCALE_EXPERIMENTAL_POLICY_V2")
 )
 type PolicyManager interface {
 	Filter() []tailcfg.FilterRule
 	SSHPolicy(*types.Node) (*tailcfg.SSHPolicy, error)
-	Tags(*types.Node) []string
-	ApproversForRoute(netip.Prefix) []string
-	ExpandAlias(string) (*netipx.IPSet, error)
 	SetPolicy([]byte) (bool, error)
 	SetUsers(users []types.User) (bool, error)
 	SetNodes(nodes types.Nodes) (bool, error)
+	// NodeCanHaveTag reports whether the given node can have the given tag.
+	NodeCanHaveTag(*types.Node, string) bool
 	// NodeCanApproveRoute reports whether the given node can approve the given route.
 	NodeCanApproveRoute(*types.Node, netip.Prefix) bool
+
+	Version() int
+	DebugString() string
 }
-func NewPolicyManagerFromPath(path string, users []types.User, nodes types.Nodes) (PolicyManager, error) {
-	policyFile, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	defer policyFile.Close()
-
-	policyBytes, err := io.ReadAll(policyFile)
-	if err != nil {
-		return nil, err
-	}
-
-	return NewPolicyManager(policyBytes, users, nodes)
-}
-
-func NewPolicyManager(polB []byte, users []types.User, nodes types.Nodes) (PolicyManager, error) {
-	var pol *ACLPolicy
+// NewPolicyManager returns a new policy manager; the version is determined by
+// the environment flag "HEADSCALE_EXPERIMENTAL_POLICY_V2".
+func NewPolicyManager(pol []byte, users []types.User, nodes types.Nodes) (PolicyManager, error) {
+	var polMan PolicyManager
 	var err error
-	if polB != nil && len(polB) > 0 {
-		pol, err = LoadACLPolicyFromBytes(polB)
+	if polv2 {
+		polMan, err = policyv2.NewPolicyManager(pol, users, nodes)
 		if err != nil {
-			return nil, fmt.Errorf("parsing policy: %w", err)
+			return nil, err
+		}
+	} else {
+		polMan, err = policyv1.NewPolicyManager(pol, users, nodes)
+		if err != nil {
+			return nil, err
 		}
 	}
-	pm := PolicyManagerV1{
-		pol:   pol,
-		users: users,
-		nodes: nodes,
-	}
-
-	_, err = pm.updateLocked()
-	if err != nil {
-		return nil, err
-	}
-
-	return &pm, nil
+	return polMan, err
 }
-func NewPolicyManagerForTest(pol *ACLPolicy, users []types.User, nodes types.Nodes) (PolicyManager, error) {
-	pm := PolicyManagerV1{
-		pol:   pol,
-		users: users,
-		nodes: nodes,
-	}
+// PolicyManagersForTest returns all available PolicyManagers to be used
+// in tests that validate that they behave the same.
+func PolicyManagersForTest(pol []byte, users []types.User, nodes types.Nodes) ([]PolicyManager, error) { + var polMans []PolicyManager - _, err := pm.updateLocked() - if err != nil { - return nil, err - } - - return &pm, nil -} - -type PolicyManagerV1 struct { - mu sync.Mutex - pol *ACLPolicy - - users []types.User - nodes types.Nodes - - filterHash deephash.Sum - filter []tailcfg.FilterRule -} - -// updateLocked updates the filter rules based on the current policy and nodes. -// It must be called with the lock held. -func (pm *PolicyManagerV1) updateLocked() (bool, error) { - filter, err := pm.pol.CompileFilterRules(pm.users, pm.nodes) - if err != nil { - return false, fmt.Errorf("compiling filter rules: %w", err) - } - - filterHash := deephash.Hash(&filter) - if filterHash == pm.filterHash { - return false, nil - } - - pm.filter = filter - pm.filterHash = filterHash - - return true, nil -} - -func (pm *PolicyManagerV1) Filter() []tailcfg.FilterRule { - pm.mu.Lock() - defer pm.mu.Unlock() - return pm.filter -} - -func (pm *PolicyManagerV1) SSHPolicy(node *types.Node) (*tailcfg.SSHPolicy, error) { - pm.mu.Lock() - defer pm.mu.Unlock() - - return pm.pol.CompileSSHPolicy(node, pm.users, pm.nodes) -} - -func (pm *PolicyManagerV1) SetPolicy(polB []byte) (bool, error) { - if len(polB) == 0 { - return false, nil - } - - pol, err := LoadACLPolicyFromBytes(polB) - if err != nil { - return false, fmt.Errorf("parsing policy: %w", err) - } - - pm.mu.Lock() - defer pm.mu.Unlock() - - pm.pol = pol - - return pm.updateLocked() -} - -// SetUsers updates the users in the policy manager and updates the filter rules. -func (pm *PolicyManagerV1) SetUsers(users []types.User) (bool, error) { - pm.mu.Lock() - defer pm.mu.Unlock() - - pm.users = users - return pm.updateLocked() -} - -// SetNodes updates the nodes in the policy manager and updates the filter rules. 
-func (pm *PolicyManagerV1) SetNodes(nodes types.Nodes) (bool, error) { - pm.mu.Lock() - defer pm.mu.Unlock() - pm.nodes = nodes - return pm.updateLocked() -} - -func (pm *PolicyManagerV1) Tags(node *types.Node) []string { - if pm == nil { - return nil - } - - tags, invalid := pm.pol.TagsOfNode(pm.users, node) - log.Debug().Strs("authorised_tags", tags).Strs("unauthorised_tags", invalid).Uint64("node.id", node.ID.Uint64()).Msg("tags provided by policy") - return tags -} - -func (pm *PolicyManagerV1) ApproversForRoute(route netip.Prefix) []string { - // TODO(kradalby): This can be a parse error of the address in the policy, - // in the new policy this will be typed and not a problem, in this policy - // we will just return empty list - if pm.pol == nil { - return nil - } - approvers, _ := pm.pol.AutoApprovers.GetRouteApprovers(route) - return approvers -} - -func (pm *PolicyManagerV1) ExpandAlias(alias string) (*netipx.IPSet, error) { - ips, err := pm.pol.ExpandAlias(pm.nodes, pm.users, alias) - if err != nil { - return nil, err - } - return ips, nil -} - -func (pm *PolicyManagerV1) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool { - if pm.pol == nil { - return false - } - - pm.mu.Lock() - defer pm.mu.Unlock() - - approvers, _ := pm.pol.AutoApprovers.GetRouteApprovers(route) - - for _, approvedAlias := range approvers { - if approvedAlias == node.User.Username() { - return true - } else { - ips, err := pm.pol.ExpandAlias(pm.nodes, pm.users, approvedAlias) - if err != nil { - return false - } - - // approvedIPs should contain all of node's IPs if it matches the rule, so check for first - if ips.Contains(*node.IPv4) { - return true - } + for _, pmf := range PolicyManagerFuncsForTest(pol) { + pm, err := pmf(users, nodes) + if err != nil { + return nil, err } + polMans = append(polMans, pm) } - return false + return polMans, nil +} + +func PolicyManagerFuncsForTest(pol []byte) []func([]types.User, types.Nodes) (PolicyManager, error) { + var polmanFuncs []func([]types.User, types.Nodes) (PolicyManager, error) + + polmanFuncs = append(polmanFuncs, func(u []types.User, n types.Nodes) (PolicyManager, error) { + return policyv1.NewPolicyManager(pol, u, n) + }) + polmanFuncs = append(polmanFuncs, func(u []types.User, n types.Nodes) (PolicyManager, error) { + return policyv2.NewPolicyManager(pol, u, n) + }) + + return polmanFuncs } diff --git a/hscontrol/policy/policy.go b/hscontrol/policy/policy.go new file mode 100644 index 00000000..ba375beb --- /dev/null +++ b/hscontrol/policy/policy.go @@ -0,0 +1,109 @@ +package policy + +import ( + "net/netip" + "slices" + + "github.com/juanfont/headscale/hscontrol/types" + "github.com/juanfont/headscale/hscontrol/util" + "github.com/samber/lo" + "tailscale.com/net/tsaddr" + "tailscale.com/tailcfg" +) + +// FilterNodesByACL returns the list of peers authorized to be accessed from a given node. +func FilterNodesByACL( + node *types.Node, + nodes types.Nodes, + filter []tailcfg.FilterRule, +) types.Nodes { + var result types.Nodes + + for index, peer := range nodes { + if peer.ID == node.ID { + continue + } + + if node.CanAccess(filter, nodes[index]) || peer.CanAccess(filter, node) { + result = append(result, peer) + } + } + + return result +} + +// ReduceFilterRules takes a node and a set of rules and removes all rules and destinations +// that are not relevant to that particular node. 
+func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
+	ret := []tailcfg.FilterRule{}
+
+	for _, rule := range rules {
+		// record if the rule is actually relevant for the given node.
+		var dests []tailcfg.NetPortRange
+	DEST_LOOP:
+		for _, dest := range rule.DstPorts {
+			expanded, err := util.ParseIPSet(dest.IP, nil)
+			// Fail closed: if we can't parse it, then we should not allow
+			// access.
+			if err != nil {
+				continue DEST_LOOP
+			}
+
+			if node.InIPSet(expanded) {
+				dests = append(dests, dest)
+				continue DEST_LOOP
+			}
+
+			// If the node exposes routes, ensure they are not removed
+			// when the filters are reduced.
+			if node.Hostinfo != nil {
+				if len(node.Hostinfo.RoutableIPs) > 0 {
+					for _, routableIP := range node.Hostinfo.RoutableIPs {
+						if expanded.OverlapsPrefix(routableIP) {
+							dests = append(dests, dest)
+							continue DEST_LOOP
+						}
+					}
+				}
+			}
+		}
+
+		if len(dests) > 0 {
+			ret = append(ret, tailcfg.FilterRule{
+				SrcIPs:   rule.SrcIPs,
+				DstPorts: dests,
+				IPProto:  rule.IPProto,
+			})
+		}
+	}
+
+	return ret
+}
+
+// AutoApproveRoutes approves any route that can be autoapproved from
+// the node's perspective according to the given policy.
+// It reports true if any routes were approved.
+func AutoApproveRoutes(pm PolicyManager, node *types.Node) bool {
+	if pm == nil {
+		return false
+	}
+	var newApproved []netip.Prefix
+	for _, route := range node.AnnouncedRoutes() {
+		if pm.NodeCanApproveRoute(node, route) {
+			newApproved = append(newApproved, route)
+		}
+	}
+	if newApproved != nil {
+		newApproved = append(newApproved, node.ApprovedRoutes...)
+		tsaddr.SortPrefixes(newApproved)
+		newApproved = slices.Compact(newApproved)
+		newApproved = lo.Filter(newApproved, func(route netip.Prefix, index int) bool {
+			return route.IsValid()
+		})
+		node.ApprovedRoutes = newApproved
+
+		return true
+	}
+
+	return false
+}
diff --git a/hscontrol/policy/policy_test.go b/hscontrol/policy/policy_test.go
new file mode 100644
index 00000000..e67af16f
--- /dev/null
+++ b/hscontrol/policy/policy_test.go
@@ -0,0 +1,1455 @@
+package policy
+
+import (
+	"fmt"
+	"net/netip"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/juanfont/headscale/hscontrol/types"
+	"github.com/juanfont/headscale/hscontrol/util"
+	"github.com/rs/zerolog/log"
+	"github.com/stretchr/testify/require"
+	"gorm.io/gorm"
+	"tailscale.com/net/tsaddr"
+	"tailscale.com/tailcfg"
+)
+
+var ap = func(ipStr string) *netip.Addr {
+	ip := netip.MustParseAddr(ipStr)
+	return &ip
+}
+
+// hsExitNodeDestForTest is the list of destination IP ranges that are allowed when
+// we use headscale "autogroup:internet".
+var hsExitNodeDestForTest = []tailcfg.NetPortRange{ + {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny}, + {IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny}, + {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "2000::/3", Ports: tailcfg.PortRangeAny}, +} + +func TestTheInternet(t *testing.T) { + internetSet := util.TheInternet() + + internetPrefs := internetSet.Prefixes() + + for i := range internetPrefs { + if internetPrefs[i].String() != hsExitNodeDestForTest[i].IP { + t.Errorf( + "prefix from internet set %q != hsExit list %q", + internetPrefs[i].String(), + hsExitNodeDestForTest[i].IP, + ) + } + } + + if len(internetPrefs) != len(hsExitNodeDestForTest) { + t.Fatalf( + "expected same length of prefixes, internet: %d, hsExit: %d", + len(internetPrefs), + len(hsExitNodeDestForTest), + ) + } +} + +// addAtForFilterV1 returns a copy of the given userslice +// and adds "@" character to the Name field. +// This is a "compatibility" move to allow the old tests +// to run against the "new" format which requires "@". 
+func addAtForFilterV1(users types.Users) types.Users { + ret := make(types.Users, len(users)) + for idx := range users { + ret[idx] = users[idx] + ret[idx].Name = ret[idx].Name + "@" + } + return ret +} + +func TestReduceFilterRules(t *testing.T) { + users := types.Users{ + types.User{Model: gorm.Model{ID: 1}, Name: "mickael"}, + types.User{Model: gorm.Model{ID: 2}, Name: "user1"}, + types.User{Model: gorm.Model{ID: 3}, Name: "user2"}, + types.User{Model: gorm.Model{ID: 4}, Name: "user100"}, + types.User{Model: gorm.Model{ID: 5}, Name: "user3"}, + } + + tests := []struct { + name string + node *types.Node + peers types.Nodes + pol string + want []tailcfg.FilterRule + }{ + { + name: "host1-can-reach-host2-no-rules", + pol: ` +{ + "acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "100.64.0.1" + ], + "dst": [ + "100.64.0.2:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), + User: users[0], + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), + User: users[0], + }, + }, + want: []tailcfg.FilterRule{}, + }, + { + name: "1604-subnet-routers-are-preserved", + pol: ` +{ + "groups": { + "group:admins": [ + "user1@" + ] + }, + "acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "group:admins" + ], + "dst": [ + "group:admins:*" + ] + }, + { + "action": "accept", + "proto": "", + "src": [ + "group:admins" + ], + "dst": [ + "10.33.0.0/16:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: users[1], + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{ + netip.MustParsePrefix("10.33.0.0/16"), + }, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0::2"), + User: users[1], + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{ + "100.64.0.1/32", + "100.64.0.2/32", + "fd7a:115c:a1e0::1/128", + "fd7a:115c:a1e0::2/128", + }, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.1/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::1/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{ + "100.64.0.1/32", + "100.64.0.2/32", + "fd7a:115c:a1e0::1/128", + "fd7a:115c:a1e0::2/128", + }, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "10.33.0.0/16", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-the-client", + pol: ` +{ + "groups": { + "group:team": [ + "user3@", + "user2@", + "user1@" + ] + }, + "hosts": { + "internal": "100.64.0.100/32" + }, + "acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "internal:*" + ] + }, + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "autogroup:internet:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: users[1], + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0::2"), + User: users[2], + }, + // "internal" exit node + &types.Node{ + IPv4: ap("100.64.0.100"), + IPv6: ap("fd7a:115c:a1e0::100"), + User: users[3], + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: tsaddr.ExitRoutes(), + }, + }, + }, + want: []tailcfg.FilterRule{}, + }, + { + name: "1786-reducing-breaks-exit-nodes-the-exit", + pol: ` +{ + "groups": { + "group:team": [ + "user3@", + "user2@", + "user1@" + ] + }, + "hosts": { + "internal": "100.64.0.100/32" + }, + 
"acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "internal:*" + ] + }, + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "autogroup:internet:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.100"), + IPv6: ap("fd7a:115c:a1e0::100"), + User: users[3], + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: tsaddr.ExitRoutes(), + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0::2"), + User: users[2], + }, + &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: users[1], + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: hsExitNodeDestForTest, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-the-example-from-issue", + pol: ` +{ + "groups": { + "group:team": [ + "user3@", + "user2@", + "user1@" + ] + }, + "hosts": { + "internal": "100.64.0.100/32" + }, + "acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "internal:*" + ] + }, + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "0.0.0.0/5:*", + "8.0.0.0/7:*", + "11.0.0.0/8:*", + "12.0.0.0/6:*", + "16.0.0.0/4:*", + "32.0.0.0/3:*", + "64.0.0.0/2:*", + "128.0.0.0/3:*", + "160.0.0.0/5:*", + "168.0.0.0/6:*", + "172.0.0.0/12:*", + "172.32.0.0/11:*", + "172.64.0.0/10:*", + "172.128.0.0/9:*", + "173.0.0.0/8:*", + "174.0.0.0/7:*", + "176.0.0.0/4:*", + "192.0.0.0/9:*", + "192.128.0.0/11:*", + "192.160.0.0/13:*", + "192.169.0.0/16:*", + "192.170.0.0/15:*", + "192.172.0.0/14:*", + "192.176.0.0/12:*", + "192.192.0.0/10:*", + "193.0.0.0/8:*", + "194.0.0.0/7:*", + "196.0.0.0/6:*", + "200.0.0.0/5:*", + "208.0.0.0/4:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.100"), + IPv6: ap("fd7a:115c:a1e0::100"), + User: users[3], + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: tsaddr.ExitRoutes(), + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0::2"), + User: users[2], + }, + &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: users[1], + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny}, + // This should not be included I believe, seems like + // this is a bug in the v1 code. 
+ // For example: + // If a src or dst includes "64.0.0.0/2:*", it will include the 100.64/16 range, which + // means that it will need to fetch the IPv6 addrs of the node to include the full range. + // Clearly, if a user sets the dst to "64.0.0.0/2:*", it is more likely an exit node + // and this would be strange behaviour. + // TODO(kradalby): Remove before launch. + {IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::100/128", Ports: tailcfg.PortRangeAny}, + // End + {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, + {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, + {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, + {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, + {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, + {IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny}, + {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, + {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, + {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, + {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, + {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, + {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, + {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, + {IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny}, + {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-app-connector-like", + pol: ` +{ + "groups": { + "group:team": [ + "user3@", + "user2@", + "user1@" + ] + }, + "hosts": { + "internal": "100.64.0.100/32" + }, + "acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "internal:*" + ] + }, + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "8.0.0.0/8:*", + "16.0.0.0/8:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.100"), + IPv6: ap("fd7a:115c:a1e0::100"), + User: users[3], + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/16"), netip.MustParsePrefix("16.0.0.0/16")}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0::2"), + User: users[2], + }, + &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: users[1], + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "8.0.0.0/8", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "16.0.0.0/8", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + { + name: "1786-reducing-breaks-exit-nodes-app-connector-like2", + pol: ` +{ + "groups": { + "group:team": [ + "user3@", + "user2@", + "user1@" + ] + }, + "hosts": { + "internal": "100.64.0.100/32" + }, +
"acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "internal:*" + ] + }, + { + "action": "accept", + "proto": "", + "src": [ + "group:team" + ], + "dst": [ + "8.0.0.0/16:*", + "16.0.0.0/16:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.100"), + IPv6: ap("fd7a:115c:a1e0::100"), + User: users[3], + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("8.0.0.0/8"), netip.MustParsePrefix("16.0.0.0/8")}, + }, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0::2"), + User: users[2], + }, + &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: users[1], + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + { + SrcIPs: []string{"100.64.0.1/32", "100.64.0.2/32", "fd7a:115c:a1e0::1/128", "fd7a:115c:a1e0::2/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "8.0.0.0/16", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "16.0.0.0/16", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + { + name: "1817-reduce-breaks-32-mask", + pol: ` +{ + "groups": { + "group:access": [ + "user1@" + ] + }, + "hosts": { + "dns1": "172.16.0.21/32", + "vlan1": "172.16.0.0/24" + }, + "acls": [ + { + "action": "accept", + "proto": "", + "src": [ + "group:access" + ], + "dst": [ + "tag:access-servers:*", + "dns1:*" + ] + } + ], +} +`, + node: &types.Node{ + IPv4: ap("100.64.0.100"), + IPv6: ap("fd7a:115c:a1e0::100"), + User: users[3], + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")}, + }, + ForcedTags: []string{"tag:access-servers"}, + }, + peers: types.Nodes{ + &types.Node{ + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: users[1], + }, + }, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.64.0.100/32", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "fd7a:115c:a1e0::100/128", + Ports: tailcfg.PortRangeAny, + }, + { + IP: "172.16.0.21/32", + Ports: tailcfg.PortRangeAny, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + for idx, pmf := range PolicyManagerFuncsForTest([]byte(tt.pol)) { + version := idx + 1 + t.Run(fmt.Sprintf("%s-v%d", tt.name, version), func(t *testing.T) { + var pm PolicyManager + var err error + if version == 1 { + pm, err = pmf(addAtForFilterV1(users), append(tt.peers, tt.node)) + } else { + pm, err = pmf(users, append(tt.peers, tt.node)) + } + require.NoError(t, err) + got := pm.Filter() + got = ReduceFilterRules(tt.node, got) + + if diff := cmp.Diff(tt.want, got); diff != "" { + log.Trace().Interface("got", got).Msg("result") + t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff) + } + }) + } + } +} + +func TestFilterNodesByACL(t *testing.T) { + type args struct { + nodes types.Nodes + rules []tailcfg.FilterRule + node *types.Node + } + tests := []struct { + name string + args args + want types.Nodes + }{ + { + name: "all hosts can talk to each other", + args: args{ + nodes: types.Nodes{ // list of all nodes in the database + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + 
User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + rules: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1", "100.64.0.2", "100.64.0.3"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "*"}, + }, + }, + }, + node: &types.Node{ // current nodes + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + }, + want: types.Nodes{ + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + }, + { + name: "One host can talk to another, but not all hosts", + args: args{ + nodes: types.Nodes{ // list of all nodes in the database + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + rules: []tailcfg.FilterRule{ // list of all ACLRules registered + { + SrcIPs: []string{"100.64.0.1", "100.64.0.2", "100.64.0.3"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.2"}, + }, + }, + }, + node: &types.Node{ // current nodes + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + }, + want: types.Nodes{ + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + }, + }, + { + name: "host cannot directly talk to destination, but return path is authorized", + args: args{ + nodes: types.Nodes{ // list of all nodes in the database + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + rules: []tailcfg.FilterRule{ // list of all ACLRules registered + { + SrcIPs: []string{"100.64.0.3"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.2"}, + }, + }, + }, + node: &types.Node{ // current nodes + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + }, + want: types.Nodes{ + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + }, + { + name: "rules allows all hosts to reach one destination", + args: args{ + nodes: types.Nodes{ // list of all nodes in the database + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + rules: []tailcfg.FilterRule{ // list of all ACLRules registered + { + SrcIPs: []string{"*"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.2"}, + }, + }, + }, + node: &types.Node{ // current nodes + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + }, + want: types.Nodes{ + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + }, + }, + { + name: "rules allows all hosts to reach one destination, destination can reach all hosts", + args: args{ + nodes: types.Nodes{ // list of all nodes in the database + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: 
types.User{Name: "mickael"}, + }, + }, + rules: []tailcfg.FilterRule{ // list of all ACLRules registered + { + SrcIPs: []string{"*"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.2"}, + }, + }, + }, + node: &types.Node{ // current nodes + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + }, + want: types.Nodes{ + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + }, + { + name: "rule allows all hosts to reach all destinations", + args: args{ + nodes: types.Nodes{ // list of all nodes in the database + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + rules: []tailcfg.FilterRule{ // list of all ACLRules registered + { + SrcIPs: []string{"*"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "*"}, + }, + }, + }, + node: &types.Node{ // current nodes + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + }, + want: types.Nodes{ + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + }, + { + name: "without rule all communications are forbidden", + args: args{ + nodes: types.Nodes{ // list of all nodes in the database + &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + User: types.User{Name: "joe"}, + }, + &types.Node{ + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + &types.Node{ + ID: 3, + IPv4: ap("100.64.0.3"), + User: types.User{Name: "mickael"}, + }, + }, + rules: []tailcfg.FilterRule{ // list of all ACLRules registered + }, + node: &types.Node{ // current nodes + ID: 2, + IPv4: ap("100.64.0.2"), + User: types.User{Name: "marc"}, + }, + }, + want: nil, + }, + { + // Investigating 699 + // Found some nodes: [ts-head-8w6paa ts-unstable-lys2ib ts-head-upcrmb ts-unstable-rlwpvr] nodes=ts-head-8w6paa + // ACL rules generated ACL=[{"DstPorts":[{"Bits":null,"IP":"*","Ports":{"First":0,"Last":65535}}],"SrcIPs":["fd7a:115c:a1e0::3","100.64.0.3","fd7a:115c:a1e0::4","100.64.0.4"]}] + // ACL Cache Map={"100.64.0.3":{"*":{}},"100.64.0.4":{"*":{}},"fd7a:115c:a1e0::3":{"*":{}},"fd7a:115c:a1e0::4":{"*":{}}} + name: "issue-699-broken-star", + args: args{ + nodes: types.Nodes{ // + &types.Node{ + ID: 1, + Hostname: "ts-head-upcrmb", + IPv4: ap("100.64.0.3"), + IPv6: ap("fd7a:115c:a1e0::3"), + User: types.User{Name: "user1"}, + }, + &types.Node{ + ID: 2, + Hostname: "ts-unstable-rlwpvr", + IPv4: ap("100.64.0.4"), + IPv6: ap("fd7a:115c:a1e0::4"), + User: types.User{Name: "user1"}, + }, + &types.Node{ + ID: 3, + Hostname: "ts-head-8w6paa", + IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user2"}, + }, + &types.Node{ + ID: 4, + Hostname: "ts-unstable-lys2ib", + IPv4: ap("100.64.0.2"), + IPv6: ap("fd7a:115c:a1e0::2"), + User: types.User{Name: "user2"}, + }, + }, + rules: []tailcfg.FilterRule{ // list of all ACLRules registered + { + DstPorts: []tailcfg.NetPortRange{ + { + IP: "*", + Ports: tailcfg.PortRange{First: 0, Last: 65535}, + }, + }, + SrcIPs: []string{ + "fd7a:115c:a1e0::3", "100.64.0.3", + "fd7a:115c:a1e0::4", "100.64.0.4", + }, + }, + }, + node: &types.Node{ // current nodes + ID: 3, + Hostname: "ts-head-8w6paa", 
+ IPv4: ap("100.64.0.1"), + IPv6: ap("fd7a:115c:a1e0::1"), + User: types.User{Name: "user2"}, + }, + }, + want: types.Nodes{ + &types.Node{ + ID: 1, + Hostname: "ts-head-upcrmb", + IPv4: ap("100.64.0.3"), + IPv6: ap("fd7a:115c:a1e0::3"), + User: types.User{Name: "user1"}, + }, + &types.Node{ + ID: 2, + Hostname: "ts-unstable-rlwpvr", + IPv4: ap("100.64.0.4"), + IPv6: ap("fd7a:115c:a1e0::4"), + User: types.User{Name: "user1"}, + }, + }, + }, + { + name: "failing-edge-case-during-p3-refactor", + args: args{ + nodes: []*types.Node{ + { + ID: 1, + IPv4: ap("100.64.0.2"), + Hostname: "peer1", + User: types.User{Name: "mini"}, + }, + { + ID: 2, + IPv4: ap("100.64.0.3"), + Hostname: "peer2", + User: types.User{Name: "peer2"}, + }, + }, + rules: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.64.0.1/32"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, + {IP: "::/0", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + node: &types.Node{ + ID: 0, + IPv4: ap("100.64.0.1"), + Hostname: "mini", + User: types.User{Name: "mini"}, + }, + }, + want: []*types.Node{ + { + ID: 2, + IPv4: ap("100.64.0.3"), + Hostname: "peer2", + User: types.User{Name: "peer2"}, + }, + }, + }, + { + name: "p4-host-in-netmap-user2-dest-bug", + args: args{ + nodes: []*types.Node{ + { + ID: 1, + IPv4: ap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, + }, + { + ID: 0, + IPv4: ap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, + }, + { + ID: 3, + IPv4: ap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, + }, + }, + rules: []tailcfg.FilterRule{ + { + SrcIPs: []string{ + "100.64.0.3/32", + "100.64.0.4/32", + "fd7a:115c:a1e0::3/128", + "fd7a:115c:a1e0::4/128", + }, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, + {IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny}, + }, + }, + { + SrcIPs: []string{ + "100.64.0.1/32", + "100.64.0.2/32", + "fd7a:115c:a1e0::1/128", + "fd7a:115c:a1e0::2/128", + }, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, + {IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + node: &types.Node{ + ID: 2, + IPv4: ap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, + }, + }, + want: []*types.Node{ + { + ID: 1, + IPv4: ap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, + }, + { + ID: 0, + IPv4: ap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, + }, + { + ID: 3, + IPv4: ap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, + }, + }, + }, + { + name: "p4-host-in-netmap-user1-dest-bug", + args: args{ + nodes: []*types.Node{ + { + ID: 1, + IPv4: ap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, + }, + { + ID: 2, + IPv4: ap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, + }, + { + ID: 3, + IPv4: ap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, + }, + }, + rules: []tailcfg.FilterRule{ + { + SrcIPs: []string{ + "100.64.0.1/32", + "100.64.0.2/32", + "fd7a:115c:a1e0::1/128", + "fd7a:115c:a1e0::2/128", + }, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.1/32", Ports: tailcfg.PortRangeAny}, + 
{IP: "100.64.0.2/32", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny}, + }, + }, + { + SrcIPs: []string{ + "100.64.0.1/32", + "100.64.0.2/32", + "fd7a:115c:a1e0::1/128", + "fd7a:115c:a1e0::2/128", + }, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, + {IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny}, + {IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + node: &types.Node{ + ID: 0, + IPv4: ap("100.64.0.1"), + Hostname: "user1-1", + User: types.User{Name: "user1"}, + }, + }, + want: []*types.Node{ + { + ID: 1, + IPv4: ap("100.64.0.2"), + Hostname: "user1-2", + User: types.User{Name: "user1"}, + }, + { + ID: 2, + IPv4: ap("100.64.0.3"), + Hostname: "user-2-1", + User: types.User{Name: "user2"}, + }, + { + ID: 3, + IPv4: ap("100.64.0.4"), + Hostname: "user2-2", + User: types.User{Name: "user2"}, + }, + }, + }, + + { + name: "subnet-router-with-only-route", + args: args{ + nodes: []*types.Node{ + { + ID: 1, + IPv4: ap("100.64.0.1"), + Hostname: "user1", + User: types.User{Name: "user1"}, + }, + { + ID: 2, + IPv4: ap("100.64.0.2"), + Hostname: "router", + User: types.User{Name: "router"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, + }, + ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, + }, + }, + rules: []tailcfg.FilterRule{ + { + SrcIPs: []string{ + "100.64.0.1/32", + }, + DstPorts: []tailcfg.NetPortRange{ + {IP: "10.33.0.0/16", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + node: &types.Node{ + ID: 1, + IPv4: ap("100.64.0.1"), + Hostname: "user1", + User: types.User{Name: "user1"}, + }, + }, + want: []*types.Node{ + { + ID: 2, + IPv4: ap("100.64.0.2"), + Hostname: "router", + User: types.User{Name: "router"}, + Hostinfo: &tailcfg.Hostinfo{ + RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, + }, + ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := FilterNodesByACL( + tt.args.node, + tt.args.nodes, + tt.args.rules, + ) + if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" { + t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff) + } + }) + } +} diff --git a/hscontrol/policy/acls.go b/hscontrol/policy/v1/acls.go similarity index 88% rename from hscontrol/policy/acls.go rename to hscontrol/policy/v1/acls.go index eab7063b..945f171a 100644 --- a/hscontrol/policy/acls.go +++ b/hscontrol/policy/v1/acls.go @@ -1,11 +1,10 @@ -package policy +package v1 import ( "encoding/json" "errors" "fmt" "io" - "iter" "net/netip" "os" "slices" @@ -18,7 +17,6 @@ import ( "github.com/rs/zerolog/log" "github.com/tailscale/hujson" "go4.org/netipx" - "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" ) @@ -37,38 +35,6 @@ const ( expectedTokenItems = 2 ) -var theInternetSet *netipx.IPSet - -// theInternet returns the IPSet for the Internet. 
-// https://www.youtube.com/watch?v=iDbyYGrswtg -func theInternet() *netipx.IPSet { - if theInternetSet != nil { - return theInternetSet - } - - var internetBuilder netipx.IPSetBuilder - internetBuilder.AddPrefix(netip.MustParsePrefix("2000::/3")) - internetBuilder.AddPrefix(tsaddr.AllIPv4()) - - // Delete Private network addresses - // https://datatracker.ietf.org/doc/html/rfc1918 - internetBuilder.RemovePrefix(netip.MustParsePrefix("fc00::/7")) - internetBuilder.RemovePrefix(netip.MustParsePrefix("10.0.0.0/8")) - internetBuilder.RemovePrefix(netip.MustParsePrefix("172.16.0.0/12")) - internetBuilder.RemovePrefix(netip.MustParsePrefix("192.168.0.0/16")) - - // Delete Tailscale networks - internetBuilder.RemovePrefix(tsaddr.TailscaleULARange()) - internetBuilder.RemovePrefix(tsaddr.CGNATRange()) - - // Delete "can't find DHCP networks" - internetBuilder.RemovePrefix(netip.MustParsePrefix("fe80::/10")) // link-local - internetBuilder.RemovePrefix(netip.MustParsePrefix("169.254.0.0/16")) - - theInternetSet, _ := internetBuilder.IPSet() - return theInternetSet -} - // For some reason golang.org/x/net/internal/iana is an internal package. const ( protocolICMP = 1 // Internet Control Message @@ -240,53 +206,6 @@ func (pol *ACLPolicy) CompileFilterRules( return rules, nil } -// ReduceFilterRules takes a node and a set of rules and removes all rules and destinations -// that are not relevant to that particular node. -func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.FilterRule { - // TODO(kradalby): Make this nil and not alloc unless needed - ret := []tailcfg.FilterRule{} - - for _, rule := range rules { - // record if the rule is actually relevant for the given node. - var dests []tailcfg.NetPortRange - DEST_LOOP: - for _, dest := range rule.DstPorts { - expanded, err := util.ParseIPSet(dest.IP, nil) - // Fail closed, if we can't parse it, then we should not allow - // access. - if err != nil { - continue DEST_LOOP - } - - if node.InIPSet(expanded) { - dests = append(dests, dest) - continue DEST_LOOP - } - - // If the node exposes routes, ensure they are note removed - // when the filters are reduced. - if len(node.SubnetRoutes()) > 0 { - for _, routableIP := range node.SubnetRoutes() { - if expanded.OverlapsPrefix(routableIP) { - dests = append(dests, dest) - continue DEST_LOOP - } - } - } - } - - if len(dests) > 0 { - ret = append(ret, tailcfg.FilterRule{ - SrcIPs: rule.SrcIPs, - DstPorts: dests, - IPProto: rule.IPProto, - }) - } - } - - return ret -} - func (pol *ACLPolicy) CompileSSHPolicy( node *types.Node, users []types.User, @@ -418,7 +337,7 @@ func (pol *ACLPolicy) CompileSSHPolicy( if err != nil { return nil, fmt.Errorf("parsing SSH policy, expanding alias, index: %d->%d: %w", index, innerIndex, err) } - for addr := range ipSetAll(ips) { + for addr := range util.IPSetAddrIter(ips) { principals = append(principals, &tailcfg.SSHPrincipal{ NodeIP: addr.String(), }) @@ -441,19 +360,6 @@ func (pol *ACLPolicy) CompileSSHPolicy( }, nil } -// ipSetAll returns a function that iterates over all the IPs in the IPSet. 
-func ipSetAll(ipSet *netipx.IPSet) iter.Seq[netip.Addr] { - return func(yield func(netip.Addr) bool) { - for _, rng := range ipSet.Ranges() { - for ip := rng.From(); ip.Compare(rng.To()) <= 0; ip = ip.Next() { - if !yield(ip) { - return - } - } - } - } -} - func sshCheckAction(duration string) (*tailcfg.SSHAction, error) { sessionLength, err := time.ParseDuration(duration) if err != nil { @@ -950,7 +856,7 @@ func (pol *ACLPolicy) expandIPsFromIPPrefix( func expandAutoGroup(alias string) (*netipx.IPSet, error) { switch { case strings.HasPrefix(alias, "autogroup:internet"): - return theInternet(), nil + return util.TheInternet(), nil default: return nil, fmt.Errorf("unknown autogroup %q", alias) @@ -1084,24 +990,3 @@ func findUserFromToken(users []types.User, token string) (types.User, error) { return potentialUsers[0], nil } - -// FilterNodesByACL returns the list of peers authorized to be accessed from a given node. -func FilterNodesByACL( - node *types.Node, - nodes types.Nodes, - filter []tailcfg.FilterRule, -) types.Nodes { - var result types.Nodes - - for index, peer := range nodes { - if peer.ID == node.ID { - continue - } - - if node.CanAccess(filter, nodes[index]) || peer.CanAccess(filter, node) { - result = append(result, peer) - } - } - - return result -} diff --git a/hscontrol/policy/acls_test.go b/hscontrol/policy/v1/acls_test.go similarity index 66% rename from hscontrol/policy/acls_test.go rename to hscontrol/policy/v1/acls_test.go index a7b12b1d..4c8ab306 100644 --- a/hscontrol/policy/acls_test.go +++ b/hscontrol/policy/v1/acls_test.go @@ -1,4 +1,4 @@ -package policy +package v1 import ( "database/sql" @@ -17,7 +17,6 @@ import ( "go4.org/netipx" "gopkg.in/check.v1" "gorm.io/gorm" - "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" ) @@ -2020,731 +2019,6 @@ var tsExitNodeDest = []tailcfg.NetPortRange{ }, } -// hsExitNodeDest is the list of destination IP ranges that are allowed when -// we use headscale "autogroup:internet". 
-var hsExitNodeDest = []tailcfg.NetPortRange{ - {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, - {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, - {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, - {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, - {IP: "64.0.0.0/3", Ports: tailcfg.PortRangeAny}, - {IP: "96.0.0.0/6", Ports: tailcfg.PortRangeAny}, - {IP: "100.0.0.0/10", Ports: tailcfg.PortRangeAny}, - {IP: "100.128.0.0/9", Ports: tailcfg.PortRangeAny}, - {IP: "101.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "102.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "104.0.0.0/5", Ports: tailcfg.PortRangeAny}, - {IP: "112.0.0.0/4", Ports: tailcfg.PortRangeAny}, - {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, - {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, - {IP: "168.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "169.0.0.0/9", Ports: tailcfg.PortRangeAny}, - {IP: "169.128.0.0/10", Ports: tailcfg.PortRangeAny}, - {IP: "169.192.0.0/11", Ports: tailcfg.PortRangeAny}, - {IP: "169.224.0.0/12", Ports: tailcfg.PortRangeAny}, - {IP: "169.240.0.0/13", Ports: tailcfg.PortRangeAny}, - {IP: "169.248.0.0/14", Ports: tailcfg.PortRangeAny}, - {IP: "169.252.0.0/15", Ports: tailcfg.PortRangeAny}, - {IP: "169.255.0.0/16", Ports: tailcfg.PortRangeAny}, - {IP: "170.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, - {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, - {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, - {IP: "172.128.0.0/9", Ports: tailcfg.PortRangeAny}, - {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, - {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, - {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, - {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, - {IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny}, - {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, - {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, - {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, - {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, - {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, - {IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny}, - {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, - {IP: "224.0.0.0/3", Ports: tailcfg.PortRangeAny}, - {IP: "2000::/3", Ports: tailcfg.PortRangeAny}, -} - -func TestTheInternet(t *testing.T) { - internetSet := theInternet() - - internetPrefs := internetSet.Prefixes() - - for i := range internetPrefs { - if internetPrefs[i].String() != hsExitNodeDest[i].IP { - t.Errorf( - "prefix from internet set %q != hsExit list %q", - internetPrefs[i].String(), - hsExitNodeDest[i].IP, - ) - } - } - - if len(internetPrefs) != len(hsExitNodeDest) { - t.Fatalf( - "expected same length of prefixes, internet: %d, hsExit: %d", - len(internetPrefs), - len(hsExitNodeDest), - ) - } -} - -func TestReduceFilterRules(t *testing.T) { - users := []types.User{ - {Model: gorm.Model{ID: 1}, Name: "mickael"}, - {Model: gorm.Model{ID: 2}, Name: "user1"}, - {Model: gorm.Model{ID: 3}, Name: "user2"}, - {Model: gorm.Model{ID: 4}, Name: "user100"}, - } - - tests := []struct { - name string - node *types.Node - peers types.Nodes - pol ACLPolicy - want []tailcfg.FilterRule - }{ - { - name: "host1-can-reach-host2-no-rules", - pol: ACLPolicy{ - 
ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"100.64.0.1"}, - Destinations: []string{"100.64.0.2:*"}, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2221"), - User: users[0], - }, - peers: types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.2"), - IPv6: iap("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"), - User: users[0], - }, - }, - want: []tailcfg.FilterRule{}, - }, - { - name: "1604-subnet-routers-are-preserved", - pol: ACLPolicy{ - Groups: Groups{ - "group:admins": {"user1"}, - }, - ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"group:admins"}, - Destinations: []string{"group:admins:*"}, - }, - { - Action: "accept", - Sources: []string{"group:admins"}, - Destinations: []string{"10.33.0.0/16:*"}, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: users[1], - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: []netip.Prefix{ - netip.MustParsePrefix("10.33.0.0/16"), - }, - }, - ApprovedRoutes: []netip.Prefix{ - netip.MustParsePrefix("10.33.0.0/16"), - }, - }, - peers: types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.2"), - IPv6: iap("fd7a:115c:a1e0::2"), - User: users[1], - }, - }, - want: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "100.64.0.1/32", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "fd7a:115c:a1e0::1/128", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "10.33.0.0/16", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - }, - }, - { - name: "1786-reducing-breaks-exit-nodes-the-client", - pol: ACLPolicy{ - Hosts: Hosts{ - // Exit node - "internal": netip.MustParsePrefix("100.64.0.100/32"), - }, - Groups: Groups{ - "group:team": {"user3", "user2", "user1"}, - }, - ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "internal:*", - }, - }, - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "autogroup:internet:*", - }, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: users[1], - }, - peers: types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.2"), - IPv6: iap("fd7a:115c:a1e0::2"), - User: users[2], - }, - // "internal" exit node - &types.Node{ - IPv4: iap("100.64.0.100"), - IPv6: iap("fd7a:115c:a1e0::100"), - User: users[3], - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: tsaddr.ExitRoutes(), - }, - }, - }, - want: []tailcfg.FilterRule{}, - }, - { - name: "1786-reducing-breaks-exit-nodes-the-exit", - pol: ACLPolicy{ - Hosts: Hosts{ - // Exit node - "internal": netip.MustParsePrefix("100.64.0.100/32"), - }, - Groups: Groups{ - "group:team": {"user3", "user2", "user1"}, - }, - ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "internal:*", - }, - }, - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "autogroup:internet:*", - }, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.100"), - IPv6: iap("fd7a:115c:a1e0::100"), - User: types.User{Name: "user100"}, - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: tsaddr.ExitRoutes(), - }, - ApprovedRoutes: tsaddr.ExitRoutes(), - }, - peers: types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.2"), 
- IPv6: iap("fd7a:115c:a1e0::2"), - User: users[2], - }, - &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: users[1], - }, - }, - want: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "100.64.0.100/32", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "fd7a:115c:a1e0::100/128", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: hsExitNodeDest, - }, - }, - }, - { - name: "1786-reducing-breaks-exit-nodes-the-example-from-issue", - pol: ACLPolicy{ - Hosts: Hosts{ - // Exit node - "internal": netip.MustParsePrefix("100.64.0.100/32"), - }, - Groups: Groups{ - "group:team": {"user3", "user2", "user1"}, - }, - ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "internal:*", - }, - }, - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "0.0.0.0/5:*", - "8.0.0.0/7:*", - "11.0.0.0/8:*", - "12.0.0.0/6:*", - "16.0.0.0/4:*", - "32.0.0.0/3:*", - "64.0.0.0/2:*", - "128.0.0.0/3:*", - "160.0.0.0/5:*", - "168.0.0.0/6:*", - "172.0.0.0/12:*", - "172.32.0.0/11:*", - "172.64.0.0/10:*", - "172.128.0.0/9:*", - "173.0.0.0/8:*", - "174.0.0.0/7:*", - "176.0.0.0/4:*", - "192.0.0.0/9:*", - "192.128.0.0/11:*", - "192.160.0.0/13:*", - "192.169.0.0/16:*", - "192.170.0.0/15:*", - "192.172.0.0/14:*", - "192.176.0.0/12:*", - "192.192.0.0/10:*", - "193.0.0.0/8:*", - "194.0.0.0/7:*", - "196.0.0.0/6:*", - "200.0.0.0/5:*", - "208.0.0.0/4:*", - }, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.100"), - IPv6: iap("fd7a:115c:a1e0::100"), - User: users[3], - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: tsaddr.ExitRoutes(), - }, - ApprovedRoutes: tsaddr.ExitRoutes(), - }, - peers: types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.2"), - IPv6: iap("fd7a:115c:a1e0::2"), - User: users[2], - }, - &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: users[1], - }, - }, - want: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "100.64.0.100/32", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "fd7a:115c:a1e0::100/128", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - {IP: "0.0.0.0/5", Ports: tailcfg.PortRangeAny}, - {IP: "8.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "11.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "12.0.0.0/6", Ports: tailcfg.PortRangeAny}, - {IP: "16.0.0.0/4", Ports: tailcfg.PortRangeAny}, - {IP: "32.0.0.0/3", Ports: tailcfg.PortRangeAny}, - {IP: "64.0.0.0/2", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::100/128", Ports: tailcfg.PortRangeAny}, - {IP: "128.0.0.0/3", Ports: tailcfg.PortRangeAny}, - {IP: "160.0.0.0/5", Ports: tailcfg.PortRangeAny}, - {IP: "168.0.0.0/6", Ports: tailcfg.PortRangeAny}, - {IP: "172.0.0.0/12", Ports: tailcfg.PortRangeAny}, - {IP: "172.32.0.0/11", Ports: tailcfg.PortRangeAny}, - {IP: "172.64.0.0/10", Ports: tailcfg.PortRangeAny}, - {IP: "172.128.0.0/9", 
Ports: tailcfg.PortRangeAny}, - {IP: "173.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "174.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "176.0.0.0/4", Ports: tailcfg.PortRangeAny}, - {IP: "192.0.0.0/9", Ports: tailcfg.PortRangeAny}, - {IP: "192.128.0.0/11", Ports: tailcfg.PortRangeAny}, - {IP: "192.160.0.0/13", Ports: tailcfg.PortRangeAny}, - {IP: "192.169.0.0/16", Ports: tailcfg.PortRangeAny}, - {IP: "192.170.0.0/15", Ports: tailcfg.PortRangeAny}, - {IP: "192.172.0.0/14", Ports: tailcfg.PortRangeAny}, - {IP: "192.176.0.0/12", Ports: tailcfg.PortRangeAny}, - {IP: "192.192.0.0/10", Ports: tailcfg.PortRangeAny}, - {IP: "193.0.0.0/8", Ports: tailcfg.PortRangeAny}, - {IP: "194.0.0.0/7", Ports: tailcfg.PortRangeAny}, - {IP: "196.0.0.0/6", Ports: tailcfg.PortRangeAny}, - {IP: "200.0.0.0/5", Ports: tailcfg.PortRangeAny}, - {IP: "208.0.0.0/4", Ports: tailcfg.PortRangeAny}, - }, - }, - }, - }, - { - name: "1786-reducing-breaks-exit-nodes-app-connector-like", - pol: ACLPolicy{ - Hosts: Hosts{ - // Exit node - "internal": netip.MustParsePrefix("100.64.0.100/32"), - }, - Groups: Groups{ - "group:team": {"user3", "user2", "user1"}, - }, - ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "internal:*", - }, - }, - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "8.0.0.0/8:*", - "16.0.0.0/8:*", - }, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.100"), - IPv6: iap("fd7a:115c:a1e0::100"), - User: users[3], - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: []netip.Prefix{ - netip.MustParsePrefix("8.0.0.0/16"), - netip.MustParsePrefix("16.0.0.0/16"), - }, - }, - ApprovedRoutes: []netip.Prefix{ - netip.MustParsePrefix("8.0.0.0/16"), - netip.MustParsePrefix("16.0.0.0/16"), - }, - }, - peers: types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.2"), - IPv6: iap("fd7a:115c:a1e0::2"), - User: users[2], - }, - &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: users[1], - }, - }, - want: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "100.64.0.100/32", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "fd7a:115c:a1e0::100/128", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "8.0.0.0/8", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "16.0.0.0/8", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - }, - }, - { - name: "1786-reducing-breaks-exit-nodes-app-connector-like2", - pol: ACLPolicy{ - Hosts: Hosts{ - // Exit node - "internal": netip.MustParsePrefix("100.64.0.100/32"), - }, - Groups: Groups{ - "group:team": {"user3", "user2", "user1"}, - }, - ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "internal:*", - }, - }, - { - Action: "accept", - Sources: []string{"group:team"}, - Destinations: []string{ - "8.0.0.0/16:*", - "16.0.0.0/16:*", - }, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.100"), - IPv6: iap("fd7a:115c:a1e0::100"), - User: users[3], - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: []netip.Prefix{ - netip.MustParsePrefix("8.0.0.0/8"), - netip.MustParsePrefix("16.0.0.0/8"), - }, - }, - ApprovedRoutes: []netip.Prefix{ - netip.MustParsePrefix("8.0.0.0/8"), - netip.MustParsePrefix("16.0.0.0/8"), - }, - }, - peers: 
types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.2"), - IPv6: iap("fd7a:115c:a1e0::2"), - User: users[2], - }, - &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: users[1], - }, - }, - want: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "100.64.0.100/32", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "fd7a:115c:a1e0::100/128", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "8.0.0.0/16", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "16.0.0.0/16", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - }, - }, - { - name: "1817-reduce-breaks-32-mask", - pol: ACLPolicy{ - Hosts: Hosts{ - "vlan1": netip.MustParsePrefix("172.16.0.0/24"), - "dns1": netip.MustParsePrefix("172.16.0.21/32"), - }, - Groups: Groups{ - "group:access": {"user1"}, - }, - ACLs: []ACL{ - { - Action: "accept", - Sources: []string{"group:access"}, - Destinations: []string{ - "tag:access-servers:*", - "dns1:*", - }, - }, - }, - }, - node: &types.Node{ - IPv4: iap("100.64.0.100"), - IPv6: iap("fd7a:115c:a1e0::100"), - User: users[3], - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")}, - }, - ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("172.16.0.0/24")}, - ForcedTags: []string{"tag:access-servers"}, - }, - peers: types.Nodes{ - &types.Node{ - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: users[1], - }, - }, - want: []tailcfg.FilterRule{ - { - SrcIPs: []string{"100.64.0.1/32", "fd7a:115c:a1e0::1/128"}, - DstPorts: []tailcfg.NetPortRange{ - { - IP: "100.64.0.100/32", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "fd7a:115c:a1e0::100/128", - Ports: tailcfg.PortRangeAny, - }, - { - IP: "172.16.0.21/32", - Ports: tailcfg.PortRangeAny, - }, - }, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, _ := tt.pol.CompileFilterRules( - users, - append(tt.peers, tt.node), - ) - - got = ReduceFilterRules(tt.node, got) - - if diff := cmp.Diff(tt.want, got); diff != "" { - log.Trace().Interface("got", got).Msg("result") - t.Errorf("TestReduceFilterRules() unexpected result (-want +got):\n%s", diff) - } - }) - } -} - func Test_getTags(t *testing.T) { users := []types.User{ { @@ -2885,662 +2159,6 @@ func Test_getTags(t *testing.T) { } } -func Test_getFilteredByACLPeers(t *testing.T) { - type args struct { - nodes types.Nodes - rules []tailcfg.FilterRule - node *types.Node - } - tests := []struct { - name string - args args - want types.Nodes - }{ - { - name: "all hosts can talk to each other", - args: args{ - nodes: types.Nodes{ // list of all nodes in the database - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - { - SrcIPs: []string{"100.64.0.1", "100.64.0.2", "100.64.0.3"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "*"}, - }, - }, - }, - node: &types.Node{ // current nodes - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - }, - want: types.Nodes{ - &types.Node{ - ID: 2, - IPv4: 
iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - }, - { - name: "One host can talk to another, but not all hosts", - args: args{ - nodes: types.Nodes{ // list of all nodes in the database - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - { - SrcIPs: []string{"100.64.0.1", "100.64.0.2", "100.64.0.3"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.2"}, - }, - }, - }, - node: &types.Node{ // current nodes - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - }, - want: types.Nodes{ - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - }, - }, - { - name: "host cannot directly talk to destination, but return path is authorized", - args: args{ - nodes: types.Nodes{ // list of all nodes in the database - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - { - SrcIPs: []string{"100.64.0.3"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.2"}, - }, - }, - }, - node: &types.Node{ // current nodes - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - }, - want: types.Nodes{ - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - }, - { - name: "rules allows all hosts to reach one destination", - args: args{ - nodes: types.Nodes{ // list of all nodes in the database - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - { - SrcIPs: []string{"*"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.2"}, - }, - }, - }, - node: &types.Node{ // current nodes - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - }, - want: types.Nodes{ - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - }, - }, - { - name: "rules allows all hosts to reach one destination, destination can reach all hosts", - args: args{ - nodes: types.Nodes{ // list of all nodes in the database - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - { - SrcIPs: []string{"*"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.2"}, - }, - }, - }, - node: &types.Node{ // current nodes - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - }, - want: types.Nodes{ - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 
3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - }, - { - name: "rule allows all hosts to reach all destinations", - args: args{ - nodes: types.Nodes{ // list of all nodes in the database - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - { - SrcIPs: []string{"*"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "*"}, - }, - }, - }, - node: &types.Node{ // current nodes - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - }, - want: types.Nodes{ - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - }, - { - name: "without rule all communications are forbidden", - args: args{ - nodes: types.Nodes{ // list of all nodes in the database - &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - User: types.User{Name: "joe"}, - }, - &types.Node{ - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - &types.Node{ - ID: 3, - IPv4: iap("100.64.0.3"), - User: types.User{Name: "mickael"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - }, - node: &types.Node{ // current nodes - ID: 2, - IPv4: iap("100.64.0.2"), - User: types.User{Name: "marc"}, - }, - }, - want: nil, - }, - { - // Investigating 699 - // Found some nodes: [ts-head-8w6paa ts-unstable-lys2ib ts-head-upcrmb ts-unstable-rlwpvr] nodes=ts-head-8w6paa - // ACL rules generated ACL=[{"DstPorts":[{"Bits":null,"IP":"*","Ports":{"First":0,"Last":65535}}],"SrcIPs":["fd7a:115c:a1e0::3","100.64.0.3","fd7a:115c:a1e0::4","100.64.0.4"]}] - // ACL Cache Map={"100.64.0.3":{"*":{}},"100.64.0.4":{"*":{}},"fd7a:115c:a1e0::3":{"*":{}},"fd7a:115c:a1e0::4":{"*":{}}} - name: "issue-699-broken-star", - args: args{ - nodes: types.Nodes{ // - &types.Node{ - ID: 1, - Hostname: "ts-head-upcrmb", - IPv4: iap("100.64.0.3"), - IPv6: iap("fd7a:115c:a1e0::3"), - User: types.User{Name: "user1"}, - }, - &types.Node{ - ID: 2, - Hostname: "ts-unstable-rlwpvr", - IPv4: iap("100.64.0.4"), - IPv6: iap("fd7a:115c:a1e0::4"), - User: types.User{Name: "user1"}, - }, - &types.Node{ - ID: 3, - Hostname: "ts-head-8w6paa", - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: types.User{Name: "user2"}, - }, - &types.Node{ - ID: 4, - Hostname: "ts-unstable-lys2ib", - IPv4: iap("100.64.0.2"), - IPv6: iap("fd7a:115c:a1e0::2"), - User: types.User{Name: "user2"}, - }, - }, - rules: []tailcfg.FilterRule{ // list of all ACLRules registered - { - DstPorts: []tailcfg.NetPortRange{ - { - IP: "*", - Ports: tailcfg.PortRange{First: 0, Last: 65535}, - }, - }, - SrcIPs: []string{ - "fd7a:115c:a1e0::3", "100.64.0.3", - "fd7a:115c:a1e0::4", "100.64.0.4", - }, - }, - }, - node: &types.Node{ // current nodes - ID: 3, - Hostname: "ts-head-8w6paa", - IPv4: iap("100.64.0.1"), - IPv6: iap("fd7a:115c:a1e0::1"), - User: types.User{Name: "user2"}, - }, - }, - want: types.Nodes{ - &types.Node{ - ID: 1, - Hostname: "ts-head-upcrmb", - IPv4: iap("100.64.0.3"), - IPv6: iap("fd7a:115c:a1e0::3"), - User: types.User{Name: "user1"}, - }, - &types.Node{ - ID: 2, - Hostname: "ts-unstable-rlwpvr", - IPv4: iap("100.64.0.4"), - IPv6: iap("fd7a:115c:a1e0::4"), - User: types.User{Name: "user1"}, 
- }, - }, - }, - { - name: "failing-edge-case-during-p3-refactor", - args: args{ - nodes: []*types.Node{ - { - ID: 1, - IPv4: iap("100.64.0.2"), - Hostname: "peer1", - User: types.User{Name: "mini"}, - }, - { - ID: 2, - IPv4: iap("100.64.0.3"), - Hostname: "peer2", - User: types.User{Name: "peer2"}, - }, - }, - rules: []tailcfg.FilterRule{ - { - SrcIPs: []string{"100.64.0.1/32"}, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, - {IP: "::/0", Ports: tailcfg.PortRangeAny}, - }, - }, - }, - node: &types.Node{ - ID: 0, - IPv4: iap("100.64.0.1"), - Hostname: "mini", - User: types.User{Name: "mini"}, - }, - }, - want: []*types.Node{ - { - ID: 2, - IPv4: iap("100.64.0.3"), - Hostname: "peer2", - User: types.User{Name: "peer2"}, - }, - }, - }, - { - name: "p4-host-in-netmap-user2-dest-bug", - args: args{ - nodes: []*types.Node{ - { - ID: 1, - IPv4: iap("100.64.0.2"), - Hostname: "user1-2", - User: types.User{Name: "user1"}, - }, - { - ID: 0, - IPv4: iap("100.64.0.1"), - Hostname: "user1-1", - User: types.User{Name: "user1"}, - }, - { - ID: 3, - IPv4: iap("100.64.0.4"), - Hostname: "user2-2", - User: types.User{Name: "user2"}, - }, - }, - rules: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.3/32", - "100.64.0.4/32", - "fd7a:115c:a1e0::3/128", - "fd7a:115c:a1e0::4/128", - }, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, - {IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny}, - }, - }, - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, - {IP: "100.64.0.4/32", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny}, - }, - }, - }, - node: &types.Node{ - ID: 2, - IPv4: iap("100.64.0.3"), - Hostname: "user-2-1", - User: types.User{Name: "user2"}, - }, - }, - want: []*types.Node{ - { - ID: 1, - IPv4: iap("100.64.0.2"), - Hostname: "user1-2", - User: types.User{Name: "user1"}, - }, - { - ID: 0, - IPv4: iap("100.64.0.1"), - Hostname: "user1-1", - User: types.User{Name: "user1"}, - }, - { - ID: 3, - IPv4: iap("100.64.0.4"), - Hostname: "user2-2", - User: types.User{Name: "user2"}, - }, - }, - }, - { - name: "p4-host-in-netmap-user1-dest-bug", - args: args{ - nodes: []*types.Node{ - { - ID: 1, - IPv4: iap("100.64.0.2"), - Hostname: "user1-2", - User: types.User{Name: "user1"}, - }, - { - ID: 2, - IPv4: iap("100.64.0.3"), - Hostname: "user-2-1", - User: types.User{Name: "user2"}, - }, - { - ID: 3, - IPv4: iap("100.64.0.4"), - Hostname: "user2-2", - User: types.User{Name: "user2"}, - }, - }, - rules: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.1/32", Ports: tailcfg.PortRangeAny}, - {IP: "100.64.0.2/32", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::1/128", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::2/128", Ports: tailcfg.PortRangeAny}, - }, - }, - { - SrcIPs: []string{ - "100.64.0.1/32", - "100.64.0.2/32", - "fd7a:115c:a1e0::1/128", - "fd7a:115c:a1e0::2/128", - }, - DstPorts: []tailcfg.NetPortRange{ - {IP: "100.64.0.3/32", Ports: tailcfg.PortRangeAny}, - {IP: 
"100.64.0.4/32", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::3/128", Ports: tailcfg.PortRangeAny}, - {IP: "fd7a:115c:a1e0::4/128", Ports: tailcfg.PortRangeAny}, - }, - }, - }, - node: &types.Node{ - ID: 0, - IPv4: iap("100.64.0.1"), - Hostname: "user1-1", - User: types.User{Name: "user1"}, - }, - }, - want: []*types.Node{ - { - ID: 1, - IPv4: iap("100.64.0.2"), - Hostname: "user1-2", - User: types.User{Name: "user1"}, - }, - { - ID: 2, - IPv4: iap("100.64.0.3"), - Hostname: "user-2-1", - User: types.User{Name: "user2"}, - }, - { - ID: 3, - IPv4: iap("100.64.0.4"), - Hostname: "user2-2", - User: types.User{Name: "user2"}, - }, - }, - }, - - { - name: "subnet-router-with-only-route", - args: args{ - nodes: []*types.Node{ - { - ID: 1, - IPv4: iap("100.64.0.1"), - Hostname: "user1", - User: types.User{Name: "user1"}, - }, - { - ID: 2, - IPv4: iap("100.64.0.2"), - Hostname: "router", - User: types.User{Name: "router"}, - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, - }, - ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, - }, - }, - rules: []tailcfg.FilterRule{ - { - SrcIPs: []string{ - "100.64.0.1/32", - }, - DstPorts: []tailcfg.NetPortRange{ - {IP: "10.33.0.0/16", Ports: tailcfg.PortRangeAny}, - }, - }, - }, - node: &types.Node{ - ID: 1, - IPv4: iap("100.64.0.1"), - Hostname: "user1", - User: types.User{Name: "user1"}, - }, - }, - want: []*types.Node{ - { - ID: 2, - IPv4: iap("100.64.0.2"), - Hostname: "router", - User: types.User{Name: "router"}, - Hostinfo: &tailcfg.Hostinfo{ - RoutableIPs: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, - }, - ApprovedRoutes: []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := FilterNodesByACL( - tt.args.node, - tt.args.nodes, - tt.args.rules, - ) - if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" { - t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff) - } - }) - } -} - func TestSSHRules(t *testing.T) { users := []types.User{ { diff --git a/hscontrol/policy/acls_types.go b/hscontrol/policy/v1/acls_types.go similarity index 99% rename from hscontrol/policy/acls_types.go rename to hscontrol/policy/v1/acls_types.go index 5b5d1838..8c4584c7 100644 --- a/hscontrol/policy/acls_types.go +++ b/hscontrol/policy/v1/acls_types.go @@ -1,4 +1,4 @@ -package policy +package v1 import ( "encoding/json" diff --git a/hscontrol/policy/v1/policy.go b/hscontrol/policy/v1/policy.go new file mode 100644 index 00000000..6341bc6c --- /dev/null +++ b/hscontrol/policy/v1/policy.go @@ -0,0 +1,187 @@ +package v1 + +import ( + "fmt" + "io" + "net/netip" + "os" + "sync" + + "github.com/juanfont/headscale/hscontrol/types" + "github.com/rs/zerolog/log" + "tailscale.com/tailcfg" + "tailscale.com/util/deephash" +) + +func NewPolicyManagerFromPath(path string, users []types.User, nodes types.Nodes) (*PolicyManager, error) { + policyFile, err := os.Open(path) + if err != nil { + return nil, err + } + defer policyFile.Close() + + policyBytes, err := io.ReadAll(policyFile) + if err != nil { + return nil, err + } + + return NewPolicyManager(policyBytes, users, nodes) +} + +func NewPolicyManager(polB []byte, users []types.User, nodes types.Nodes) (*PolicyManager, error) { + var pol *ACLPolicy + var err error + if polB != nil && len(polB) > 0 { + pol, err = LoadACLPolicyFromBytes(polB) + if err != nil { + return nil, fmt.Errorf("parsing policy: %w", err) + } + } + + pm := 
PolicyManager{ + pol: pol, + users: users, + nodes: nodes, + } + + _, err = pm.updateLocked() + if err != nil { + return nil, err + } + + return &pm, nil +} + +type PolicyManager struct { + mu sync.Mutex + pol *ACLPolicy + + users []types.User + nodes types.Nodes + + filterHash deephash.Sum + filter []tailcfg.FilterRule +} + +// updateLocked updates the filter rules based on the current policy and nodes. +// It must be called with the lock held. +func (pm *PolicyManager) updateLocked() (bool, error) { + filter, err := pm.pol.CompileFilterRules(pm.users, pm.nodes) + if err != nil { + return false, fmt.Errorf("compiling filter rules: %w", err) + } + + filterHash := deephash.Hash(&filter) + if filterHash == pm.filterHash { + return false, nil + } + + pm.filter = filter + pm.filterHash = filterHash + + return true, nil +} + +func (pm *PolicyManager) Filter() []tailcfg.FilterRule { + pm.mu.Lock() + defer pm.mu.Unlock() + return pm.filter +} + +func (pm *PolicyManager) SSHPolicy(node *types.Node) (*tailcfg.SSHPolicy, error) { + pm.mu.Lock() + defer pm.mu.Unlock() + + return pm.pol.CompileSSHPolicy(node, pm.users, pm.nodes) +} + +func (pm *PolicyManager) SetPolicy(polB []byte) (bool, error) { + if len(polB) == 0 { + return false, nil + } + + pol, err := LoadACLPolicyFromBytes(polB) + if err != nil { + return false, fmt.Errorf("parsing policy: %w", err) + } + + pm.mu.Lock() + defer pm.mu.Unlock() + + pm.pol = pol + + return pm.updateLocked() +} + +// SetUsers updates the users in the policy manager and updates the filter rules. +func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) { + pm.mu.Lock() + defer pm.mu.Unlock() + + pm.users = users + return pm.updateLocked() +} + +// SetNodes updates the nodes in the policy manager and updates the filter rules. 
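+//
+// A minimal usage sketch (hypothetical variables; pm is an existing
+// *PolicyManager and nodes is the refreshed node list):
+//
+//	changed, err := pm.SetNodes(nodes)
+//	if err != nil {
+//		// handle the compile error
+//	}
+//	if changed {
+//		// the filter changed; push fresh netmaps to clients
+//	}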
+func (pm *PolicyManager) SetNodes(nodes types.Nodes) (bool, error) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.nodes = nodes
+	return pm.updateLocked()
+}
+
+func (pm *PolicyManager) NodeCanHaveTag(node *types.Node, tag string) bool {
+	if pm == nil || pm.pol == nil {
+		return false
+	}
+
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	tags, invalid := pm.pol.TagsOfNode(pm.users, node)
+	log.Debug().Strs("authorised_tags", tags).Strs("unauthorised_tags", invalid).Uint64("node.id", node.ID.Uint64()).Msg("tags provided by policy")
+
+	for _, t := range tags {
+		if t == tag {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {
+	if pm == nil || pm.pol == nil {
+		return false
+	}
+
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	approvers, _ := pm.pol.AutoApprovers.GetRouteApprovers(route)
+
+	for _, approvedAlias := range approvers {
+		if approvedAlias == node.User.Username() {
+			return true
+		}
+
+		ips, err := pm.pol.ExpandAlias(pm.nodes, pm.users, approvedAlias)
+		if err != nil {
+			return false
+		}
+
+		// The expanded set contains all of the node's IPs if the node matches
+		// the rule, so checking the IPv4 address is sufficient.
+		if ips.Contains(*node.IPv4) {
+			return true
+		}
+	}
+	return false
+}
+
+func (pm *PolicyManager) Version() int {
+	return 1
+}
+
+func (pm *PolicyManager) DebugString() string {
+	return "not implemented for v1"
+}
diff --git a/hscontrol/policy/pm_test.go b/hscontrol/policy/v1/policy_test.go
similarity index 99%
rename from hscontrol/policy/pm_test.go
rename to hscontrol/policy/v1/policy_test.go
index 24b78e4d..e250db2a 100644
--- a/hscontrol/policy/pm_test.go
+++ b/hscontrol/policy/v1/policy_test.go
@@ -1,4 +1,4 @@
-package policy
+package v1
 
 import (
 	"testing"
diff --git a/hscontrol/policy/v2/filter.go b/hscontrol/policy/v2/filter.go
new file mode 100644
index 00000000..2d6c3f12
--- /dev/null
+++ b/hscontrol/policy/v2/filter.go
@@ -0,0 +1,169 @@
+package v2
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/juanfont/headscale/hscontrol/types"
+	"github.com/juanfont/headscale/hscontrol/util"
+	"github.com/rs/zerolog/log"
+	"go4.org/netipx"
+	"tailscale.com/tailcfg"
+)
+
+var (
+	ErrInvalidAction = errors.New("invalid action")
+)
+
+// compileFilterRules takes a set of nodes and an ACLPolicy and generates a
+// set of Tailscale compatible FilterRules used to allow traffic on clients.
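+//
+// As a hedged illustration (the exact values depend on the policy and the
+// nodes passed in), a single accept rule from "100.64.0.1" to "100.64.0.2:22"
+// compiles into roughly:
+//
+//	tailcfg.FilterRule{
+//		SrcIPs: []string{"100.64.0.1/32"},
+//		DstPorts: []tailcfg.NetPortRange{
+//			{IP: "100.64.0.2/32", Ports: tailcfg.PortRange{First: 22, Last: 22}},
+//		},
+//	}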
+func (pol *Policy) compileFilterRules(
+	users types.Users,
+	nodes types.Nodes,
+) ([]tailcfg.FilterRule, error) {
+	if pol == nil {
+		return tailcfg.FilterAllowAll, nil
+	}
+
+	var rules []tailcfg.FilterRule
+
+	for _, acl := range pol.ACLs {
+		if acl.Action != "accept" {
+			return nil, ErrInvalidAction
+		}
+
+		srcIPs, err := acl.Sources.Resolve(pol, users, nodes)
+		if err != nil {
+			log.Trace().Err(err).Msgf("resolving source ips")
+		}
+
+		if len(srcIPs.Prefixes()) == 0 {
+			continue
+		}
+
+		// TODO(kradalby): integrate type into schema
+		// TODO(kradalby): figure out the _ is wildcard stuff
+		protocols, _, err := parseProtocol(acl.Protocol)
+		if err != nil {
+			return nil, fmt.Errorf("parsing policy, protocol err: %w", err)
+		}
+
+		var destPorts []tailcfg.NetPortRange
+		for _, dest := range acl.Destinations {
+			ips, err := dest.Alias.Resolve(pol, users, nodes)
+			if err != nil {
+				log.Trace().Err(err).Msgf("resolving destination ips")
+			}
+
+			for _, pref := range ips.Prefixes() {
+				for _, port := range dest.Ports {
+					pr := tailcfg.NetPortRange{
+						IP:    pref.String(),
+						Ports: port,
+					}
+					destPorts = append(destPorts, pr)
+				}
+			}
+		}
+
+		if len(destPorts) == 0 {
+			continue
+		}
+
+		rules = append(rules, tailcfg.FilterRule{
+			SrcIPs:   ipSetToPrefixStringList(srcIPs),
+			DstPorts: destPorts,
+			IPProto:  protocols,
+		})
+	}
+
+	return rules, nil
+}
+
+func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
+	return tailcfg.SSHAction{
+		Reject:                   !accept,
+		Accept:                   accept,
+		SessionDuration:          duration,
+		AllowAgentForwarding:     true,
+		AllowLocalPortForwarding: true,
+	}
+}
+
+func (pol *Policy) compileSSHPolicy(
+	users types.Users,
+	node *types.Node,
+	nodes types.Nodes,
+) (*tailcfg.SSHPolicy, error) {
+	if pol == nil || len(pol.SSHs) == 0 {
+		return nil, nil
+	}
+
+	var rules []*tailcfg.SSHRule
+
+	for index, rule := range pol.SSHs {
+		var dest netipx.IPSetBuilder
+		for _, dst := range rule.Destinations {
+			ips, err := dst.Resolve(pol, users, nodes)
+			if err != nil {
+				log.Trace().Err(err).Msgf("resolving destination ips")
+			}
+			dest.AddSet(ips)
+		}
+
+		destSet, err := dest.IPSet()
+		if err != nil {
+			return nil, err
+		}
+
+		if !node.InIPSet(destSet) {
+			continue
+		}
+
+		var action tailcfg.SSHAction
+		switch rule.Action {
+		case "accept":
+			action = sshAction(true, 0)
+		case "check":
+			action = sshAction(true, rule.CheckPeriod)
+		default:
+			return nil, fmt.Errorf("parsing SSH policy, unknown action %q, index: %d", rule.Action, index)
+		}
+
+		var principals []*tailcfg.SSHPrincipal
+		srcIPs, err := rule.Sources.Resolve(pol, users, nodes)
+		if err != nil {
+			log.Trace().Err(err).Msgf("resolving source ips")
+		}
+
+		for addr := range util.IPSetAddrIter(srcIPs) {
+			principals = append(principals, &tailcfg.SSHPrincipal{
+				NodeIP: addr.String(),
+			})
+		}
+
+		userMap := make(map[string]string, len(rule.Users))
+		for _, user := range rule.Users {
+			userMap[user.String()] = "="
+		}
+		rules = append(rules, &tailcfg.SSHRule{
+			Principals: principals,
+			SSHUsers:   userMap,
+			Action:     &action,
+		})
+	}
+
+	return &tailcfg.SSHPolicy{
+		Rules: rules,
+	}, nil
+}
+
+func ipSetToPrefixStringList(ips *netipx.IPSet) []string {
+	var out []string
+
+	for _, pref := range ips.Prefixes() {
+		out = append(out, pref.String())
+	}
+	return out
+}
diff --git a/hscontrol/policy/v2/filter_test.go b/hscontrol/policy/v2/filter_test.go
new file mode 100644
index 00000000..e0b12520
--- /dev/null
+++ b/hscontrol/policy/v2/filter_test.go
@@ -0,0 +1,378 @@
+package v2
+
+import (
+	"testing"
+ + "github.com/google/go-cmp/cmp" + "github.com/juanfont/headscale/hscontrol/types" + "gorm.io/gorm" + "tailscale.com/tailcfg" +) + +func TestParsing(t *testing.T) { + users := types.Users{ + {Model: gorm.Model{ID: 1}, Name: "testuser"}, + } + tests := []struct { + name string + format string + acl string + want []tailcfg.FilterRule + wantErr bool + }{ + { + name: "invalid-hujson", + format: "hujson", + acl: ` +{ + `, + want: []tailcfg.FilterRule{}, + wantErr: true, + }, + // The new parser will ignore all that is irrelevant + // { + // name: "valid-hujson-invalid-content", + // format: "hujson", + // acl: ` + // { + // "valid_json": true, + // "but_a_policy_though": false + // } + // `, + // want: []tailcfg.FilterRule{}, + // wantErr: true, + // }, + // { + // name: "invalid-cidr", + // format: "hujson", + // acl: ` + // {"example-host-1": "100.100.100.100/42"} + // `, + // want: []tailcfg.FilterRule{}, + // wantErr: true, + // }, + { + name: "basic-rule", + format: "hujson", + acl: ` +{ + "hosts": { + "host-1": "100.100.100.100", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ + { + "action": "accept", + "src": [ + "subnet-1", + "192.168.1.0/24" + ], + "dst": [ + "*:22,3389", + "host-1:*", + ], + }, + ], +} + `, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.100.101.0/24", "192.168.1.0/24"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "0.0.0.0/0", Ports: tailcfg.PortRange{First: 22, Last: 22}}, + {IP: "0.0.0.0/0", Ports: tailcfg.PortRange{First: 3389, Last: 3389}}, + {IP: "::/0", Ports: tailcfg.PortRange{First: 22, Last: 22}}, + {IP: "::/0", Ports: tailcfg.PortRange{First: 3389, Last: 3389}}, + {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + wantErr: false, + }, + { + name: "parse-protocol", + format: "hujson", + acl: ` +{ + "hosts": { + "host-1": "100.100.100.100", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ + { + "Action": "accept", + "src": [ + "*", + ], + "proto": "tcp", + "dst": [ + "host-1:*", + ], + }, + { + "Action": "accept", + "src": [ + "*", + ], + "proto": "udp", + "dst": [ + "host-1:53", + ], + }, + { + "Action": "accept", + "src": [ + "*", + ], + "proto": "icmp", + "dst": [ + "host-1:*", + ], + }, + ], +}`, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"0.0.0.0/0", "::/0"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, + }, + IPProto: []int{protocolTCP}, + }, + { + SrcIPs: []string{"0.0.0.0/0", "::/0"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.100.100.100/32", Ports: tailcfg.PortRange{First: 53, Last: 53}}, + }, + IPProto: []int{protocolUDP}, + }, + { + SrcIPs: []string{"0.0.0.0/0", "::/0"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, + }, + IPProto: []int{protocolICMP, protocolIPv6ICMP}, + }, + }, + wantErr: false, + }, + { + name: "port-wildcard", + format: "hujson", + acl: ` +{ + "hosts": { + "host-1": "100.100.100.100", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ + { + "Action": "accept", + "src": [ + "*", + ], + "dst": [ + "host-1:*", + ], + }, + ], +} +`, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"0.0.0.0/0", "::/0"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + wantErr: false, + }, + { + name: "port-range", + format: "hujson", + acl: ` +{ + "hosts": { + "host-1": "100.100.100.100", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ + { + "action": "accept", + "src": [ + "subnet-1", + ], + "dst": [ + 
"host-1:5400-5500", + ], + }, + ], +} +`, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"100.100.101.0/24"}, + DstPorts: []tailcfg.NetPortRange{ + { + IP: "100.100.100.100/32", + Ports: tailcfg.PortRange{First: 5400, Last: 5500}, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "port-group", + format: "hujson", + acl: ` +{ + "groups": { + "group:example": [ + "testuser@", + ], + }, + + "hosts": { + "host-1": "100.100.100.100", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ + { + "action": "accept", + "src": [ + "group:example", + ], + "dst": [ + "host-1:*", + ], + }, + ], +} +`, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"200.200.200.200/32"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + wantErr: false, + }, + { + name: "port-user", + format: "hujson", + acl: ` +{ + "hosts": { + "host-1": "100.100.100.100", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ + { + "action": "accept", + "src": [ + "testuser@", + ], + "dst": [ + "host-1:*", + ], + }, + ], +} +`, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"200.200.200.200/32"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + wantErr: false, + }, + { + name: "ipv6", + format: "hujson", + acl: ` +{ + "hosts": { + "host-1": "100.100.100.100/32", + "subnet-1": "100.100.101.100/24", + }, + + "acls": [ + { + "action": "accept", + "src": [ + "*", + ], + "dst": [ + "host-1:*", + ], + }, + ], +} +`, + want: []tailcfg.FilterRule{ + { + SrcIPs: []string{"0.0.0.0/0", "::/0"}, + DstPorts: []tailcfg.NetPortRange{ + {IP: "100.100.100.100/32", Ports: tailcfg.PortRangeAny}, + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pol, err := policyFromBytes([]byte(tt.acl)) + if tt.wantErr && err == nil { + t.Errorf("parsing() error = %v, wantErr %v", err, tt.wantErr) + + return + } else if !tt.wantErr && err != nil { + t.Errorf("parsing() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if err != nil { + return + } + + rules, err := pol.compileFilterRules( + users, + types.Nodes{ + &types.Node{ + IPv4: ap("100.100.100.100"), + }, + &types.Node{ + IPv4: ap("200.200.200.200"), + User: users[0], + Hostinfo: &tailcfg.Hostinfo{}, + }, + }) + + if (err != nil) != tt.wantErr { + t.Errorf("parsing() error = %v, wantErr %v", err, tt.wantErr) + + return + } + + if diff := cmp.Diff(tt.want, rules); diff != "" { + t.Errorf("parsing() unexpected result (-want +got):\n%s", diff) + } + }) + } +} diff --git a/hscontrol/policy/v2/policy.go b/hscontrol/policy/v2/policy.go new file mode 100644 index 00000000..41f51487 --- /dev/null +++ b/hscontrol/policy/v2/policy.go @@ -0,0 +1,283 @@ +package v2 + +import ( + "encoding/json" + "fmt" + "net/netip" + "strings" + "sync" + + "github.com/juanfont/headscale/hscontrol/types" + "go4.org/netipx" + "tailscale.com/net/tsaddr" + "tailscale.com/tailcfg" + "tailscale.com/util/deephash" +) + +type PolicyManager struct { + mu sync.Mutex + pol *Policy + users []types.User + nodes types.Nodes + + filterHash deephash.Sum + filter []tailcfg.FilterRule + + tagOwnerMapHash deephash.Sum + tagOwnerMap map[Tag]*netipx.IPSet + + autoApproveMapHash deephash.Sum + autoApproveMap map[netip.Prefix]*netipx.IPSet + + // Lazy map of SSH policies + sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy +} + +// NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes. 
+// It returns an error if the policy file is invalid.
+// The policy manager will update the filter rules based on the users and nodes.
+func NewPolicyManager(b []byte, users []types.User, nodes types.Nodes) (*PolicyManager, error) {
+	policy, err := policyFromBytes(b)
+	if err != nil {
+		return nil, fmt.Errorf("parsing policy: %w", err)
+	}
+
+	pm := PolicyManager{
+		pol:          policy,
+		users:        users,
+		nodes:        nodes,
+		sshPolicyMap: make(map[types.NodeID]*tailcfg.SSHPolicy, len(nodes)),
+	}
+
+	_, err = pm.updateLocked()
+	if err != nil {
+		return nil, err
+	}
+
+	return &pm, nil
+}
+
+// updateLocked updates the filter rules based on the current policy and nodes.
+// It must be called with the lock held.
+func (pm *PolicyManager) updateLocked() (bool, error) {
+	filter, err := pm.pol.compileFilterRules(pm.users, pm.nodes)
+	if err != nil {
+		return false, fmt.Errorf("compiling filter rules: %w", err)
+	}
+
+	filterHash := deephash.Hash(&filter)
+	filterChanged := filterHash != pm.filterHash
+	pm.filter = filter
+	pm.filterHash = filterHash
+
+	// Order matters, tags might be used in autoapprovers, so we need to ensure
+	// that the map for tag owners is resolved before resolving autoapprovers.
+	// TODO(kradalby): Order might not matter after #2417
+	tagMap, err := resolveTagOwners(pm.pol, pm.users, pm.nodes)
+	if err != nil {
+		return false, fmt.Errorf("resolving tag owners map: %w", err)
+	}
+
+	tagOwnerMapHash := deephash.Hash(&tagMap)
+	tagOwnerChanged := tagOwnerMapHash != pm.tagOwnerMapHash
+	pm.tagOwnerMap = tagMap
+	pm.tagOwnerMapHash = tagOwnerMapHash
+
+	autoMap, err := resolveAutoApprovers(pm.pol, pm.users, pm.nodes)
+	if err != nil {
+		return false, fmt.Errorf("resolving auto approvers map: %w", err)
+	}
+
+	autoApproveMapHash := deephash.Hash(&autoMap)
+	autoApproveChanged := autoApproveMapHash != pm.autoApproveMapHash
+	pm.autoApproveMap = autoMap
+	pm.autoApproveMapHash = autoApproveMapHash
+
+	// If none of the calculated values changed, there is no need to update the nodes.
+	if !filterChanged && !tagOwnerChanged && !autoApproveChanged {
+		return false, nil
+	}
+
+	// Clear the SSH policy map to ensure it's recalculated with the new policy.
+	// TODO(kradalby): This could potentially be optimized by only clearing the
+	// policies for nodes that have changed. Particularly if the only difference is
+	// that nodes have been added or removed.
+	clear(pm.sshPolicyMap)
+
+	return true, nil
+}
+
+func (pm *PolicyManager) SSHPolicy(node *types.Node) (*tailcfg.SSHPolicy, error) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	if sshPol, ok := pm.sshPolicyMap[node.ID]; ok {
+		return sshPol, nil
+	}
+
+	sshPol, err := pm.pol.compileSSHPolicy(pm.users, node, pm.nodes)
+	if err != nil {
+		return nil, fmt.Errorf("compiling SSH policy: %w", err)
+	}
+	pm.sshPolicyMap[node.ID] = sshPol
+
+	return sshPol, nil
+}
+
+func (pm *PolicyManager) SetPolicy(polB []byte) (bool, error) {
+	if len(polB) == 0 {
+		return false, nil
+	}
+
+	pol, err := policyFromBytes(polB)
+	if err != nil {
+		return false, fmt.Errorf("parsing policy: %w", err)
+	}
+
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	pm.pol = pol
+
+	return pm.updateLocked()
+}
+
+// Filter returns the current filter rules for the entire tailnet.
+func (pm *PolicyManager) Filter() []tailcfg.FilterRule {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	return pm.filter
+}
+
+// SetUsers updates the users in the policy manager and updates the filter rules.
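+//
+// A hedged sketch of the intended call pattern (hypothetical caller):
+//
+//	if changed, err := pm.SetUsers(users); err == nil && changed {
+//		// filter, tag owners or auto-approvers changed; notify nodes
+//	}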
+func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.users = users
+	return pm.updateLocked()
+}
+
+// SetNodes updates the nodes in the policy manager and updates the filter rules.
+func (pm *PolicyManager) SetNodes(nodes types.Nodes) (bool, error) {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.nodes = nodes
+	return pm.updateLocked()
+}
+
+func (pm *PolicyManager) NodeCanHaveTag(node *types.Node, tag string) bool {
+	if pm == nil {
+		return false
+	}
+
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	if ips, ok := pm.tagOwnerMap[Tag(tag)]; ok {
+		for _, nodeAddr := range node.IPs() {
+			if ips.Contains(nodeAddr) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func (pm *PolicyManager) NodeCanApproveRoute(node *types.Node, route netip.Prefix) bool {
+	if pm == nil {
+		return false
+	}
+
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+
+	// The fast path: the node requests approval for a prefix that has an
+	// exact entry, e.g. 10.0.0.0/8, so we can check it and return quickly.
+	if _, ok := pm.autoApproveMap[route]; ok {
+		for _, nodeAddr := range node.IPs() {
+			if pm.autoApproveMap[route].Contains(nodeAddr) {
+				return true
+			}
+		}
+	}
+
+	// The slow path: the node tries to approve e.g. 10.0.10.0/24, which is
+	// part of 10.0.0.0/8. We cannot just look it up in the prefix map and
+	// have to check whether a "parent" prefix is available.
+	for prefix, approveAddrs := range pm.autoApproveMap {
+		// We do not want the exit node entry to approve all
+		// sorts of routes. The logic here is that it would be
+		// unexpected behaviour to have specific routes approved
+		// just because the node is allowed to designate itself as
+		// an exit.
+		if tsaddr.IsExitRoute(prefix) {
+			continue
+		}
+
+		// Check if prefix is larger (so containing) and then overlaps
+		// the route to see if the node can approve a subset of an autoapprover
+		if prefix.Bits() <= route.Bits() && prefix.Overlaps(route) {
+			for _, nodeAddr := range node.IPs() {
+				if approveAddrs.Contains(nodeAddr) {
+					return true
+				}
+			}
+		}
+	}
+
+	return false
+}
+
+func (pm *PolicyManager) Version() int {
+	return 2
+}
+
+func (pm *PolicyManager) DebugString() string {
+	var sb strings.Builder
+
+	fmt.Fprintf(&sb, "PolicyManager (v%d):\n\n", pm.Version())
+
+	sb.WriteString("\n\n")
+
+	if pm.pol != nil {
+		pol, err := json.MarshalIndent(pm.pol, "", " ")
+		if err == nil {
+			sb.WriteString("Policy:\n")
+			sb.Write(pol)
+			sb.WriteString("\n\n")
+		}
+	}
+
+	fmt.Fprintf(&sb, "AutoApprover (%d):\n", len(pm.autoApproveMap))
+	for prefix, approveAddrs := range pm.autoApproveMap {
+		fmt.Fprintf(&sb, "\t%s:\n", prefix)
+		for _, iprange := range approveAddrs.Ranges() {
+			fmt.Fprintf(&sb, "\t\t%s\n", iprange)
+		}
+	}
+
+	sb.WriteString("\n\n")
+
+	fmt.Fprintf(&sb, "TagOwner (%d):\n", len(pm.tagOwnerMap))
+	for tag, tagOwners := range pm.tagOwnerMap {
+		fmt.Fprintf(&sb, "\t%s:\n", tag)
+		for _, iprange := range tagOwners.Ranges() {
+			fmt.Fprintf(&sb, "\t\t%s\n", iprange)
+		}
+	}
+
+	sb.WriteString("\n\n")
+	if pm.filter != nil {
+		filter, err := json.MarshalIndent(pm.filter, "", " ")
+		if err == nil {
+			sb.WriteString("Compiled filter:\n")
+			sb.Write(filter)
+			sb.WriteString("\n\n")
+		}
+	}
+
+	return sb.String()
+}
diff --git a/hscontrol/policy/v2/policy_test.go b/hscontrol/policy/v2/policy_test.go
new file mode 100644
index 00000000..ee26c596
--- /dev/null
+++ b/hscontrol/policy/v2/policy_test.go
@@ -0,0 +1,58 @@
+package v2
+
+import (
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/types" + "github.com/stretchr/testify/require" + "gorm.io/gorm" + "tailscale.com/tailcfg" +) + +func node(name, ipv4, ipv6 string, user types.User, hostinfo *tailcfg.Hostinfo) *types.Node { + return &types.Node{ + ID: 0, + Hostname: name, + IPv4: ap(ipv4), + IPv6: ap(ipv6), + User: user, + UserID: user.ID, + Hostinfo: hostinfo, + } +} + +func TestPolicyManager(t *testing.T) { + users := types.Users{ + {Model: gorm.Model{ID: 1}, Name: "testuser", Email: "testuser@headscale.net"}, + {Model: gorm.Model{ID: 2}, Name: "otheruser", Email: "otheruser@headscale.net"}, + } + + tests := []struct { + name string + pol string + nodes types.Nodes + wantFilter []tailcfg.FilterRule + }{ + { + name: "empty-policy", + pol: "{}", + nodes: types.Nodes{}, + wantFilter: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pm, err := NewPolicyManager([]byte(tt.pol), users, tt.nodes) + require.NoError(t, err) + + filter := pm.Filter() + if diff := cmp.Diff(filter, tt.wantFilter); diff != "" { + t.Errorf("Filter() mismatch (-want +got):\n%s", diff) + } + + // TODO(kradalby): Test SSH Policy + }) + } +} diff --git a/hscontrol/policy/v2/types.go b/hscontrol/policy/v2/types.go new file mode 100644 index 00000000..6e644539 --- /dev/null +++ b/hscontrol/policy/v2/types.go @@ -0,0 +1,1005 @@ +package v2 + +import ( + "bytes" + "encoding/json" + "fmt" + "net/netip" + "strings" + "time" + + "github.com/juanfont/headscale/hscontrol/types" + "github.com/juanfont/headscale/hscontrol/util" + "github.com/tailscale/hujson" + "go4.org/netipx" + "tailscale.com/net/tsaddr" + "tailscale.com/tailcfg" + "tailscale.com/types/ptr" + "tailscale.com/util/multierr" +) + +const Wildcard = Asterix(0) + +type Asterix int + +func (a Asterix) Validate() error { + return nil +} + +func (a Asterix) String() string { + return "*" +} + +func (a Asterix) UnmarshalJSON(b []byte) error { + return nil +} + +func (a Asterix) Resolve(_ *Policy, _ types.Users, nodes types.Nodes) (*netipx.IPSet, error) { + var ips netipx.IPSetBuilder + + // TODO(kradalby): + // Should this actually only be the CGNAT spaces? I do not think so, because + // we also want to include subnet routers right? + ips.AddPrefix(tsaddr.AllIPv4()) + ips.AddPrefix(tsaddr.AllIPv6()) + + return ips.IPSet() +} + +// Username is a string that represents a username, it must contain an @. +type Username string + +func (u Username) Validate() error { + if isUser(string(u)) { + return nil + } + return fmt.Errorf("Username has to contain @, got: %q", u) +} + +func (u *Username) String() string { + return string(*u) +} + +func (u *Username) UnmarshalJSON(b []byte) error { + *u = Username(strings.Trim(string(b), `"`)) + if err := u.Validate(); err != nil { + return err + } + return nil +} + +func (u Username) CanBeTagOwner() bool { + return true +} + +func (u Username) CanBeAutoApprover() bool { + return true +} + +// resolveUser attempts to find a user in the provided [types.Users] slice that matches the Username. +// It prioritizes matching the ProviderIdentifier, and if not found, it falls back to matching the Email or Name. +// If no matching user is found, it returns an error indicating no user matching. +// If multiple matching users are found, it returns an error indicating multiple users matching. +// It returns the matched types.User and a nil error if exactly one match is found. 
+func (u Username) resolveUser(users types.Users) (types.User, error) {
+	var potentialUsers types.Users
+
+	// At parse time, we require all usernames to contain an "@" character. If the
+	// username token does not naturally contain one (as an email does), the user
+	// has to add it to the end of the username. We strip it here as we do not
+	// expect the usernames to be stored with the "@".
+	uTrimmed := strings.TrimSuffix(u.String(), "@")
+
+	for _, user := range users {
+		if user.ProviderIdentifier.Valid && user.ProviderIdentifier.String == uTrimmed {
+			// Prioritize ProviderIdentifier match and exit early
+			return user, nil
+		}
+
+		if user.Email == uTrimmed || user.Name == uTrimmed {
+			potentialUsers = append(potentialUsers, user)
+		}
+	}
+
+	if len(potentialUsers) == 0 {
+		return types.User{}, fmt.Errorf("user with token %q not found", u.String())
+	}
+
+	if len(potentialUsers) > 1 {
+		return types.User{}, fmt.Errorf("multiple users with token %q found: %s", u.String(), potentialUsers.String())
+	}
+
+	return potentialUsers[0], nil
+}
+
+func (u Username) Resolve(_ *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
+	var ips netipx.IPSetBuilder
+	var errs []error
+
+	user, err := u.resolveUser(users)
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	for _, node := range nodes {
+		if node.IsTagged() {
+			continue
+		}
+
+		if node.User.ID == user.ID {
+			node.AppendToIPSet(&ips)
+		}
+	}
+
+	return buildIPSetMultiErr(&ips, errs)
+}
+
+// Group is a special string which is always prefixed with `group:`
+type Group string
+
+func (g Group) Validate() error {
+	if isGroup(string(g)) {
+		return nil
+	}
+	return fmt.Errorf(`Group has to start with "group:", got: %q`, g)
+}
+
+func (g *Group) UnmarshalJSON(b []byte) error {
+	*g = Group(strings.Trim(string(b), `"`))
+	if err := g.Validate(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (g Group) CanBeTagOwner() bool {
+	return true
+}
+
+func (g Group) CanBeAutoApprover() bool {
+	return true
+}
+
+func (g Group) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
+	var ips netipx.IPSetBuilder
+	var errs []error
+
+	for _, user := range p.Groups[g] {
+		uips, err := user.Resolve(nil, users, nodes)
+		if err != nil {
+			errs = append(errs, err)
+		}
+
+		ips.AddSet(uips)
+	}
+
+	return buildIPSetMultiErr(&ips, errs)
+}
+
+// Tag is a special string which is always prefixed with `tag:`
+type Tag string
+
+func (t Tag) Validate() error {
+	if isTag(string(t)) {
+		return nil
+	}
+	return fmt.Errorf(`tag has to start with "tag:", got: %q`, t)
+}
+
+func (t *Tag) UnmarshalJSON(b []byte) error {
+	*t = Tag(strings.Trim(string(b), `"`))
+	if err := t.Validate(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t Tag) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
+	var ips netipx.IPSetBuilder
+
+	// TODO(kradalby): This is currently resolved twice, and should be resolved once.
+	// It is added temporarily until we sort out the story on how and when we resolve tags
+	// from the three places they can be "approved":
+	// - As part of a PreAuthKey (handled in HasTag)
+	// - As part of ForcedTags (set via CLI) (handled in HasTag)
+	// - As part of HostInfo.RequestTags and approved by policy (this is happening here)
+	// Part of #2417
+	tagMap, err := resolveTagOwners(p, users, nodes)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, node := range nodes {
+		if node.HasTag(string(t)) {
+			node.AppendToIPSet(&ips)
+		}
+
+		// TODO(kradalby): remove as part of #2417, see comment above
+		if tagMap != nil {
+			if tagips, ok := tagMap[t]; ok && node.InIPSet(tagips) && node.Hostinfo != nil {
+				for _, tag := range node.Hostinfo.RequestTags {
+					if tag == string(t) {
+						node.AppendToIPSet(&ips)
+					}
+				}
+			}
+		}
+	}
+
+	return ips.IPSet()
+}
+
+func (t Tag) CanBeAutoApprover() bool {
+	return true
+}
+
+// Host is a string that represents a hostname.
+type Host string
+
+func (h Host) Validate() error {
+	if !isHost(string(h)) {
+		return fmt.Errorf("Hostname %q is invalid", h)
+	}
+	return nil
+}
+
+func (h *Host) UnmarshalJSON(b []byte) error {
+	*h = Host(strings.Trim(string(b), `"`))
+	if err := h.Validate(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (h Host) Resolve(p *Policy, _ types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
+	var ips netipx.IPSetBuilder
+	var errs []error
+
+	pref, ok := p.Hosts[h]
+	if !ok {
+		return nil, fmt.Errorf("unable to resolve host: %q", h)
+	}
+	err := pref.Validate()
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	ips.AddPrefix(netip.Prefix(pref))
+
+	// If the IP is a single host, look for a node to ensure we add all the IPs of
+	// the node to the IPSet.
+	// appendIfNodeHasIP(nodes, &ips, pref)
+
+	// TODO(kradalby): I am a bit unsure what is the correct way to do this,
+	// should a host with a non single IP be able to resolve the full host (inc all IPs).
+	ipsTemp, err := ips.IPSet()
+	if err != nil {
+		errs = append(errs, err)
+	}
+	for _, node := range nodes {
+		if node.InIPSet(ipsTemp) {
+			node.AppendToIPSet(&ips)
+		}
+	}
+
+	return buildIPSetMultiErr(&ips, errs)
+}
+
+type Prefix netip.Prefix
+
+func (p Prefix) Validate() error {
+	if !netip.Prefix(p).IsValid() {
+		return fmt.Errorf("Prefix %q is invalid", p)
+	}
+
+	return nil
+}
+
+func (p Prefix) String() string {
+	return netip.Prefix(p).String()
+}
+
+func (p *Prefix) parseString(addr string) error {
+	if !strings.Contains(addr, "/") {
+		addr, err := netip.ParseAddr(addr)
+		if err != nil {
+			return err
+		}
+		addrPref, err := addr.Prefix(addr.BitLen())
+		if err != nil {
+			return err
+		}
+
+		*p = Prefix(addrPref)
+		return nil
+	}
+
+	pref, err := netip.ParsePrefix(addr)
+	if err != nil {
+		return err
+	}
+	*p = Prefix(pref)
+	return nil
+}
+
+func (p *Prefix) UnmarshalJSON(b []byte) error {
+	err := p.parseString(strings.Trim(string(b), `"`))
+	if err != nil {
+		return err
+	}
+	if err := p.Validate(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Resolve resolves the Prefix to an IPSet. The IPSet will contain all the IP
+// addresses that the Prefix represents within Headscale. It is the product
+// of the Prefix and the Policy, Users, and Nodes.
+//
+// See [Policy], [types.Users], and [types.Nodes] for more details.
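+//
+// A hedged example: resolving Prefix("100.64.0.1/32") yields that prefix
+// itself plus, if a node owns 100.64.0.1, the node's other addresses (such
+// as its IPv6 address), per the expansion below.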
+func (p Prefix) Resolve(_ *Policy, _ types.Users, nodes types.Nodes) (*netipx.IPSet, error) {
+	var ips netipx.IPSetBuilder
+	var errs []error
+
+	ips.AddPrefix(netip.Prefix(p))
+	// If the IP is a single host, look for a node to ensure we add all the IPs of
+	// the node to the IPSet.
+	// appendIfNodeHasIP(nodes, &ips, pref)
+
+	// TODO(kradalby): I am a bit unsure what is the correct way to do this,
+	// should a host with a non single IP be able to resolve the full host (inc all IPs).
+	// Currently this is done because the old implementation did this, we might want to
+	// drop it before releasing.
+	// For example:
+	// If a src or dst includes "64.0.0.0/2:*", it will include 100.64/16 range, which
+	// means that it will need to fetch the IPv6 addrs of the node to include the full range.
+	// Clearly, if a user sets the dst to be "64.0.0.0/2:*", it is more likely meant as an
+	// exit node, and this would be strange behaviour.
+	ipsTemp, err := ips.IPSet()
+	if err != nil {
+		errs = append(errs, err)
+	}
+	for _, node := range nodes {
+		if node.InIPSet(ipsTemp) {
+			node.AppendToIPSet(&ips)
+		}
+	}
+
+	return buildIPSetMultiErr(&ips, errs)
+}
+
+// AutoGroup is a special string which is always prefixed with `autogroup:`
+type AutoGroup string
+
+const (
+	AutoGroupInternet = "autogroup:internet"
+)
+
+var autogroups = []string{AutoGroupInternet}
+
+func (ag AutoGroup) Validate() error {
+	for _, valid := range autogroups {
+		if valid == string(ag) {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("AutoGroup is invalid, got: %q, must be one of %v", ag, autogroups)
+}
+
+func (ag *AutoGroup) UnmarshalJSON(b []byte) error {
+	*ag = AutoGroup(strings.Trim(string(b), `"`))
+	if err := ag.Validate(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ag AutoGroup) Resolve(_ *Policy, _ types.Users, _ types.Nodes) (*netipx.IPSet, error) {
+	switch ag {
+	case AutoGroupInternet:
+		return util.TheInternet(), nil
+	}
+
+	return nil, nil
+}
+
+type Alias interface {
+	Validate() error
+	UnmarshalJSON([]byte) error
+
+	// Resolve resolves the Alias to an IPSet. The IPSet will contain all the IP
+	// addresses that the Alias represents within Headscale. It is the product
+	// of the Alias and the Policy, Users and Nodes.
+	// This is an interface definition and the implementation is independent of
+	// the Alias type.
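+	//
+	// As a hedged illustration, resolving Username("testuser@") returns an
+	// IPSet with the addresses of every untagged node owned by that user.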
+ Resolve(*Policy, types.Users, types.Nodes) (*netipx.IPSet, error) +} + +type AliasWithPorts struct { + Alias + Ports []tailcfg.PortRange +} + +func (ve *AliasWithPorts) UnmarshalJSON(b []byte) error { + // TODO(kradalby): use encoding/json/v2 (go-json-experiment) + dec := json.NewDecoder(bytes.NewReader(b)) + var v any + if err := dec.Decode(&v); err != nil { + return err + } + + switch vs := v.(type) { + case string: + var portsPart string + var err error + + if strings.Contains(vs, ":") { + vs, portsPart, err = splitDestinationAndPort(vs) + if err != nil { + return err + } + + ports, err := parsePortRange(portsPart) + if err != nil { + return err + } + ve.Ports = ports + } + + ve.Alias, err = parseAlias(vs) + if err != nil { + return err + } + if err := ve.Alias.Validate(); err != nil { + return err + } + + default: + return fmt.Errorf("type %T not supported", vs) + } + return nil +} + +func isWildcard(str string) bool { + return str == "*" +} + +func isUser(str string) bool { + return strings.Contains(str, "@") +} + +func isGroup(str string) bool { + return strings.HasPrefix(str, "group:") +} + +func isTag(str string) bool { + return strings.HasPrefix(str, "tag:") +} + +func isAutoGroup(str string) bool { + return strings.HasPrefix(str, "autogroup:") +} + +func isHost(str string) bool { + return !isUser(str) && !strings.Contains(str, ":") +} + +func parseAlias(vs string) (Alias, error) { + var pref Prefix + err := pref.parseString(vs) + if err == nil { + return &pref, nil + } + + switch { + case isWildcard(vs): + return Wildcard, nil + case isUser(vs): + return ptr.To(Username(vs)), nil + case isGroup(vs): + return ptr.To(Group(vs)), nil + case isTag(vs): + return ptr.To(Tag(vs)), nil + case isAutoGroup(vs): + return ptr.To(AutoGroup(vs)), nil + } + + if isHost(vs) { + return ptr.To(Host(vs)), nil + } + + return nil, fmt.Errorf(`Invalid alias %q. An alias must be one of the following types: +- wildcard (*) +- user (containing an "@") +- group (starting with "group:") +- tag (starting with "tag:") +- autogroup (starting with "autogroup:") +- host + +Please check the format and try again.`, vs) +} + +// AliasEnc is used to deserialize a Alias. +type AliasEnc struct{ Alias } + +func (ve *AliasEnc) UnmarshalJSON(b []byte) error { + ptr, err := unmarshalPointer[Alias]( + b, + parseAlias, + ) + if err != nil { + return err + } + ve.Alias = ptr + return nil +} + +type Aliases []Alias + +func (a *Aliases) UnmarshalJSON(b []byte) error { + var aliases []AliasEnc + err := json.Unmarshal(b, &aliases) + if err != nil { + return err + } + + *a = make([]Alias, len(aliases)) + for i, alias := range aliases { + (*a)[i] = alias.Alias + } + return nil +} + +func (a Aliases) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) { + var ips netipx.IPSetBuilder + var errs []error + + for _, alias := range a { + aips, err := alias.Resolve(p, users, nodes) + if err != nil { + errs = append(errs, err) + } + + ips.AddSet(aips) + } + + return buildIPSetMultiErr(&ips, errs) +} + +func buildIPSetMultiErr(ipBuilder *netipx.IPSetBuilder, errs []error) (*netipx.IPSet, error) { + ips, err := ipBuilder.IPSet() + return ips, multierr.New(append(errs, err)...) 
+}
+
+// unmarshalPointer unmarshals a JSON string into either an AutoApprover or an Owner pointer.
+func unmarshalPointer[T any](
+	b []byte,
+	parseFunc func(string) (T, error),
+) (T, error) {
+	var s string
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		var t T
+		return t, err
+	}
+
+	return parseFunc(s)
+}
+
+type AutoApprover interface {
+	CanBeAutoApprover() bool
+	UnmarshalJSON([]byte) error
+}
+
+type AutoApprovers []AutoApprover
+
+func (aa *AutoApprovers) UnmarshalJSON(b []byte) error {
+	var autoApprovers []AutoApproverEnc
+	err := json.Unmarshal(b, &autoApprovers)
+	if err != nil {
+		return err
+	}
+
+	*aa = make([]AutoApprover, len(autoApprovers))
+	for i, autoApprover := range autoApprovers {
+		(*aa)[i] = autoApprover.AutoApprover
+	}
+	return nil
+}
+
+func parseAutoApprover(s string) (AutoApprover, error) {
+	switch {
+	case isUser(s):
+		return ptr.To(Username(s)), nil
+	case isGroup(s):
+		return ptr.To(Group(s)), nil
+	case isTag(s):
+		return ptr.To(Tag(s)), nil
+	}
+
+	return nil, fmt.Errorf(`Invalid AutoApprover %q. An auto-approver must be one of the following types:
+- user (containing an "@")
+- group (starting with "group:")
+- tag (starting with "tag:")
+
+Please check the format and try again.`, s)
+}
+
+// AutoApproverEnc is used to deserialize an AutoApprover.
+type AutoApproverEnc struct{ AutoApprover }
+
+func (ve *AutoApproverEnc) UnmarshalJSON(b []byte) error {
+	ptr, err := unmarshalPointer[AutoApprover](
+		b,
+		parseAutoApprover,
+	)
+	if err != nil {
+		return err
+	}
+	ve.AutoApprover = ptr
+	return nil
+}
+
+type Owner interface {
+	CanBeTagOwner() bool
+	UnmarshalJSON([]byte) error
+}
+
+// OwnerEnc is used to deserialize an Owner.
+type OwnerEnc struct{ Owner }
+
+func (ve *OwnerEnc) UnmarshalJSON(b []byte) error {
+	ptr, err := unmarshalPointer[Owner](
+		b,
+		parseOwner,
+	)
+	if err != nil {
+		return err
+	}
+	ve.Owner = ptr
+	return nil
+}
+
+type Owners []Owner
+
+func (o *Owners) UnmarshalJSON(b []byte) error {
+	var owners []OwnerEnc
+	err := json.Unmarshal(b, &owners)
+	if err != nil {
+		return err
+	}
+
+	*o = make([]Owner, len(owners))
+	for i, owner := range owners {
+		(*o)[i] = owner.Owner
+	}
+	return nil
+}
+
+func parseOwner(s string) (Owner, error) {
+	switch {
+	case isUser(s):
+		return ptr.To(Username(s)), nil
+	case isGroup(s):
+		return ptr.To(Group(s)), nil
+	}
+	return nil, fmt.Errorf(`Invalid Owner %q. An owner must be one of the following types:
+- user (containing an "@")
+- group (starting with "group:")
+
+Please check the format and try again.`, s)
+}
+
+type Usernames []Username
+
+// Groups are a map of Group to a list of Username.
+type Groups map[Group]Usernames
+
+// UnmarshalJSON overrides the default JSON unmarshalling for Groups to ensure
+// that each group name is validated using the isGroup function. This ensures
+// that all group names conform to the expected format, which is always prefixed
+// with "group:". If any group name is invalid, an error is returned.
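+//
+// A hedged sketch of a valid groups block in the policy file:
+//
+//	"groups": {
+//		"group:example": ["alice@", "bob@"],
+//	}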
+func (g *Groups) UnmarshalJSON(b []byte) error {
+	var rawGroups map[string][]string
+	if err := json.Unmarshal(b, &rawGroups); err != nil {
+		return err
+	}
+
+	*g = make(Groups)
+	for key, value := range rawGroups {
+		group := Group(key)
+		if err := group.Validate(); err != nil {
+			return err
+		}
+
+		var usernames Usernames
+
+		for _, u := range value {
+			username := Username(u)
+			if err := username.Validate(); err != nil {
+				if isGroup(u) {
+					return fmt.Errorf("Nested groups are not allowed, found %q inside %q", u, group)
+				}
+
+				return err
+			}
+			usernames = append(usernames, username)
+		}
+
+		(*g)[group] = usernames
+	}
+	return nil
+}
+
+// Hosts are aliases for IP addresses or subnets.
+type Hosts map[Host]Prefix
+
+func (h *Hosts) UnmarshalJSON(b []byte) error {
+	var rawHosts map[string]string
+	if err := json.Unmarshal(b, &rawHosts); err != nil {
+		return err
+	}
+
+	*h = make(Hosts)
+	for key, value := range rawHosts {
+		host := Host(key)
+		if err := host.Validate(); err != nil {
+			return err
+		}
+
+		var pref Prefix
+		err := pref.parseString(value)
+		if err != nil {
+			return fmt.Errorf("Hostname %q contains an invalid IP address: %q", key, value)
+		}
+
+		(*h)[host] = pref
+	}
+	return nil
+}
+
+// TagOwners are a map of Tag to a list of the owners (users and groups) that own the tag.
+type TagOwners map[Tag]Owners
+
+// resolveTagOwners resolves the TagOwners to a map of Tag to netipx.IPSet.
+// The resulting map can be used to quickly look up the IPSet for a given Tag.
+// It is intended for internal use in a PolicyManager.
+func resolveTagOwners(p *Policy, users types.Users, nodes types.Nodes) (map[Tag]*netipx.IPSet, error) {
+	if p == nil {
+		return nil, nil
+	}
+
+	ret := make(map[Tag]*netipx.IPSet)
+
+	for tag, owners := range p.TagOwners {
+		var ips netipx.IPSetBuilder
+
+		for _, owner := range owners {
+			o, ok := owner.(Alias)
+			if !ok {
+				// Should never happen
+				return nil, fmt.Errorf("owner %v is not an Alias", owner)
+			}
+			// If it does not resolve, that means the tag is not associated with any IP addresses.
+			resolved, _ := o.Resolve(p, users, nodes)
+			ips.AddSet(resolved)
+		}
+
+		ipSet, err := ips.IPSet()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[tag] = ipSet
+	}
+
+	return ret, nil
+}
+
+type AutoApproverPolicy struct {
+	Routes   map[netip.Prefix]AutoApprovers `json:"routes"`
+	ExitNode AutoApprovers                  `json:"exitNode"`
+}
+
+// resolveAutoApprovers resolves the AutoApprovers to a map of netip.Prefix to netipx.IPSet.
+// The resulting map can be used to quickly look up if a node can self-approve a route.
+// It is intended for internal use in a PolicyManager.
+func resolveAutoApprovers(p *Policy, users types.Users, nodes types.Nodes) (map[netip.Prefix]*netipx.IPSet, error) {
+	if p == nil {
+		return nil, nil
+	}
+
+	routes := make(map[netip.Prefix]*netipx.IPSetBuilder)
+
+	for prefix, autoApprovers := range p.AutoApprovers.Routes {
+		if _, ok := routes[prefix]; !ok {
+			routes[prefix] = new(netipx.IPSetBuilder)
+		}
+		for _, autoApprover := range autoApprovers {
+			aa, ok := autoApprover.(Alias)
+			if !ok {
+				// Should never happen
+				return nil, fmt.Errorf("autoApprover %v is not an Alias", autoApprover)
+			}
+			// If it does not resolve, that means the autoApprover is not associated with any IP addresses.
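+			// The resolve error is therefore dropped rather than propagated,
+			// so one unresolvable approver does not invalidate the rest.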
+ ips, _ := aa.Resolve(p, users, nodes) + routes[prefix].AddSet(ips) + } + } + + var exitNodeSetBuilder netipx.IPSetBuilder + if len(p.AutoApprovers.ExitNode) > 0 { + for _, autoApprover := range p.AutoApprovers.ExitNode { + aa, ok := autoApprover.(Alias) + if !ok { + // Should never happen + return nil, fmt.Errorf("autoApprover %v is not an Alias", autoApprover) + } + // If it does not resolve, that means the autoApprover is not associated with any IP addresses. + ips, _ := aa.Resolve(p, users, nodes) + exitNodeSetBuilder.AddSet(ips) + } + } + + ret := make(map[netip.Prefix]*netipx.IPSet) + for prefix, builder := range routes { + ipSet, err := builder.IPSet() + if err != nil { + return nil, err + } + ret[prefix] = ipSet + } + + if len(p.AutoApprovers.ExitNode) > 0 { + exitNodeSet, err := exitNodeSetBuilder.IPSet() + if err != nil { + return nil, err + } + + ret[tsaddr.AllIPv4()] = exitNodeSet + ret[tsaddr.AllIPv6()] = exitNodeSet + } + + return ret, nil +} + +type ACL struct { + Action string `json:"action"` // TODO(kradalby): add strict type + Protocol string `json:"proto"` // TODO(kradalby): add strict type + Sources Aliases `json:"src"` + Destinations []AliasWithPorts `json:"dst"` +} + +// Policy represents a Tailscale Network Policy. +// TODO(kradalby): +// Add validation method checking: +// All users exists +// All groups and users are valid tag TagOwners +// Everything referred to in ACLs exists in other +// entities. +type Policy struct { + // validated is set if the policy has been validated. + // It is not safe to use before it is validated, and + // callers using it should panic if not + validated bool `json:"-"` + + Groups Groups `json:"groups"` + Hosts Hosts `json:"hosts"` + TagOwners TagOwners `json:"tagOwners"` + ACLs []ACL `json:"acls"` + AutoApprovers AutoApproverPolicy `json:"autoApprovers"` + SSHs []SSH `json:"ssh"` +} + +// SSH controls who can ssh into which machines. +type SSH struct { + Action string `json:"action"` // TODO(kradalby): add strict type + Sources SSHSrcAliases `json:"src"` + Destinations SSHDstAliases `json:"dst"` + Users []SSHUser `json:"users"` + CheckPeriod time.Duration `json:"checkPeriod,omitempty"` +} + +// SSHSrcAliases is a list of aliases that can be used as sources in an SSH rule. +// It can be a list of usernames, groups, tags or autogroups. +type SSHSrcAliases []Alias + +func (a *SSHSrcAliases) UnmarshalJSON(b []byte) error { + var aliases []AliasEnc + err := json.Unmarshal(b, &aliases) + if err != nil { + return err + } + + *a = make([]Alias, len(aliases)) + for i, alias := range aliases { + switch alias.Alias.(type) { + case *Username, *Group, *Tag, *AutoGroup: + (*a)[i] = alias.Alias + default: + return fmt.Errorf("type %T not supported", alias.Alias) + } + } + return nil +} + +func (a SSHSrcAliases) Resolve(p *Policy, users types.Users, nodes types.Nodes) (*netipx.IPSet, error) { + var ips netipx.IPSetBuilder + var errs []error + + for _, alias := range a { + aips, err := alias.Resolve(p, users, nodes) + if err != nil { + errs = append(errs, err) + } + + ips.AddSet(aips) + } + + return buildIPSetMultiErr(&ips, errs) +} + +// SSHDstAliases is a list of aliases that can be used as destinations in an SSH rule. +// It can be a list of usernames, tags or autogroups. 
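+//
+// A hedged policy sketch: "dst": ["testuser@", "tag:web"].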
+type SSHDstAliases []Alias
+
+func (a *SSHDstAliases) UnmarshalJSON(b []byte) error {
+	var aliases []AliasEnc
+	err := json.Unmarshal(b, &aliases)
+	if err != nil {
+		return err
+	}
+
+	*a = make([]Alias, len(aliases))
+	for i, alias := range aliases {
+		switch alias.Alias.(type) {
+		case *Username, *Tag, *AutoGroup,
+			// Asterix and Group are actually not supposed to be supported,
+			// however we do not support autogroups at the moment
+			// so we will leave them in as there is no other option
+			// to dynamically give all access
+			// https://tailscale.com/kb/1193/tailscale-ssh#dst
+			Asterix,
+			*Group:
+			(*a)[i] = alias.Alias
+		default:
+			return fmt.Errorf("type %T not supported", alias.Alias)
+		}
+	}
+	return nil
+}
+
+type SSHUser string
+
+func (u SSHUser) String() string {
+	return string(u)
+}
+
+func policyFromBytes(b []byte) (*Policy, error) {
+	if len(b) == 0 {
+		return nil, nil
+	}
+
+	var policy Policy
+	ast, err := hujson.Parse(b)
+	if err != nil {
+		return nil, fmt.Errorf("parsing HuJSON: %w", err)
+	}
+
+	ast.Standardize()
+	acl := ast.Pack()
+
+	err = json.Unmarshal(acl, &policy)
+	if err != nil {
+		return nil, fmt.Errorf("parsing policy from bytes: %w", err)
+	}
+
+	return &policy, nil
+}
+
+const (
+	expectedTokenItems = 2
+)
diff --git a/hscontrol/policy/v2/types_test.go b/hscontrol/policy/v2/types_test.go
new file mode 100644
index 00000000..2218685e
--- /dev/null
+++ b/hscontrol/policy/v2/types_test.go
@@ -0,0 +1,1162 @@
+package v2
+
+import (
+	"encoding/json"
+	"net/netip"
+	"strings"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/juanfont/headscale/hscontrol/types"
+	"github.com/juanfont/headscale/hscontrol/util"
+	"github.com/stretchr/testify/require"
+	"go4.org/netipx"
+	xmaps "golang.org/x/exp/maps"
+	"gorm.io/gorm"
+	"tailscale.com/net/tsaddr"
+	"tailscale.com/tailcfg"
+	"tailscale.com/types/ptr"
+)
+
+func TestUnmarshalPolicy(t *testing.T) {
+	tests := []struct {
+		name    string
+		input   string
+		want    *Policy
+		wantErr string
+	}{
+		{
+			name:  "empty",
+			input: "{}",
+			want:  &Policy{},
+		},
+		{
+			name: "groups",
+			input: `
+{
+	"groups": {
+		"group:example": [
+			"derp@headscale.net",
+		],
+	},
+}
+`,
+			want: &Policy{
+				Groups: Groups{
+					Group("group:example"): []Username{Username("derp@headscale.net")},
+				},
+			},
+		},
+		{
+			name: "basic-types",
+			input: `
+{
+	"groups": {
+		"group:example": [
+			"testuser@headscale.net",
+		],
+		"group:other": [
+			"otheruser@headscale.net",
+		],
+		"group:noat": [
+			"noat@",
+		],
+	},
+
+	"tagOwners": {
+		"tag:user": ["testuser@headscale.net"],
+		"tag:group": ["group:other"],
+		"tag:userandgroup": ["testuser@headscale.net", "group:other"],
+	},
+
+	"hosts": {
+		"host-1": "100.100.100.100",
+		"subnet-1": "100.100.101.100/24",
+		"outside": "192.168.0.0/16",
+	},
+
+	"acls": [
+		// All
+		{
+			"action": "accept",
+			"proto": "tcp",
+			"src": ["*"],
+			"dst": ["*:*"],
+		},
+		// Users
+		{
+			"action": "accept",
+			"proto": "tcp",
+			"src": ["testuser@headscale.net"],
+			"dst": ["otheruser@headscale.net:80"],
+		},
+		// Groups
+		{
+			"action": "accept",
+			"proto": "tcp",
+			"src": ["group:example"],
+			"dst": ["group:other:80"],
+		},
+		// Tailscale IP
+		{
+			"action": "accept",
+			"proto": "tcp",
+			"src": ["100.101.102.103"],
+			"dst": ["100.101.102.104:80"],
+		},
+		// Subnet
+		{
+			"action": "accept",
+			"proto": "udp",
+			"src": ["10.0.0.0/8"],
+			"dst": ["172.16.0.0/16:80"],
+		},
+		// Hosts
+		{
+			"action": "accept",
+			"proto": "tcp",
+			"src": ["subnet-1"],
+			"dst": ["host-1:80-88"],
+		},
+		//
Tags + { + "action": "accept", + "proto": "tcp", + "src": ["tag:group"], + "dst": ["tag:user:80,443"], + }, + // Autogroup + { + "action": "accept", + "proto": "tcp", + "src": ["tag:group"], + "dst": ["autogroup:internet:80"], + }, + ], +} +`, + want: &Policy{ + Groups: Groups{ + Group("group:example"): []Username{Username("testuser@headscale.net")}, + Group("group:other"): []Username{Username("otheruser@headscale.net")}, + Group("group:noat"): []Username{Username("noat@")}, + }, + TagOwners: TagOwners{ + Tag("tag:user"): Owners{up("testuser@headscale.net")}, + Tag("tag:group"): Owners{gp("group:other")}, + Tag("tag:userandgroup"): Owners{up("testuser@headscale.net"), gp("group:other")}, + }, + Hosts: Hosts{ + "host-1": Prefix(mp("100.100.100.100/32")), + "subnet-1": Prefix(mp("100.100.101.100/24")), + "outside": Prefix(mp("192.168.0.0/16")), + }, + ACLs: []ACL{ + { + Action: "accept", + Protocol: "tcp", + Sources: Aliases{ + Wildcard, + }, + Destinations: []AliasWithPorts{ + { + // TODO(kradalby): Should this be host? + // It is: + // Includes any destination (no restrictions). + Alias: Wildcard, + Ports: []tailcfg.PortRange{tailcfg.PortRangeAny}, + }, + }, + }, + { + Action: "accept", + Protocol: "tcp", + Sources: Aliases{ + ptr.To(Username("testuser@headscale.net")), + }, + Destinations: []AliasWithPorts{ + { + Alias: ptr.To(Username("otheruser@headscale.net")), + Ports: []tailcfg.PortRange{{First: 80, Last: 80}}, + }, + }, + }, + { + Action: "accept", + Protocol: "tcp", + Sources: Aliases{ + gp("group:example"), + }, + Destinations: []AliasWithPorts{ + { + Alias: gp("group:other"), + Ports: []tailcfg.PortRange{{First: 80, Last: 80}}, + }, + }, + }, + { + Action: "accept", + Protocol: "tcp", + Sources: Aliases{ + pp("100.101.102.103/32"), + }, + Destinations: []AliasWithPorts{ + { + Alias: pp("100.101.102.104/32"), + Ports: []tailcfg.PortRange{{First: 80, Last: 80}}, + }, + }, + }, + { + Action: "accept", + Protocol: "udp", + Sources: Aliases{ + pp("10.0.0.0/8"), + }, + Destinations: []AliasWithPorts{ + { + Alias: pp("172.16.0.0/16"), + Ports: []tailcfg.PortRange{{First: 80, Last: 80}}, + }, + }, + }, + { + Action: "accept", + Protocol: "tcp", + Sources: Aliases{ + hp("subnet-1"), + }, + Destinations: []AliasWithPorts{ + { + Alias: hp("host-1"), + Ports: []tailcfg.PortRange{{First: 80, Last: 88}}, + }, + }, + }, + { + Action: "accept", + Protocol: "tcp", + Sources: Aliases{ + tp("tag:group"), + }, + Destinations: []AliasWithPorts{ + { + Alias: tp("tag:user"), + Ports: []tailcfg.PortRange{ + {First: 80, Last: 80}, + {First: 443, Last: 443}, + }, + }, + }, + }, + { + Action: "accept", + Protocol: "tcp", + Sources: Aliases{ + tp("tag:group"), + }, + Destinations: []AliasWithPorts{ + { + Alias: agp("autogroup:internet"), + Ports: []tailcfg.PortRange{ + {First: 80, Last: 80}, + }, + }, + }, + }, + }, + }, + }, + { + name: "invalid-username", + input: ` +{ + "groups": { + "group:example": [ + "valid@", + "invalid", + ], + }, +} +`, + wantErr: `Username has to contain @, got: "invalid"`, + }, + { + name: "invalid-group", + input: ` +{ + "groups": { + "grou:example": [ + "valid@", + ], + }, +} +`, + wantErr: `Group has to start with "group:", got: "grou:example"`, + }, + { + name: "group-in-group", + input: ` +{ + "groups": { + "group:inner": [], + "group:example": [ + "group:inner", + ], + }, +} +`, + // wantErr: `Username has to contain @, got: "group:inner"`, + wantErr: `Nested groups are not allowed, found "group:inner" inside "group:example"`, + }, + { + name: "invalid-addr", + input: ` +{ 
+ "hosts": { + "derp": "10.0", + }, +} +`, + wantErr: `Hostname "derp" contains an invalid IP address: "10.0"`, + }, + { + name: "invalid-prefix", + input: ` +{ + "hosts": { + "derp": "10.0/42", + }, +} +`, + wantErr: `Hostname "derp" contains an invalid IP address: "10.0/42"`, + }, + // TODO(kradalby): Figure out why this doesnt work. + // { + // name: "invalid-hostname", + // input: ` + // { + // "hosts": { + // "derp:merp": "10.0.0.0/31", + // }, + // } + // `, + // wantErr: `Hostname "derp:merp" is invalid`, + // }, + { + name: "invalid-auto-group", + input: ` +{ + "acls": [ + // Autogroup + { + "action": "accept", + "proto": "tcp", + "src": ["tag:group"], + "dst": ["autogroup:invalid:80"], + }, + ], +} +`, + wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet]`, + }, + } + + cmps := append(util.Comparers, cmp.Comparer(func(x, y Prefix) bool { + return x == y + })) + cmps = append(cmps, cmpopts.IgnoreUnexported(Policy{})) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + policy, err := policyFromBytes([]byte(tt.input)) + if tt.wantErr == "" { + if err != nil { + t.Fatalf("got %v; want no error", err) + } + } else { + if err == nil { + t.Fatalf("got nil; want error %q", tt.wantErr) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Fatalf("got err %v; want error %q", err, tt.wantErr) + } + } + + if diff := cmp.Diff(tt.want, policy, cmps...); diff != "" { + t.Fatalf("unexpected policy (-want +got):\n%s", diff) + } + }) + } +} + +func gp(s string) *Group { return ptr.To(Group(s)) } +func up(s string) *Username { return ptr.To(Username(s)) } +func hp(s string) *Host { return ptr.To(Host(s)) } +func tp(s string) *Tag { return ptr.To(Tag(s)) } +func agp(s string) *AutoGroup { return ptr.To(AutoGroup(s)) } +func mp(pref string) netip.Prefix { return netip.MustParsePrefix(pref) } +func ap(addr string) *netip.Addr { return ptr.To(netip.MustParseAddr(addr)) } +func pp(pref string) *Prefix { return ptr.To(Prefix(mp(pref))) } +func p(pref string) Prefix { return Prefix(mp(pref)) } + +func TestResolvePolicy(t *testing.T) { + users := map[string]types.User{ + "testuser": {Model: gorm.Model{ID: 1}, Name: "testuser"}, + "groupuser": {Model: gorm.Model{ID: 2}, Name: "groupuser"}, + "groupuser1": {Model: gorm.Model{ID: 3}, Name: "groupuser1"}, + "groupuser2": {Model: gorm.Model{ID: 4}, Name: "groupuser2"}, + "notme": {Model: gorm.Model{ID: 5}, Name: "notme"}, + } + tests := []struct { + name string + nodes types.Nodes + pol *Policy + toResolve Alias + want []netip.Prefix + wantErr string + }{ + { + name: "prefix", + toResolve: pp("100.100.101.101/32"), + want: []netip.Prefix{mp("100.100.101.101/32")}, + }, + { + name: "host", + pol: &Policy{ + Hosts: Hosts{ + "testhost": p("100.100.101.102/32"), + }, + }, + toResolve: hp("testhost"), + want: []netip.Prefix{mp("100.100.101.102/32")}, + }, + { + name: "username", + toResolve: ptr.To(Username("testuser@")), + nodes: types.Nodes{ + // Not matching other user + { + User: users["notme"], + IPv4: ap("100.100.101.1"), + }, + // Not matching forced tags + { + User: users["testuser"], + ForcedTags: []string{"tag:anything"}, + IPv4: ap("100.100.101.2"), + }, + // not matchin pak tag + { + User: users["testuser"], + AuthKey: &types.PreAuthKey{ + Tags: []string{"alsotagged"}, + }, + IPv4: ap("100.100.101.3"), + }, + { + User: users["testuser"], + IPv4: ap("100.100.101.103"), + }, + { + User: users["testuser"], + IPv4: ap("100.100.101.104"), + }, + }, + want: 
[]netip.Prefix{mp("100.100.101.103/32"), mp("100.100.101.104/32")}, + }, + { + name: "group", + toResolve: ptr.To(Group("group:testgroup")), + nodes: types.Nodes{ + // Not matching other user + { + User: users["notme"], + IPv4: ap("100.100.101.4"), + }, + // Not matching forced tags + { + User: users["groupuser"], + ForcedTags: []string{"tag:anything"}, + IPv4: ap("100.100.101.5"), + }, + // not matchin pak tag + { + User: users["groupuser"], + AuthKey: &types.PreAuthKey{ + Tags: []string{"tag:alsotagged"}, + }, + IPv4: ap("100.100.101.6"), + }, + { + User: users["groupuser"], + IPv4: ap("100.100.101.203"), + }, + { + User: users["groupuser"], + IPv4: ap("100.100.101.204"), + }, + }, + pol: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"groupuser"}, + "group:othergroup": Usernames{"notmetoo"}, + }, + }, + want: []netip.Prefix{mp("100.100.101.203/32"), mp("100.100.101.204/32")}, + }, + { + name: "tag", + toResolve: tp("tag:test"), + nodes: types.Nodes{ + // Not matching other user + { + User: users["notme"], + IPv4: ap("100.100.101.9"), + }, + // Not matching forced tags + { + ForcedTags: []string{"tag:anything"}, + IPv4: ap("100.100.101.10"), + }, + // not matchin pak tag + { + AuthKey: &types.PreAuthKey{ + Tags: []string{"tag:alsotagged"}, + }, + IPv4: ap("100.100.101.11"), + }, + // Not matching forced tags + { + ForcedTags: []string{"tag:test"}, + IPv4: ap("100.100.101.234"), + }, + // not matchin pak tag + { + AuthKey: &types.PreAuthKey{ + Tags: []string{"tag:test"}, + }, + IPv4: ap("100.100.101.239"), + }, + }, + // TODO(kradalby): tests handling TagOwners + hostinfo + pol: &Policy{}, + want: []netip.Prefix{mp("100.100.101.234/32"), mp("100.100.101.239/32")}, + }, + { + name: "empty-policy", + toResolve: pp("100.100.101.101/32"), + pol: &Policy{}, + want: []netip.Prefix{mp("100.100.101.101/32")}, + }, + { + name: "invalid-host", + toResolve: hp("invalidhost"), + pol: &Policy{ + Hosts: Hosts{ + "testhost": p("100.100.101.102/32"), + }, + }, + wantErr: `unable to resolve host: "invalidhost"`, + }, + { + name: "multiple-groups", + toResolve: ptr.To(Group("group:testgroup")), + nodes: types.Nodes{ + { + User: users["groupuser1"], + IPv4: ap("100.100.101.203"), + }, + { + User: users["groupuser2"], + IPv4: ap("100.100.101.204"), + }, + }, + pol: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"groupuser1@", "groupuser2@"}, + }, + }, + want: []netip.Prefix{mp("100.100.101.203/32"), mp("100.100.101.204/32")}, + }, + { + name: "autogroup-internet", + toResolve: agp("autogroup:internet"), + want: util.TheInternet().Prefixes(), + }, + { + name: "invalid-username", + toResolve: ptr.To(Username("invaliduser@")), + nodes: types.Nodes{ + { + User: users["testuser"], + IPv4: ap("100.100.101.103"), + }, + }, + wantErr: `user with token "invaliduser@" not found`, + }, + { + name: "invalid-tag", + toResolve: tp("tag:invalid"), + nodes: types.Nodes{ + { + ForcedTags: []string{"tag:test"}, + IPv4: ap("100.100.101.234"), + }, + }, + }, + { + name: "ipv6-address", + toResolve: pp("fd7a:115c:a1e0::1/128"), + want: []netip.Prefix{mp("fd7a:115c:a1e0::1/128")}, + }, + { + name: "wildcard-alias", + toResolve: Wildcard, + want: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ips, err := tt.toResolve.Resolve(tt.pol, + xmaps.Values(users), + tt.nodes) + if tt.wantErr == "" { + if err != nil { + t.Fatalf("got %v; want no error", err) + } + } else { + if err == nil { + t.Fatalf("got nil; want error %q", 
tt.wantErr) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Fatalf("got err %v; want error %q", err, tt.wantErr) + } + } + + var prefs []netip.Prefix + if ips != nil { + if p := ips.Prefixes(); len(p) > 0 { + prefs = p + } + } + + if diff := cmp.Diff(tt.want, prefs, util.Comparers...); diff != "" { + t.Fatalf("unexpected prefs (-want +got):\n%s", diff) + } + }) + } +} + +func TestResolveAutoApprovers(t *testing.T) { + users := types.Users{ + {Model: gorm.Model{ID: 1}, Name: "user1"}, + {Model: gorm.Model{ID: 2}, Name: "user2"}, + {Model: gorm.Model{ID: 3}, Name: "user3"}, + } + + nodes := types.Nodes{ + { + IPv4: ap("100.64.0.1"), + User: users[0], + }, + { + IPv4: ap("100.64.0.2"), + User: users[1], + }, + { + IPv4: ap("100.64.0.3"), + User: users[2], + }, + { + IPv4: ap("100.64.0.4"), + ForcedTags: []string{"tag:testtag"}, + }, + { + IPv4: ap("100.64.0.5"), + ForcedTags: []string{"tag:exittest"}, + }, + } + + tests := []struct { + name string + policy *Policy + want map[netip.Prefix]*netipx.IPSet + wantErr bool + }{ + { + name: "single-route", + policy: &Policy{ + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Username("user1@"))}, + }, + }, + }, + want: map[netip.Prefix]*netipx.IPSet{ + mp("10.0.0.0/24"): mustIPSet("100.64.0.1/32"), + }, + wantErr: false, + }, + { + name: "multiple-routes", + policy: &Policy{ + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Username("user1@"))}, + mp("10.0.1.0/24"): {ptr.To(Username("user2@"))}, + }, + }, + }, + want: map[netip.Prefix]*netipx.IPSet{ + mp("10.0.0.0/24"): mustIPSet("100.64.0.1/32"), + mp("10.0.1.0/24"): mustIPSet("100.64.0.2/32"), + }, + wantErr: false, + }, + { + name: "exit-node", + policy: &Policy{ + AutoApprovers: AutoApproverPolicy{ + ExitNode: AutoApprovers{ptr.To(Username("user1@"))}, + }, + }, + want: map[netip.Prefix]*netipx.IPSet{ + tsaddr.AllIPv4(): mustIPSet("100.64.0.1/32"), + tsaddr.AllIPv6(): mustIPSet("100.64.0.1/32"), + }, + wantErr: false, + }, + { + name: "group-route", + policy: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"user1@", "user2@"}, + }, + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Group("group:testgroup"))}, + }, + }, + }, + want: map[netip.Prefix]*netipx.IPSet{ + mp("10.0.0.0/24"): mustIPSet("100.64.0.1/32", "100.64.0.2/32"), + }, + wantErr: false, + }, + { + name: "tag-route-and-exit", + policy: &Policy{ + TagOwners: TagOwners{ + "tag:testtag": Owners{ + ptr.To(Username("user1@")), + ptr.To(Username("user2@")), + }, + "tag:exittest": Owners{ + ptr.To(Group("group:exitgroup")), + }, + }, + Groups: Groups{ + "group:exitgroup": Usernames{"user2@"}, + }, + AutoApprovers: AutoApproverPolicy{ + ExitNode: AutoApprovers{ptr.To(Tag("tag:exittest"))}, + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.1.0/24"): {ptr.To(Tag("tag:testtag"))}, + }, + }, + }, + want: map[netip.Prefix]*netipx.IPSet{ + mp("10.0.1.0/24"): mustIPSet("100.64.0.4/32"), + tsaddr.AllIPv4(): mustIPSet("100.64.0.5/32"), + tsaddr.AllIPv6(): mustIPSet("100.64.0.5/32"), + }, + wantErr: false, + }, + { + name: "mixed-routes-and-exit-nodes", + policy: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"user1", "user2"}, + }, + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Group("group:testgroup"))}, + mp("10.0.1.0/24"): {ptr.To(Username("user3@"))}, + }, + ExitNode: 
AutoApprovers{ptr.To(Username("user1@"))}, + }, + }, + want: map[netip.Prefix]*netipx.IPSet{ + mp("10.0.0.0/24"): mustIPSet("100.64.0.1/32", "100.64.0.2/32"), + mp("10.0.1.0/24"): mustIPSet("100.64.0.3/32"), + tsaddr.AllIPv4(): mustIPSet("100.64.0.1/32"), + tsaddr.AllIPv6(): mustIPSet("100.64.0.1/32"), + }, + wantErr: false, + }, + } + + cmps := append(util.Comparers, cmp.Comparer(ipSetComparer)) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := resolveAutoApprovers(tt.policy, users, nodes) + if (err != nil) != tt.wantErr { + t.Errorf("resolveAutoApprovers() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got, cmps...); diff != "" { + t.Errorf("resolveAutoApprovers() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func mustIPSet(prefixes ...string) *netipx.IPSet { + var builder netipx.IPSetBuilder + for _, p := range prefixes { + builder.AddPrefix(mp(p)) + } + ipSet, _ := builder.IPSet() + return ipSet +} + +func ipSetComparer(x, y *netipx.IPSet) bool { + if x == nil || y == nil { + return x == y + } + return cmp.Equal(x.Prefixes(), y.Prefixes(), util.Comparers...) +} + +func TestNodeCanApproveRoute(t *testing.T) { + users := types.Users{ + {Model: gorm.Model{ID: 1}, Name: "user1"}, + {Model: gorm.Model{ID: 2}, Name: "user2"}, + {Model: gorm.Model{ID: 3}, Name: "user3"}, + } + + nodes := types.Nodes{ + { + IPv4: ap("100.64.0.1"), + User: users[0], + }, + { + IPv4: ap("100.64.0.2"), + User: users[1], + }, + { + IPv4: ap("100.64.0.3"), + User: users[2], + }, + } + + tests := []struct { + name string + policy *Policy + node *types.Node + route netip.Prefix + want bool + wantErr bool + }{ + { + name: "single-route-approval", + policy: &Policy{ + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Username("user1@"))}, + }, + }, + }, + node: nodes[0], + route: mp("10.0.0.0/24"), + want: true, + }, + { + name: "multiple-routes-approval", + policy: &Policy{ + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Username("user1@"))}, + mp("10.0.1.0/24"): {ptr.To(Username("user2@"))}, + }, + }, + }, + node: nodes[1], + route: mp("10.0.1.0/24"), + want: true, + }, + { + name: "exit-node-approval", + policy: &Policy{ + AutoApprovers: AutoApproverPolicy{ + ExitNode: AutoApprovers{ptr.To(Username("user1@"))}, + }, + }, + node: nodes[0], + route: tsaddr.AllIPv4(), + want: true, + }, + { + name: "group-route-approval", + policy: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"user1@", "user2@"}, + }, + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Group("group:testgroup"))}, + }, + }, + }, + node: nodes[1], + route: mp("10.0.0.0/24"), + want: true, + }, + { + name: "mixed-routes-and-exit-nodes-approval", + policy: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"user1@", "user2@"}, + }, + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Group("group:testgroup"))}, + mp("10.0.1.0/24"): {ptr.To(Username("user3@"))}, + }, + ExitNode: AutoApprovers{ptr.To(Username("user1@"))}, + }, + }, + node: nodes[0], + route: tsaddr.AllIPv4(), + want: true, + }, + { + name: "no-approval", + policy: &Policy{ + AutoApprovers: AutoApproverPolicy{ + Routes: map[netip.Prefix]AutoApprovers{ + mp("10.0.0.0/24"): {ptr.To(Username("user2@"))}, + }, + }, + }, + node: nodes[0], + route: mp("10.0.0.0/24"), + want: 
false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := json.Marshal(tt.policy) + require.NoError(t, err) + + pm, err := NewPolicyManager(b, users, nodes) + require.NoErrorf(t, err, "NewPolicyManager() error = %v", err) + + got := pm.NodeCanApproveRoute(tt.node, tt.route) + if got != tt.want { + t.Errorf("NodeCanApproveRoute() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestResolveTagOwners(t *testing.T) { + users := types.Users{ + {Model: gorm.Model{ID: 1}, Name: "user1"}, + {Model: gorm.Model{ID: 2}, Name: "user2"}, + {Model: gorm.Model{ID: 3}, Name: "user3"}, + } + + nodes := types.Nodes{ + { + IPv4: ap("100.64.0.1"), + User: users[0], + }, + { + IPv4: ap("100.64.0.2"), + User: users[1], + }, + { + IPv4: ap("100.64.0.3"), + User: users[2], + }, + } + + tests := []struct { + name string + policy *Policy + want map[Tag]*netipx.IPSet + wantErr bool + }{ + { + name: "single-tag-owner", + policy: &Policy{ + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Username("user1@"))}, + }, + }, + want: map[Tag]*netipx.IPSet{ + Tag("tag:test"): mustIPSet("100.64.0.1/32"), + }, + wantErr: false, + }, + { + name: "multiple-tag-owners", + policy: &Policy{ + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Username("user1@")), ptr.To(Username("user2@"))}, + }, + }, + want: map[Tag]*netipx.IPSet{ + Tag("tag:test"): mustIPSet("100.64.0.1/32", "100.64.0.2/32"), + }, + wantErr: false, + }, + { + name: "group-tag-owner", + policy: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"user1@", "user2@"}, + }, + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Group("group:testgroup"))}, + }, + }, + want: map[Tag]*netipx.IPSet{ + Tag("tag:test"): mustIPSet("100.64.0.1/32", "100.64.0.2/32"), + }, + wantErr: false, + }, + } + + cmps := append(util.Comparers, cmp.Comparer(ipSetComparer)) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := resolveTagOwners(tt.policy, users, nodes) + if (err != nil) != tt.wantErr { + t.Errorf("resolveTagOwners() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got, cmps...); diff != "" { + t.Errorf("resolveTagOwners() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestNodeCanHaveTag(t *testing.T) { + users := types.Users{ + {Model: gorm.Model{ID: 1}, Name: "user1"}, + {Model: gorm.Model{ID: 2}, Name: "user2"}, + {Model: gorm.Model{ID: 3}, Name: "user3"}, + } + + nodes := types.Nodes{ + { + IPv4: ap("100.64.0.1"), + User: users[0], + }, + { + IPv4: ap("100.64.0.2"), + User: users[1], + }, + { + IPv4: ap("100.64.0.3"), + User: users[2], + }, + } + + tests := []struct { + name string + policy *Policy + node *types.Node + tag string + want bool + wantErr string + }{ + { + name: "single-tag-owner", + policy: &Policy{ + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Username("user1@"))}, + }, + }, + node: nodes[0], + tag: "tag:test", + want: true, + }, + { + name: "multiple-tag-owners", + policy: &Policy{ + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Username("user1@")), ptr.To(Username("user2@"))}, + }, + }, + node: nodes[1], + tag: "tag:test", + want: true, + }, + { + name: "group-tag-owner", + policy: &Policy{ + Groups: Groups{ + "group:testgroup": Usernames{"user1@", "user2@"}, + }, + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Group("group:testgroup"))}, + }, + }, + node: nodes[1], + tag: "tag:test", + want: true, + }, + { + name: "invalid-group", + policy: &Policy{ + Groups: Groups{ + 
"group:testgroup": Usernames{"invalid"}, + }, + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Group("group:testgroup"))}, + }, + }, + node: nodes[0], + tag: "tag:test", + want: false, + wantErr: "Username has to contain @", + }, + { + name: "node-cannot-have-tag", + policy: &Policy{ + TagOwners: TagOwners{ + Tag("tag:test"): Owners{ptr.To(Username("user2@"))}, + }, + }, + node: nodes[0], + tag: "tag:test", + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b, err := json.Marshal(tt.policy) + require.NoError(t, err) + + pm, err := NewPolicyManager(b, users, nodes) + if tt.wantErr != "" { + require.ErrorContains(t, err, tt.wantErr) + return + } + require.NoError(t, err) + + got := pm.NodeCanHaveTag(tt.node, tt.tag) + if got != tt.want { + t.Errorf("NodeCanHaveTag() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/hscontrol/policy/v2/utils.go b/hscontrol/policy/v2/utils.go new file mode 100644 index 00000000..9c962af8 --- /dev/null +++ b/hscontrol/policy/v2/utils.go @@ -0,0 +1,164 @@ +package v2 + +import ( + "errors" + "fmt" + "slices" + "strconv" + "strings" + + "tailscale.com/tailcfg" +) + +// splitDestinationAndPort takes an input string and returns the destination and port as a tuple, or an error if the input is invalid. +func splitDestinationAndPort(input string) (string, string, error) { + // Find the last occurrence of the colon character + lastColonIndex := strings.LastIndex(input, ":") + + // Check if the colon character is present and not at the beginning or end of the string + if lastColonIndex == -1 { + return "", "", errors.New("input must contain a colon character separating destination and port") + } + if lastColonIndex == 0 { + return "", "", errors.New("input cannot start with a colon character") + } + if lastColonIndex == len(input)-1 { + return "", "", errors.New("input cannot end with a colon character") + } + + // Split the string into destination and port based on the last colon + destination := input[:lastColonIndex] + port := input[lastColonIndex+1:] + + return destination, port, nil +} + +// parsePortRange parses a port definition string and returns a slice of PortRange structs. +func parsePortRange(portDef string) ([]tailcfg.PortRange, error) { + if portDef == "*" { + return []tailcfg.PortRange{tailcfg.PortRangeAny}, nil + } + + var portRanges []tailcfg.PortRange + parts := strings.Split(portDef, ",") + + for _, part := range parts { + if strings.Contains(part, "-") { + rangeParts := strings.Split(part, "-") + rangeParts = slices.DeleteFunc(rangeParts, func(e string) bool { + return e == "" + }) + if len(rangeParts) != 2 { + return nil, errors.New("invalid port range format") + } + + first, err := parsePort(rangeParts[0]) + if err != nil { + return nil, err + } + + last, err := parsePort(rangeParts[1]) + if err != nil { + return nil, err + } + + if first > last { + return nil, errors.New("invalid port range: first port is greater than last port") + } + + portRanges = append(portRanges, tailcfg.PortRange{First: first, Last: last}) + } else { + port, err := parsePort(part) + if err != nil { + return nil, err + } + + portRanges = append(portRanges, tailcfg.PortRange{First: port, Last: port}) + } + } + + return portRanges, nil +} + +// parsePort parses a single port number from a string. 
+func parsePort(portStr string) (uint16, error) {
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return 0, errors.New("invalid port number")
+	}
+
+	if port < 0 || port > 65535 {
+		return 0, errors.New("port number out of range")
+	}
+
+	return uint16(port), nil
+}
+
+// For some reason golang.org/x/net/internal/iana is an internal package.
+const (
+	protocolICMP     = 1   // Internet Control Message
+	protocolIGMP     = 2   // Internet Group Management
+	protocolIPv4     = 4   // IPv4 encapsulation
+	protocolTCP      = 6   // Transmission Control
+	protocolEGP      = 8   // Exterior Gateway Protocol
+	protocolIGP      = 9   // any private interior gateway (used by Cisco for their IGRP)
+	protocolUDP      = 17  // User Datagram
+	protocolGRE      = 47  // Generic Routing Encapsulation
+	protocolESP      = 50  // Encap Security Payload
+	protocolAH       = 51  // Authentication Header
+	protocolIPv6ICMP = 58  // ICMP for IPv6
+	protocolSCTP     = 132 // Stream Control Transmission Protocol
+	ProtocolFC       = 133 // Fibre Channel
+)
+
+// parseProtocol reads the proto field of the ACL and generates a list of
+// protocols that will be allowed, following the IANA IP protocol numbers:
+// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+//
+// If the ACL proto field is empty, it allows ICMPv4, ICMPv6, TCP, and UDP,
+// as per Tailscale behaviour (see tailcfg.FilterRule).
+//
+// It also returns a boolean indicating whether the protocol requires all
+// destinations to use the wildcard as the port number (only TCP, UDP, and
+// SCTP support specifying ports). For example, "tcp" yields ([6], false)
+// while "icmp" yields ([1, 58], true).
+func parseProtocol(protocol string) ([]int, bool, error) {
+	switch protocol {
+	case "":
+		return nil, false, nil
+	case "igmp":
+		return []int{protocolIGMP}, true, nil
+	case "ipv4", "ip-in-ip":
+		return []int{protocolIPv4}, true, nil
+	case "tcp":
+		return []int{protocolTCP}, false, nil
+	case "egp":
+		return []int{protocolEGP}, true, nil
+	case "igp":
+		return []int{protocolIGP}, true, nil
+	case "udp":
+		return []int{protocolUDP}, false, nil
+	case "gre":
+		return []int{protocolGRE}, true, nil
+	case "esp":
+		return []int{protocolESP}, true, nil
+	case "ah":
+		return []int{protocolAH}, true, nil
+	case "sctp":
+		return []int{protocolSCTP}, false, nil
+	case "icmp":
+		return []int{protocolICMP, protocolIPv6ICMP}, true, nil
+
+	default:
+		protocolNumber, err := strconv.Atoi(protocol)
+		if err != nil {
+			return nil, false, fmt.Errorf("parsing protocol number: %w", err)
+		}
+
+		// TODO(kradalby): What is this?
+		// (Numeric protocols other than TCP, UDP, and SCTP cannot carry
+		// ports, so they must use the wildcard port, matching the named
+		// cases above.)
+		needsWildcard := protocolNumber != protocolTCP &&
+			protocolNumber != protocolUDP &&
+			protocolNumber != protocolSCTP
+
+		return []int{protocolNumber}, needsWildcard, nil
+	}
+}
diff --git a/hscontrol/policy/v2/utils_test.go b/hscontrol/policy/v2/utils_test.go
new file mode 100644
index 00000000..d1645071
--- /dev/null
+++ b/hscontrol/policy/v2/utils_test.go
@@ -0,0 +1,102 @@
+package v2
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"tailscale.com/tailcfg"
+)
+
+// TestParseDestinationAndPort tests the splitDestinationAndPort function using table-driven tests.
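+// A quick sketch of the expected behaviour, using values from the table
+// below: splitDestinationAndPort("tag:montreal-webserver:80,443") returns
+// ("tag:montreal-webserver", "80,443"); splitting happens on the *last*
+// colon, so IPv6 literals such as "fd7a:115c:a1e0::2:22" keep their
+// address intact.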
+func TestParseDestinationAndPort(t *testing.T) {
+	testCases := []struct {
+		input        string
+		expectedDst  string
+		expectedPort string
+		expectedErr  error
+	}{
+		{"git-server:*", "git-server", "*", nil},
+		{"192.168.1.0/24:22", "192.168.1.0/24", "22", nil},
+		{"fd7a:115c:a1e0::2:22", "fd7a:115c:a1e0::2", "22", nil},
+		{"fd7a:115c:a1e0::2/128:22", "fd7a:115c:a1e0::2/128", "22", nil},
+		{"tag:montreal-webserver:80,443", "tag:montreal-webserver", "80,443", nil},
+		{"tag:api-server:443", "tag:api-server", "443", nil},
+		{"example-host-1:*", "example-host-1", "*", nil},
+		{"hostname:80-90", "hostname", "80-90", nil},
+		{"invalidinput", "", "", errors.New("input must contain a colon character separating destination and port")},
+		{":invalid", "", "", errors.New("input cannot start with a colon character")},
+		{"invalid:", "", "", errors.New("input cannot end with a colon character")},
+	}
+
+	for _, testCase := range testCases {
+		dst, port, err := splitDestinationAndPort(testCase.input)
+		if dst != testCase.expectedDst || port != testCase.expectedPort || (err != nil && err.Error() != testCase.expectedErr.Error()) {
+			t.Errorf("splitDestinationAndPort(%q) = (%q, %q, %v), want (%q, %q, %v)",
+				testCase.input, dst, port, err, testCase.expectedDst, testCase.expectedPort, testCase.expectedErr)
+		}
+	}
+}
+
+func TestParsePort(t *testing.T) {
+	tests := []struct {
+		input    string
+		expected uint16
+		err      string
+	}{
+		{"80", 80, ""},
+		{"0", 0, ""},
+		{"65535", 65535, ""},
+		{"-1", 0, "port number out of range"},
+		{"65536", 0, "port number out of range"},
+		{"abc", 0, "invalid port number"},
+		{"", 0, "invalid port number"},
+	}
+
+	for _, test := range tests {
+		result, err := parsePort(test.input)
+		if err != nil && err.Error() != test.err {
+			t.Errorf("parsePort(%q) error = %v, expected error = %v", test.input, err, test.err)
+		}
+		if err == nil && test.err != "" {
+			t.Errorf("parsePort(%q) expected error = %v, got nil", test.input, test.err)
+		}
+		if result != test.expected {
+			t.Errorf("parsePort(%q) = %v, expected %v", test.input, result, test.expected)
+		}
+	}
+}
+
+func TestParsePortRange(t *testing.T) {
+	tests := []struct {
+		input    string
+		expected []tailcfg.PortRange
+		err      string
+	}{
+		{"80", []tailcfg.PortRange{{80, 80}}, ""},
+		{"80-90", []tailcfg.PortRange{{80, 90}}, ""},
+		{"80,90", []tailcfg.PortRange{{80, 80}, {90, 90}}, ""},
+		{"80-91,92,93-95", []tailcfg.PortRange{{80, 91}, {92, 92}, {93, 95}}, ""},
+		{"*", []tailcfg.PortRange{tailcfg.PortRangeAny}, ""},
+		{"80-", nil, "invalid port range format"},
+		{"-90", nil, "invalid port range format"},
+		{"80-90,", nil, "invalid port number"},
+		{"80,90-", nil, "invalid port range format"},
+		{"80-90,abc", nil, "invalid port number"},
+		{"80-90,65536", nil, "port number out of range"},
+		{"80-90,90-80", nil, "invalid port range: first port is greater than last port"},
+	}
+
+	for _, test := range tests {
+		result, err := parsePortRange(test.input)
+		if err != nil && err.Error() != test.err {
+			t.Errorf("parsePortRange(%q) error = %v, expected error = %v", test.input, err, test.err)
+		}
+		if err == nil && test.err != "" {
+			t.Errorf("parsePortRange(%q) expected error = %v, got nil", test.input, test.err)
+		}
+		if diff := cmp.Diff(test.expected, result); diff != "" {
+			t.Errorf("parsePortRange(%q) mismatch (-want +got):\n%s", test.input, diff)
+		}
+	}
+}
diff --git a/hscontrol/poll.go b/hscontrol/poll.go
index 7d9e1ab4..6c11bb04 100644
--- a/hscontrol/poll.go
+++ b/hscontrol/poll.go
@@ -10,10 +10,9 @@ import (
 	"time"
"github.com/juanfont/headscale/hscontrol/mapper" + "github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/types" - "github.com/juanfont/headscale/hscontrol/util" "github.com/rs/zerolog/log" - "github.com/samber/lo" "github.com/sasha-s/go-deadlock" xslices "golang.org/x/exp/slices" "tailscale.com/net/tsaddr" @@ -459,25 +458,10 @@ func (m *mapSession) handleEndpointUpdate() { // TODO(kradalby): I am not sure if we need this? nodesChangedHook(m.h.db, m.h.polMan, m.h.nodeNotifier) - // Take all the routes presented to us by the node and check - // if any of them should be auto approved by the policy. - // If any of them are, add them to the approved routes of the node. - // Keep all the old entries and compact the list to remove duplicates. - var newApproved []netip.Prefix - for _, route := range m.node.Hostinfo.RoutableIPs { - if m.h.polMan.NodeCanApproveRoute(m.node, route) { - newApproved = append(newApproved, route) - } - } - if newApproved != nil { - newApproved = append(newApproved, m.node.ApprovedRoutes...) - slices.SortFunc(newApproved, util.ComparePrefix) - slices.Compact(newApproved) - newApproved = lo.Filter(newApproved, func(route netip.Prefix, index int) bool { - return route.IsValid() - }) - m.node.ApprovedRoutes = newApproved - + // Approve routes if they are auto-approved by the policy. + // If any of them are approved, report them to the primary route tracker + // and send updates accordingly. + if policy.AutoApproveRoutes(m.h.polMan, m.node) { if m.h.primaryRoutes.SetRoutes(m.node.ID, m.node.SubnetRoutes()...) { ctx := types.NotifyCtx(m.ctx, "poll-primary-change", m.node.Hostname) m.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull()) diff --git a/hscontrol/types/node.go b/hscontrol/types/node.go index 7aeef4c0..e506a2c5 100644 --- a/hscontrol/types/node.go +++ b/hscontrol/types/node.go @@ -150,6 +150,68 @@ func (node *Node) IPs() []netip.Addr { return ret } +// HasIP reports if a node has a given IP address. +func (node *Node) HasIP(i netip.Addr) bool { + for _, ip := range node.IPs() { + if ip.Compare(i) == 0 { + return true + } + } + return false +} + +// IsTagged reports if a device is tagged +// and therefore should not be treated as a +// user owned device. +// Currently, this function only handles tags set +// via CLI ("forced tags" and preauthkeys) +func (node *Node) IsTagged() bool { + if len(node.ForcedTags) > 0 { + return true + } + + if node.AuthKey != nil && len(node.AuthKey.Tags) > 0 { + return true + } + + if node.Hostinfo == nil { + return false + } + + // TODO(kradalby): Figure out how tagging should work + // and hostinfo.requestedtags. + // Do this in other work. + + return false +} + +// HasTag reports if a node has a given tag. +// Currently, this function only handles tags set +// via CLI ("forced tags" and preauthkeys) +func (node *Node) HasTag(tag string) bool { + if slices.Contains(node.ForcedTags, tag) { + return true + } + + if node.AuthKey != nil && slices.Contains(node.AuthKey.Tags, tag) { + return true + } + + // TODO(kradalby): Figure out how tagging should work + // and hostinfo.requestedtags. + // Do this in other work. 
+ + return false +} + +func (node *Node) RequestTags() []string { + if node.Hostinfo == nil { + return []string{} + } + + return node.Hostinfo.RequestTags +} + func (node *Node) Prefixes() []netip.Prefix { addrs := []netip.Prefix{} for _, nodeAddress := range node.IPs() { @@ -163,12 +225,8 @@ func (node *Node) Prefixes() []netip.Prefix { func (node *Node) IPsAsString() []string { var ret []string - if node.IPv4 != nil { - ret = append(ret, node.IPv4.String()) - } - - if node.IPv6 != nil { - ret = append(ret, node.IPv6.String()) + for _, ip := range node.IPs() { + ret = append(ret, ip.String()) } return ret @@ -335,9 +393,9 @@ func (node *Node) SubnetRoutes() []netip.Prefix { return routes } -// func (node *Node) String() string { -// return node.Hostname -// } +func (node *Node) String() string { + return node.Hostname +} // PeerChangeFromMapRequest takes a MapRequest and compares it to the node // to produce a PeerChange struct that can be used to updated the node and diff --git a/hscontrol/types/users.go b/hscontrol/types/users.go index 2eba5f0f..93133e4f 100644 --- a/hscontrol/types/users.go +++ b/hscontrol/types/users.go @@ -7,6 +7,7 @@ import ( "fmt" "net/mail" "strconv" + "strings" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" "github.com/juanfont/headscale/hscontrol/util" @@ -18,6 +19,19 @@ import ( type UserID uint64 +type Users []User + +func (u Users) String() string { + var sb strings.Builder + sb.WriteString("[ ") + for _, user := range u { + fmt.Fprintf(&sb, "%d: %s, ", user.ID, user.Name) + } + sb.WriteString(" ]") + + return sb.String() +} + // User is the way Headscale implements the concept of users in Tailscale // // At the end of the day, users in Tailscale are some kind of 'bubbles' or users @@ -74,12 +88,13 @@ func (u *User) Username() string { u.Email, u.Name, u.ProviderIdentifier.String, - u.StringID()) + u.StringID(), + ) } -// DisplayNameOrUsername returns the DisplayName if it exists, otherwise +// Display returns the DisplayName if it exists, otherwise // it will return the Username. -func (u *User) DisplayNameOrUsername() string { +func (u *User) Display() string { return cmp.Or(u.DisplayName, u.Username()) } @@ -91,7 +106,7 @@ func (u *User) profilePicURL() string { func (u *User) TailscaleUser() *tailcfg.User { user := tailcfg.User{ ID: tailcfg.UserID(u.ID), - DisplayName: u.DisplayNameOrUsername(), + DisplayName: u.Display(), ProfilePicURL: u.profilePicURL(), Created: u.CreatedAt, } @@ -101,11 +116,10 @@ func (u *User) TailscaleUser() *tailcfg.User { func (u *User) TailscaleLogin() *tailcfg.Login { login := tailcfg.Login{ - ID: tailcfg.LoginID(u.ID), - // TODO(kradalby): this should reflect registration method. 
+ ID: tailcfg.LoginID(u.ID), Provider: u.Provider, LoginName: u.Username(), - DisplayName: u.DisplayNameOrUsername(), + DisplayName: u.Display(), ProfilePicURL: u.profilePicURL(), } @@ -116,7 +130,7 @@ func (u *User) TailscaleUserProfile() tailcfg.UserProfile { return tailcfg.UserProfile{ ID: tailcfg.UserID(u.ID), LoginName: u.Username(), - DisplayName: u.DisplayNameOrUsername(), + DisplayName: u.Display(), ProfilePicURL: u.profilePicURL(), } } diff --git a/hscontrol/util/addr.go b/hscontrol/util/addr.go index b755a8e7..c91ef0ba 100644 --- a/hscontrol/util/addr.go +++ b/hscontrol/util/addr.go @@ -2,6 +2,7 @@ package util import ( "fmt" + "iter" "net/netip" "strings" @@ -111,3 +112,16 @@ func StringToIPPrefix(prefixes []string) ([]netip.Prefix, error) { return result, nil } + +// IPSetAddrIter returns a function that iterates over all the IPs in the IPSet. +func IPSetAddrIter(ipSet *netipx.IPSet) iter.Seq[netip.Addr] { + return func(yield func(netip.Addr) bool) { + for _, rng := range ipSet.Ranges() { + for ip := rng.From(); ip.Compare(rng.To()) <= 0; ip = ip.Next() { + if !yield(ip) { + return + } + } + } + } +} diff --git a/hscontrol/util/net.go b/hscontrol/util/net.go index 665ce1dd..0d6b4412 100644 --- a/hscontrol/util/net.go +++ b/hscontrol/util/net.go @@ -1,10 +1,13 @@ package util import ( - "cmp" "context" "net" "net/netip" + "sync" + + "go4.org/netipx" + "tailscale.com/net/tsaddr" ) func GrpcSocketDialer(ctx context.Context, addr string) (net.Conn, error) { @@ -13,24 +16,6 @@ func GrpcSocketDialer(ctx context.Context, addr string) (net.Conn, error) { return d.DialContext(ctx, "unix", addr) } -// TODO(kradalby): Remove when in stdlib; -// https://github.com/golang/go/issues/61642 -// Compare returns an integer comparing two prefixes. -// The result will be 0 if p == p2, -1 if p < p2, and +1 if p > p2. -// Prefixes sort first by validity (invalid before valid), then -// address family (IPv4 before IPv6), then prefix length, then -// address. -func ComparePrefix(p, p2 netip.Prefix) int { - if c := cmp.Compare(p.Addr().BitLen(), p2.Addr().BitLen()); c != 0 { - return c - } - if c := cmp.Compare(p.Bits(), p2.Bits()); c != 0 { - return c - } - - return p.Addr().Compare(p2.Addr()) -} - func PrefixesToString(prefixes []netip.Prefix) []string { ret := make([]string, 0, len(prefixes)) for _, prefix := range prefixes { @@ -49,3 +34,29 @@ func MustStringsToPrefixes(strings []string) []netip.Prefix { return ret } + +// TheInternet returns the IPSet for the Internet. 
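+// Roughly: all of IPv4 plus 2000::/3, minus RFC 1918 space, ULAs, the
+// Tailscale CGNAT and ULA ranges, and link-local addresses. As an
+// illustrative check, 8.8.8.8 is in the set, while 10.1.2.3, 100.64.0.1,
+// and 192.168.1.1 are not.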
+// https://www.youtube.com/watch?v=iDbyYGrswtg +var TheInternet = sync.OnceValue(func() *netipx.IPSet { + var internetBuilder netipx.IPSetBuilder + internetBuilder.AddPrefix(netip.MustParsePrefix("2000::/3")) + internetBuilder.AddPrefix(tsaddr.AllIPv4()) + + // Delete Private network addresses + // https://datatracker.ietf.org/doc/html/rfc1918 + internetBuilder.RemovePrefix(netip.MustParsePrefix("fc00::/7")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("10.0.0.0/8")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("172.16.0.0/12")) + internetBuilder.RemovePrefix(netip.MustParsePrefix("192.168.0.0/16")) + + // Delete Tailscale networks + internetBuilder.RemovePrefix(tsaddr.TailscaleULARange()) + internetBuilder.RemovePrefix(tsaddr.CGNATRange()) + + // Delete "can't find DHCP networks" + internetBuilder.RemovePrefix(netip.MustParsePrefix("fe80::/10")) // link-local + internetBuilder.RemovePrefix(netip.MustParsePrefix("169.254.0.0/16")) + + theInternetSet, _ := internetBuilder.IPSet() + return theInternetSet +}) diff --git a/integration/acl_test.go b/integration/acl_test.go index fb6fef93..fefd75c0 100644 --- a/integration/acl_test.go +++ b/integration/acl_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/juanfont/headscale/hscontrol/policy" + policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1" "github.com/juanfont/headscale/integration/hsic" "github.com/juanfont/headscale/integration/tsic" "github.com/stretchr/testify/assert" @@ -50,7 +50,7 @@ var veryLargeDestination = []string{ func aclScenario( t *testing.T, - policy *policy.ACLPolicy, + policy *policyv1.ACLPolicy, clientsPerUser int, ) *Scenario { t.Helper() @@ -77,6 +77,8 @@ func aclScenario( }, hsic.WithACLPolicy(policy), hsic.WithTestName("acl"), + hsic.WithEmbeddedDERPServerOnly(), + hsic.WithTLS(), ) require.NoError(t, err) @@ -100,7 +102,7 @@ func TestACLHostsInNetMapTable(t *testing.T) { // they can access minus one (them self). 
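+	// For example (illustrative): with two users of two clients each and
+	// an allow-all ACL, every client should see the other three peers in
+	// its netmap.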
tests := map[string]struct { users map[string]int - policy policy.ACLPolicy + policy policyv1.ACLPolicy want map[string]int }{ // Test that when we have no ACL, each client netmap has @@ -110,8 +112,8 @@ func TestACLHostsInNetMapTable(t *testing.T) { "user1": 2, "user2": 2, }, - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -131,8 +133,8 @@ func TestACLHostsInNetMapTable(t *testing.T) { "user1": 2, "user2": 2, }, - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, @@ -157,8 +159,8 @@ func TestACLHostsInNetMapTable(t *testing.T) { "user1": 2, "user2": 2, }, - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, @@ -194,8 +196,8 @@ func TestACLHostsInNetMapTable(t *testing.T) { "user1": 2, "user2": 2, }, - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, @@ -222,8 +224,8 @@ func TestACLHostsInNetMapTable(t *testing.T) { "user1": 2, "user2": 2, }, - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, @@ -250,8 +252,8 @@ func TestACLHostsInNetMapTable(t *testing.T) { "user1": 2, "user2": 2, }, - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -306,8 +308,8 @@ func TestACLAllowUser80Dst(t *testing.T) { IntegrationSkip(t) scenario := aclScenario(t, - &policy.ACLPolicy{ - ACLs: []policy.ACL{ + &policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, @@ -360,11 +362,11 @@ func TestACLDenyAllPort80(t *testing.T) { IntegrationSkip(t) scenario := aclScenario(t, - &policy.ACLPolicy{ + &policyv1.ACLPolicy{ Groups: map[string][]string{ "group:integration-acl-test": {"user1", "user2"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"group:integration-acl-test"}, @@ -407,8 +409,8 @@ func TestACLAllowUserDst(t *testing.T) { IntegrationSkip(t) scenario := aclScenario(t, - &policy.ACLPolicy{ - ACLs: []policy.ACL{ + &policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, @@ -463,8 +465,8 @@ func TestACLAllowStarDst(t *testing.T) { IntegrationSkip(t) scenario := aclScenario(t, - &policy.ACLPolicy{ - ACLs: []policy.ACL{ + &policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, @@ -520,11 +522,11 @@ func TestACLNamedHostsCanReachBySubnet(t *testing.T) { IntegrationSkip(t) scenario := aclScenario(t, - &policy.ACLPolicy{ - Hosts: policy.Hosts{ + &policyv1.ACLPolicy{ + Hosts: policyv1.Hosts{ "all": netip.MustParsePrefix("100.64.0.0/24"), }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ // Everyone can curl test3 { Action: "accept", @@ -617,16 +619,16 @@ func TestACLNamedHostsCanReach(t *testing.T) { IntegrationSkip(t) tests := map[string]struct { - policy policy.ACLPolicy + policy policyv1.ACLPolicy }{ "ipv4": { - policy: policy.ACLPolicy{ - Hosts: policy.Hosts{ + policy: policyv1.ACLPolicy{ + Hosts: policyv1.Hosts{ "test1": netip.MustParsePrefix("100.64.0.1/32"), "test2": netip.MustParsePrefix("100.64.0.2/32"), "test3": netip.MustParsePrefix("100.64.0.3/32"), }, - ACLs: 
[]policy.ACL{ + ACLs: []policyv1.ACL{ // Everyone can curl test3 { Action: "accept", @@ -643,13 +645,13 @@ func TestACLNamedHostsCanReach(t *testing.T) { }, }, "ipv6": { - policy: policy.ACLPolicy{ - Hosts: policy.Hosts{ + policy: policyv1.ACLPolicy{ + Hosts: policyv1.Hosts{ "test1": netip.MustParsePrefix("fd7a:115c:a1e0::1/128"), "test2": netip.MustParsePrefix("fd7a:115c:a1e0::2/128"), "test3": netip.MustParsePrefix("fd7a:115c:a1e0::3/128"), }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ // Everyone can curl test3 { Action: "accept", @@ -866,11 +868,11 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) { IntegrationSkip(t) tests := map[string]struct { - policy policy.ACLPolicy + policy policyv1.ACLPolicy }{ "ipv4": { - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"100.64.0.1"}, @@ -880,8 +882,8 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) { }, }, "ipv6": { - policy: policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"fd7a:115c:a1e0::1"}, @@ -891,12 +893,12 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) { }, }, "hostv4cidr": { - policy: policy.ACLPolicy{ - Hosts: policy.Hosts{ + policy: policyv1.ACLPolicy{ + Hosts: policyv1.Hosts{ "test1": netip.MustParsePrefix("100.64.0.1/32"), "test2": netip.MustParsePrefix("100.64.0.2/32"), }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"test1"}, @@ -906,12 +908,12 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) { }, }, "hostv6cidr": { - policy: policy.ACLPolicy{ - Hosts: policy.Hosts{ + policy: policyv1.ACLPolicy{ + Hosts: policyv1.Hosts{ "test1": netip.MustParsePrefix("fd7a:115c:a1e0::1/128"), "test2": netip.MustParsePrefix("fd7a:115c:a1e0::2/128"), }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"test1"}, @@ -921,12 +923,12 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) { }, }, "group": { - policy: policy.ACLPolicy{ + policy: policyv1.ACLPolicy{ Groups: map[string][]string{ "group:one": {"user1"}, "group:two": {"user2"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"group:one"}, @@ -1085,15 +1087,18 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) { headscale, err := scenario.Headscale() require.NoError(t, err) - p := policy.ACLPolicy{ - ACLs: []policy.ACL{ + p := policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"user1"}, Destinations: []string{"user2:*"}, }, }, - Hosts: policy.Hosts{}, + Hosts: policyv1.Hosts{}, + } + if usePolicyV2ForTest { + hsic.RewritePolicyToV2(&p) } pBytes, _ := json.Marshal(p) @@ -1118,7 +1123,7 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) { // Get the current policy and check // if it is the same as the one we set. 
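+	// When usePolicyV2ForTest is set, the policy was rewritten by
+	// RewritePolicyToV2 before being pushed, so the round-tripped user
+	// references carry the "@" suffix (e.g. "user1" becomes "user1@").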
- var output *policy.ACLPolicy + var output *policyv1.ACLPolicy err = executeAndUnmarshal( headscale, []string{ diff --git a/integration/cli_test.go b/integration/cli_test.go index 17c8870d..2f23e8f6 100644 --- a/integration/cli_test.go +++ b/integration/cli_test.go @@ -11,7 +11,7 @@ import ( tcmp "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" - "github.com/juanfont/headscale/hscontrol/policy" + policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/integration/hsic" "github.com/juanfont/headscale/integration/tsic" @@ -915,7 +915,7 @@ func TestNodeAdvertiseTagCommand(t *testing.T) { tests := []struct { name string - policy *policy.ACLPolicy + policy *policyv1.ACLPolicy wantTag bool }{ { @@ -924,8 +924,8 @@ func TestNodeAdvertiseTagCommand(t *testing.T) { }, { name: "with-policy-email", - policy: &policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: &policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -940,8 +940,8 @@ func TestNodeAdvertiseTagCommand(t *testing.T) { }, { name: "with-policy-username", - policy: &policy.ACLPolicy{ - ACLs: []policy.ACL{ + policy: &policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -956,11 +956,11 @@ func TestNodeAdvertiseTagCommand(t *testing.T) { }, { name: "with-policy-groups", - policy: &policy.ACLPolicy{ - Groups: policy.Groups{ + policy: &policyv1.ACLPolicy{ + Groups: policyv1.Groups{ "group:admins": []string{"user1"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -1726,7 +1726,7 @@ func TestPolicyCommand(t *testing.T) { defer scenario.ShutdownAssertNoPanics(t) spec := map[string]int{ - "policy-user": 0, + "user1": 0, } err = scenario.CreateHeadscaleEnv( @@ -1742,8 +1742,8 @@ func TestPolicyCommand(t *testing.T) { headscale, err := scenario.Headscale() assertNoErr(t, err) - p := policy.ACLPolicy{ - ACLs: []policy.ACL{ + p := policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -1751,9 +1751,12 @@ func TestPolicyCommand(t *testing.T) { }, }, TagOwners: map[string][]string{ - "tag:exists": {"policy-user"}, + "tag:exists": {"user1"}, }, } + if usePolicyV2ForTest { + hsic.RewritePolicyToV2(&p) + } pBytes, _ := json.Marshal(p) @@ -1778,7 +1781,7 @@ func TestPolicyCommand(t *testing.T) { // Get the current policy and check // if it is the same as the one we set. 
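+	// With the v2 rewrite active, tag owners come back as "user1@"; the
+	// plain "user1" form is only asserted for the v1 policy (see the
+	// branch on usePolicyV2ForTest below).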
- var output *policy.ACLPolicy + var output *policyv1.ACLPolicy err = executeAndUnmarshal( headscale, []string{ @@ -1794,7 +1797,11 @@ func TestPolicyCommand(t *testing.T) { assert.Len(t, output.TagOwners, 1) assert.Len(t, output.ACLs, 1) - assert.Equal(t, output.TagOwners["tag:exists"], []string{"policy-user"}) + if usePolicyV2ForTest { + assert.Equal(t, output.TagOwners["tag:exists"], []string{"user1@"}) + } else { + assert.Equal(t, output.TagOwners["tag:exists"], []string{"user1"}) + } } func TestPolicyBrokenConfigCommand(t *testing.T) { @@ -1806,7 +1813,7 @@ func TestPolicyBrokenConfigCommand(t *testing.T) { defer scenario.ShutdownAssertNoPanics(t) spec := map[string]int{ - "policy-user": 1, + "user1": 1, } err = scenario.CreateHeadscaleEnv( @@ -1822,8 +1829,8 @@ func TestPolicyBrokenConfigCommand(t *testing.T) { headscale, err := scenario.Headscale() assertNoErr(t, err) - p := policy.ACLPolicy{ - ACLs: []policy.ACL{ + p := policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { // This is an unknown action, so it will return an error // and the config will not be applied. @@ -1833,9 +1840,12 @@ func TestPolicyBrokenConfigCommand(t *testing.T) { }, }, TagOwners: map[string][]string{ - "tag:exists": {"policy-user"}, + "tag:exists": {"user1"}, }, } + if usePolicyV2ForTest { + hsic.RewritePolicyToV2(&p) + } pBytes, _ := json.Marshal(p) diff --git a/integration/general_test.go b/integration/general_test.go index 3bdce469..d6d9e7e1 100644 --- a/integration/general_test.go +++ b/integration/general_test.go @@ -365,7 +365,11 @@ func TestTaildrop(t *testing.T) { "taildrop": len(MustTestVersions), } - err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop")) + err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, + hsic.WithTestName("taildrop"), + hsic.WithEmbeddedDERPServerOnly(), + hsic.WithTLS(), + ) assertNoErrHeadscaleEnv(t, err) allClients, err := scenario.ListTailscaleClients() diff --git a/integration/hsic/hsic.go b/integration/hsic/hsic.go index b75d9c08..fedf220e 100644 --- a/integration/hsic/hsic.go +++ b/integration/hsic/hsic.go @@ -12,6 +12,7 @@ import ( "net/netip" "os" "path" + "regexp" "sort" "strconv" "strings" @@ -19,7 +20,7 @@ import ( "github.com/davecgh/go-spew/spew" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" - "github.com/juanfont/headscale/hscontrol/policy" + policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1" "github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/integration/dockertestutil" @@ -64,12 +65,13 @@ type HeadscaleInContainer struct { extraPorts []string caCerts [][]byte hostPortBindings map[string][]string - aclPolicy *policy.ACLPolicy + aclPolicy *policyv1.ACLPolicy env map[string]string tlsCert []byte tlsKey []byte filesInContainer []fileInContainer postgres bool + policyV2 bool } // Option represent optional settings that can be given to a @@ -78,7 +80,7 @@ type Option = func(c *HeadscaleInContainer) // WithACLPolicy adds a hscontrol.ACLPolicy policy to the // HeadscaleInContainer instance. -func WithACLPolicy(acl *policy.ACLPolicy) Option { +func WithACLPolicy(acl *policyv1.ACLPolicy) Option { return func(hsic *HeadscaleInContainer) { if acl == nil { return @@ -186,6 +188,14 @@ func WithPostgres() Option { } } +// WithPolicyV2 tells the integration test to use the new v2 filter. 
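+// It sets HEADSCALE_EXPERIMENTAL_POLICY_V2=1 in the container environment
+// and, when an ACL policy is attached, causes New to rewrite the policy's
+// user references to the v2 form (via RewritePolicyToV2) before writing it
+// out.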
+func WithPolicyV2() Option { + return func(hsic *HeadscaleInContainer) { + hsic.policyV2 = true + hsic.env["HEADSCALE_EXPERIMENTAL_POLICY_V2"] = "1" + } +} + // WithIPAllocationStrategy sets the tests IP Allocation strategy. func WithIPAllocationStrategy(strategy types.IPAllocationStrategy) Option { return func(hsic *HeadscaleInContainer) { @@ -403,6 +413,10 @@ func New( } if hsic.aclPolicy != nil { + // Rewrite all user entries in the policy to have an @ at the end. + if hsic.policyV2 { + RewritePolicyToV2(hsic.aclPolicy) + } data, err := json.Marshal(hsic.aclPolicy) if err != nil { return nil, fmt.Errorf("failed to marshal ACL Policy to JSON: %w", err) @@ -869,3 +883,50 @@ func (t *HeadscaleInContainer) SendInterrupt() error { return nil } + +// TODO(kradalby): Remove this function when v1 is deprecated +func rewriteUsersToV2(strs []string) []string { + var result []string + userPattern := regexp.MustCompile(`^user\d+$`) + + for _, username := range strs { + parts := strings.Split(username, ":") + if len(parts) == 0 { + result = append(result, username) + continue + } + firstPart := parts[0] + if userPattern.MatchString(firstPart) { + modifiedFirst := firstPart + "@" + if len(parts) > 1 { + rest := strings.Join(parts[1:], ":") + username = modifiedFirst + ":" + rest + } else { + username = modifiedFirst + } + } + result = append(result, username) + } + + return result +} + +// rewritePolicyToV2 rewrites the policy to v2 format. +// This mostly means adding the @ prefix to user names. +// replaces are done inplace +func RewritePolicyToV2(pol *policyv1.ACLPolicy) { + for idx := range pol.ACLs { + pol.ACLs[idx].Sources = rewriteUsersToV2(pol.ACLs[idx].Sources) + pol.ACLs[idx].Destinations = rewriteUsersToV2(pol.ACLs[idx].Destinations) + } + for idx := range pol.Groups { + pol.Groups[idx] = rewriteUsersToV2(pol.Groups[idx]) + } + for idx := range pol.TagOwners { + pol.TagOwners[idx] = rewriteUsersToV2(pol.TagOwners[idx]) + } + for idx := range pol.SSHs { + pol.SSHs[idx].Sources = rewriteUsersToV2(pol.SSHs[idx].Sources) + pol.SSHs[idx].Destinations = rewriteUsersToV2(pol.SSHs[idx].Destinations) + } +} diff --git a/integration/route_test.go b/integration/route_test.go index e6f6b5d6..e92a4c37 100644 --- a/integration/route_test.go +++ b/integration/route_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" v1 "github.com/juanfont/headscale/gen/go/headscale/v1" - "github.com/juanfont/headscale/hscontrol/policy" + policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1" "github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/integration/hsic" "github.com/juanfont/headscale/integration/tsic" @@ -29,7 +29,7 @@ func TestEnablingRoutes(t *testing.T) { IntegrationSkip(t) t.Parallel() - user := "enable-routing" + user := "user6" scenario, err := NewScenario(dockertestMaxWait()) require.NoErrorf(t, err, "failed to create scenario: %s", err) @@ -203,7 +203,7 @@ func TestHASubnetRouterFailover(t *testing.T) { IntegrationSkip(t) t.Parallel() - user := "enable-routing" + user := "user9" scenario, err := NewScenario(dockertestMaxWait()) require.NoErrorf(t, err, "failed to create scenario: %s", err) @@ -528,7 +528,7 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) { expectedRoutes := "172.0.0.0/24" - user := "enable-disable-routing" + user := "user2" scenario, err := NewScenario(dockertestMaxWait()) require.NoErrorf(t, err, "failed to create scenario: %s", err) @@ -539,8 +539,8 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) { } err = 
scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:approve"})}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy( - &policy.ACLPolicy{ - ACLs: []policy.ACL{ + &policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -550,7 +550,7 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) { TagOwners: map[string][]string{ "tag:approve": {user}, }, - AutoApprovers: policy.AutoApprovers{ + AutoApprovers: policyv1.AutoApprovers{ Routes: map[string][]string{ expectedRoutes: {"tag:approve"}, }, @@ -640,8 +640,8 @@ func TestAutoApprovedSubRoute2068(t *testing.T) { hsic.WithEmbeddedDERPServerOnly(), hsic.WithTLS(), hsic.WithACLPolicy( - &policy.ACLPolicy{ - ACLs: []policy.ACL{ + &policyv1.ACLPolicy{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, @@ -651,7 +651,7 @@ func TestAutoApprovedSubRoute2068(t *testing.T) { TagOwners: map[string][]string{ "tag:approve": {user}, }, - AutoApprovers: policy.AutoApprovers{ + AutoApprovers: policyv1.AutoApprovers{ Routes: map[string][]string{ "10.42.0.0/16": {"tag:approve"}, }, @@ -696,7 +696,7 @@ func TestSubnetRouteACL(t *testing.T) { IntegrationSkip(t) t.Parallel() - user := "subnet-route-acl" + user := "user4" scenario, err := NewScenario(dockertestMaxWait()) require.NoErrorf(t, err, "failed to create scenario: %s", err) @@ -707,11 +707,11 @@ func TestSubnetRouteACL(t *testing.T) { } err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy( - &policy.ACLPolicy{ - Groups: policy.Groups{ + &policyv1.ACLPolicy{ + Groups: policyv1.Groups{ "group:admins": {user}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"group:admins"}, diff --git a/integration/scenario.go b/integration/scenario.go index d8f00566..1cdc8f5d 100644 --- a/integration/scenario.go +++ b/integration/scenario.go @@ -33,6 +33,7 @@ const ( ) var usePostgresForTest = envknob.Bool("HEADSCALE_INTEGRATION_POSTGRES") +var usePolicyV2ForTest = envknob.Bool("HEADSCALE_EXPERIMENTAL_POLICY_V2") var ( errNoHeadscaleAvailable = errors.New("no headscale available") @@ -230,6 +231,10 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) { opts = append(opts, hsic.WithPostgres()) } + if usePolicyV2ForTest { + opts = append(opts, hsic.WithPolicyV2()) + } + headscale, err := hsic.New(s.pool, s.network, opts...) 
if err != nil { return nil, fmt.Errorf("failed to create headscale container: %w", err) diff --git a/integration/ssh_test.go b/integration/ssh_test.go index d060831d..ade119d3 100644 --- a/integration/ssh_test.go +++ b/integration/ssh_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/juanfont/headscale/hscontrol/policy" + policyv1 "github.com/juanfont/headscale/hscontrol/policy/v1" "github.com/juanfont/headscale/integration/hsic" "github.com/juanfont/headscale/integration/tsic" "github.com/stretchr/testify/assert" @@ -48,7 +48,7 @@ var retry = func(times int, sleepInterval time.Duration, return result, stderr, err } -func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Scenario { +func sshScenario(t *testing.T, policy *policyv1.ACLPolicy, clientsPerUser int) *Scenario { t.Helper() scenario, err := NewScenario(dockertestMaxWait()) assertNoErr(t, err) @@ -92,18 +92,18 @@ func TestSSHOneUserToAll(t *testing.T) { t.Parallel() scenario := sshScenario(t, - &policy.ACLPolicy{ + &policyv1.ACLPolicy{ Groups: map[string][]string{ "group:integration-test": {"user1"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, Destinations: []string{"*:*"}, }, }, - SSHs: []policy.SSH{ + SSHs: []policyv1.SSH{ { Action: "accept", Sources: []string{"group:integration-test"}, @@ -157,18 +157,18 @@ func TestSSHMultipleUsersAllToAll(t *testing.T) { t.Parallel() scenario := sshScenario(t, - &policy.ACLPolicy{ + &policyv1.ACLPolicy{ Groups: map[string][]string{ "group:integration-test": {"user1", "user2"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, Destinations: []string{"*:*"}, }, }, - SSHs: []policy.SSH{ + SSHs: []policyv1.SSH{ { Action: "accept", Sources: []string{"group:integration-test"}, @@ -210,18 +210,18 @@ func TestSSHNoSSHConfigured(t *testing.T) { t.Parallel() scenario := sshScenario(t, - &policy.ACLPolicy{ + &policyv1.ACLPolicy{ Groups: map[string][]string{ "group:integration-test": {"user1"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, Destinations: []string{"*:*"}, }, }, - SSHs: []policy.SSH{}, + SSHs: []policyv1.SSH{}, }, len(MustTestVersions), ) @@ -252,18 +252,18 @@ func TestSSHIsBlockedInACL(t *testing.T) { t.Parallel() scenario := sshScenario(t, - &policy.ACLPolicy{ + &policyv1.ACLPolicy{ Groups: map[string][]string{ "group:integration-test": {"user1"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, Destinations: []string{"*:80"}, }, }, - SSHs: []policy.SSH{ + SSHs: []policyv1.SSH{ { Action: "accept", Sources: []string{"group:integration-test"}, @@ -301,19 +301,19 @@ func TestSSHUserOnlyIsolation(t *testing.T) { t.Parallel() scenario := sshScenario(t, - &policy.ACLPolicy{ + &policyv1.ACLPolicy{ Groups: map[string][]string{ "group:ssh1": {"user1"}, "group:ssh2": {"user2"}, }, - ACLs: []policy.ACL{ + ACLs: []policyv1.ACL{ { Action: "accept", Sources: []string{"*"}, Destinations: []string{"*:*"}, }, }, - SSHs: []policy.SSH{ + SSHs: []policyv1.SSH{ { Action: "accept", Sources: []string{"group:ssh1"},
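
To make the v1-to-v2 user rewrite used throughout these integration changes concrete, here is a minimal, self-contained sketch. The helper name rewriteUserRef and the sample inputs are hypothetical; it mirrors the rewriteUsersToV2 logic in integration/hsic/hsic.go under the same `^user\d+$` naming assumption:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// userPattern matches the bare integration-test user names ("user1",
// "user42", ...) that the v2 policy format expects as "user1@".
var userPattern = regexp.MustCompile(`^user\d+$`)

// rewriteUserRef appends "@" to a bare "userN" reference, leaving groups,
// tags, and any ":port" suffix intact.
func rewriteUserRef(ref string) string {
	parts := strings.SplitN(ref, ":", 2)
	if userPattern.MatchString(parts[0]) {
		parts[0] += "@"
	}
	return strings.Join(parts, ":")
}

func main() {
	for _, ref := range []string{"user1", "user2:*", "group:admins", "tag:web:443"} {
		fmt.Printf("%-12s -> %s\n", ref, rewriteUserRef(ref))
	}
	// Output:
	// user1        -> user1@
	// user2:*      -> user2@:*
	// group:admins -> group:admins
	// tag:web:443  -> tag:web:443
}

Group and tag references pass through untouched, which is why only the plain user names in the v1 test policies above needed rewriting.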