Add node expiry test

This commit adds a test to verify that nodes get updated when a node in
their network expires.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Author:    Kristoffer Dalby <kristoffer@tailscale.com>
Committer: Kristoffer Dalby
Date:      2023-02-02 16:05:52 +01:00
Commit:    c39085911f (parent 3c20d2a178)

6 changed files with 282 additions and 24 deletions
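The heart of the new test is the following check (a condensed sketch of the test added in full below; variable names mirror the diff):

// Condensed sketch: after expiring node 0 via the Headscale CLI, no remaining
// client should still list the expired node's key among its peers.
result, err := headscale.Execute([]string{
	"headscale", "nodes", "expire", "--identifier", "0", "--output", "json",
})
assert.NoError(t, err)

var machine v1.Machine
assert.NoError(t, json.Unmarshal([]byte(result), &machine))

time.Sleep(30 * time.Second) // give the expiry time to propagate

for _, client := range allClients {
	status, err := client.Status()
	assert.NoError(t, err)

	for _, peerKey := range status.Peers() {
		peerStatus := status.Peer[peerKey]
		peerPublicKey := strings.TrimPrefix(peerStatus.PublicKey.String(), "nodekey:")
		assert.NotEqual(t, machine.NodeKey, peerPublicKey)
	}
}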


@@ -0,0 +1,57 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestExpireNode

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go test ./... \
            -tags ts2019 \
            -failfast \
            -timeout 120m \
            -parallel 1 \
            -run "^TestExpireNode$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"


@@ -1,16 +1,19 @@
package integration

import (
	"encoding/json"
	"fmt"
	"net/netip"
	"strings"
	"testing"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/rs/zerolog/log"
	"github.com/samber/lo"
	"github.com/stretchr/testify/assert"
)

func TestPingAllByIP(t *testing.T) {
@@ -556,20 +559,92 @@ func TestResolveMagicDNS(t *testing.T) {
	}
}

func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {
	t.Helper()
	success := 0

func TestExpireNode(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	for _, client := range clients {
		for _, addr := range addrs {
			err := client.Ping(addr)
			if err != nil {
				t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
			} else {
				success++
			}

	scenario, err := NewScenario()
	if err != nil {
		t.Errorf("failed to create scenario: %s", err)
	}

	spec := map[string]int{
		"user1": len(TailscaleVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("expirenode"))
	if err != nil {
		t.Errorf("failed to create headscale environment: %s", err)
	}

	allClients, err := scenario.ListTailscaleClients()
	if err != nil {
		t.Errorf("failed to get clients: %s", err)
	}

	allIps, err := scenario.ListTailscaleClientsIPs()
	if err != nil {
		t.Errorf("failed to get clients: %s", err)
	}

	err = scenario.WaitForTailscaleSync()
	if err != nil {
		t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
	}

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps))

	for _, client := range allClients {
		status, err := client.Status()
		assert.NoError(t, err)

		// Assert that we have the original count - self
		assert.Len(t, status.Peers(), len(TailscaleVersions)-1)
	}

	headscale, err := scenario.Headscale()
	assert.NoError(t, err)

	// TODO(kradalby): This is Headscale specific and would not play nicely
	// with other implementations of the ControlServer interface
	result, err := headscale.Execute([]string{
		"headscale", "nodes", "expire", "--identifier", "0", "--output", "json",
	})
	assert.NoError(t, err)

	var machine v1.Machine
	err = json.Unmarshal([]byte(result), &machine)
	assert.NoError(t, err)

	time.Sleep(30 * time.Second)

	// Verify that the expired node is no longer present in the Peer list
	// of connected nodes.
	for _, client := range allClients {
		status, err := client.Status()
		assert.NoError(t, err)

		for _, peerKey := range status.Peers() {
			peerStatus := status.Peer[peerKey]

			peerPublicKey := strings.TrimPrefix(peerStatus.PublicKey.String(), "nodekey:")

			assert.NotEqual(t, machine.NodeKey, peerPublicKey)
		}

		if client.Hostname() != machine.Name {
			// Assert that we have the original count - self - expired node
			assert.Len(t, status.Peers(), len(TailscaleVersions)-2)
		}
	}
	return success

	err = scenario.Shutdown()
	if err != nil {
		t.Errorf("failed to tear down scenario: %s", err)
	}
}


@@ -26,6 +26,7 @@ const (
var (
	errNoHeadscaleAvailable = errors.New("no headscale available")
	errNoUserAvailable      = errors.New("no user available")
	errNoClientFound        = errors.New("client not found")

	// Tailscale started adding TS2021 support in CapabilityVersion>=28 (v1.24.0), but
	// proper support in Headscale was only added for CapabilityVersion>=39 clients (v1.30.0).
@@ -203,7 +204,11 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
	return headscale, nil
}

func (s *Scenario) CreatePreAuthKey(user string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error) {
func (s *Scenario) CreatePreAuthKey(
	user string,
	reusable bool,
	ephemeral bool,
) (*v1.PreAuthKey, error) {
	if headscale, err := s.Headscale(); err == nil {
		key, err := headscale.CreateAuthKey(user, reusable, ephemeral)
		if err != nil {
@@ -440,6 +445,24 @@ func (s *Scenario) ListTailscaleClients(users ...string) ([]TailscaleClient, err
	return allClients, nil
}

func (s *Scenario) FindTailscaleClientByIP(ip netip.Addr) (TailscaleClient, error) {
	clients, err := s.ListTailscaleClients()
	if err != nil {
		return nil, err
	}

	for _, client := range clients {
		ips, _ := client.IPs()
		for _, ip2 := range ips {
			if ip == ip2 {
				return client, nil
			}
		}
	}

	return nil, errNoClientFound
}

func (s *Scenario) ListTailscaleClientsIPs(users ...string) ([]netip.Addr, error) {
	var allIps []netip.Addr
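The new FindTailscaleClientByIP helper is not called anywhere in this commit yet; a hedged usage sketch follows (the IP literal and the surrounding test context are illustrative only):

	// Illustrative only: resolve which client owns a given Tailscale IP.
	addr := netip.MustParseAddr("100.64.0.1") // hypothetical node address
	client, err := scenario.FindTailscaleClientByIP(addr)
	if err != nil {
		t.Fatalf("no client found for %s: %s", addr, err)
	}
	t.Logf("client %s owns %s", client.Hostname(), addr)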


@@ -4,6 +4,7 @@ import (
	"net/netip"
	"net/url"

	"github.com/juanfont/headscale/integration/tsic"
	"tailscale.com/ipn/ipnstate"
)
@@ -22,6 +23,6 @@ type TailscaleClient interface {
	WaitForReady() error
	WaitForLogout() error
	WaitForPeers(expected int) error
	Ping(hostnameOrIP string) error
	Ping(hostnameOrIP string, opts ...tsic.PingOption) error
	ID() string
}
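Existing client.Ping(addr) calls keep compiling thanks to the variadic options, and tests that need different behaviour can pass the new options. A small sketch (the timeout and count values are illustrative only):

	// Illustrative only: a short ping that does not require a direct connection.
	err := client.Ping(
		addr,
		tsic.WithPingTimeout(500*time.Millisecond),
		tsic.WithPingCount(3),
		tsic.WithPingUntilDirect(false),
	)
	if err != nil {
		t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
	}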


@@ -7,7 +7,9 @@ import (
	"log"
	"net/netip"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/cenkalti/backoff/v4"
	"github.com/juanfont/headscale"
@@ -20,6 +22,7 @@ import (
const (
	tsicHashLength    = 6
	defaultPingCount  = 10
	dockerContextPath = "../."

	headscaleCertPath = "/usr/local/share/ca-certificates/headscale.crt"
)
@@ -49,6 +52,7 @@ type TailscaleInContainer struct {
	headscaleCert     []byte
	headscaleHostname string
	withSSH           bool
	withTags          []string
}

type Option = func(c *TailscaleInContainer)
@@ -85,6 +89,12 @@ func WithHeadscaleName(hsName string) Option {
	}
}

func WithTags(tags []string) Option {
	return func(tsic *TailscaleInContainer) {
		tsic.withTags = tags
	}
}

func WithSSH() Option {
	return func(tsic *TailscaleInContainer) {
		tsic.withSSH = true
@@ -231,6 +241,12 @@ func (t *TailscaleInContainer) Up(
		command = append(command, "--ssh")
	}

	if len(t.withTags) > 0 {
		command = append(command,
			fmt.Sprintf(`--advertise-tags=%s`, strings.Join(t.withTags, ",")),
		)
	}

	if _, _, err := t.Execute(command); err != nil {
		return fmt.Errorf("failed to join tailscale client: %w", err)
	}
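Tests that want tagged nodes can opt in through the new tsic option when building the environment; a hedged sketch follows (the tag value is illustrative only, and tag ownership rules in the ACL policy still apply):

	// Illustrative only: register every client with an advertised tag.
	err = scenario.CreateHeadscaleEnv(
		spec,
		[]tsic.Option{tsic.WithTags([]string{"tag:integration-test"})},
		hsic.WithTestName("tagged"),
	)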
@@ -390,17 +406,55 @@ func (t *TailscaleInContainer) WaitForPeers(expected int) error {
	})
}

// TODO(kradalby): Make multiping, go routine magic.
func (t *TailscaleInContainer) Ping(hostnameOrIP string) error {
	return t.pool.Retry(func() error {
		command := []string{
			"tailscale", "ping",
			"--timeout=1s",
			"--c=10",
			"--until-direct=true",
			hostnameOrIP,
		}

type (
	PingOption = func(args *pingArgs)

	pingArgs struct {
		timeout time.Duration
		count   int
		direct  bool
	}
)

func WithPingTimeout(timeout time.Duration) PingOption {
	return func(args *pingArgs) {
		args.timeout = timeout
	}
}

func WithPingCount(count int) PingOption {
	return func(args *pingArgs) {
		args.count = count
	}
}

func WithPingUntilDirect(direct bool) PingOption {
	return func(args *pingArgs) {
		args.direct = direct
	}
}

// TODO(kradalby): Make multiping, go routine magic.
func (t *TailscaleInContainer) Ping(hostnameOrIP string, opts ...PingOption) error {
	args := pingArgs{
		timeout: time.Second,
		count:   defaultPingCount,
		direct:  true,
	}

	for _, opt := range opts {
		opt(&args)
	}

	command := []string{
		"tailscale", "ping",
		fmt.Sprintf("--timeout=%s", args.timeout),
		fmt.Sprintf("--c=%d", args.count),
		fmt.Sprintf("--until-direct=%s", strconv.FormatBool(args.direct)),
	}

	command = append(command, hostnameOrIP)

	return t.pool.Retry(func() error {
		result, _, err := t.Execute(command)
		if err != nil {
			log.Printf(

integration/utils.go (new file, 48 lines)

@@ -0,0 +1,48 @@
package integration

import (
	"testing"
)

func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {
	t.Helper()
	success := 0

	for _, client := range clients {
		for _, addr := range addrs {
			err := client.Ping(addr)
			if err != nil {
				t.Errorf("failed to ping %s from %s: %s", addr, client.Hostname(), err)
			} else {
				success++
			}
		}
	}

	return success
}
// pingAllNegativeHelper is intended for cases where one or more nodes are
// expected to time out from the ping; it counts failures instead of successes.
// func pingAllNegativeHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {
// 	t.Helper()
// 	failures := 0
//
// 	timeout := 100
// 	count := 3
//
// 	for _, client := range clients {
// 		for _, addr := range addrs {
// 			err := client.Ping(
// 				addr,
// 				tsic.WithPingTimeout(time.Duration(timeout)*time.Millisecond),
// 				tsic.WithPingCount(count),
// 			)
// 			if err != nil {
// 				failures++
// 			}
// 		}
// 	}
//
// 	return failures
// }