Merge pull request #121 from juanfont/main

New integration test for tailscale 1.14
Kristoffer Dalby 2021-09-23 14:51:26 +01:00 committed by GitHub
commit 7db91c68be
5 changed files with 368 additions and 196 deletions

View File

@ -49,6 +49,7 @@ jobs:
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
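For context, these metadata-action patterns mean a hypothetical release tag such as v0.9.2 would be published as 0.9.2, 0.9, 0, plus a short-SHA tag, on both registries. A pinned pull could then look like this (the version below is illustrative, not from this commit):
```shell
# Hypothetical tag produced by the semver patterns above
docker pull headscale/headscale:0.9.2
```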

View File

@ -1,4 +1,4 @@
FROM golang:latest AS build
FROM golang:1.17.1-bullseye AS build
ENV GOPATH /go
COPY go.mod go.sum /go/src/headscale/

View File

@ -1,9 +1,11 @@
FROM ubuntu:latest
ARG TAILSCALE_VERSION
RUN apt-get update \
&& apt-get install -y gnupg curl \
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
&& apt-get update \
&& apt-get install -y tailscale \
&& apt-get install -y tailscale=${TAILSCALE_VERSION} \
&& rm -rf /var/lib/apt/lists/*
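The new TAILSCALE_VERSION build argument is what lets the integration test pin the client version per container. A minimal sketch of building the image by hand under that assumption (the image tag is just an example name):
```shell
docker build -f Dockerfile.tailscale --build-arg TAILSCALE_VERSION=1.14.3 -t tailscale-integration:1.14.3 .
```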

View File

@ -18,7 +18,7 @@ Headscale implements this coordination server.
- [x] Base functionality (nodes can communicate with each other)
- [x] Node registration through the web flow
- [x] Network changes are relied to the nodes
- [x] Network changes are relayed to the nodes
- [x] Namespace support (~equivalent to multi-user in Tailscale.com)
- [x] Routing (advertise & accept, including exit nodes)
- [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
@ -43,7 +43,7 @@ Suggestions/PRs welcomed!
```shell
docker pull headscale/headscale:x.x.x
```
<!--
or
```shell
docker pull ghcr.io/juanfont/headscale:x.x.x
@ -73,8 +73,15 @@ Suggestions/PRs welcomed!
headscale namespaces create myfirstnamespace
```
or docker:
(the db.sqlite mount is only needed if you use SQLite)
```shell
docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale namespace create myfirstnamespace
touch db.sqlite
docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale namespaces create myfirstnamespace
```
or if your server is already running in docker:
```shell
docker exec <container_name> headscale namespaces create myfirstnamespace
```
5. Run the server
@ -82,11 +89,13 @@ Suggestions/PRs welcomed!
headscale serve
```
or docker:
(the db.sqlite mount is only needed if you use SQLite)
```shell
docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derb.yaml:/derb.yaml -p 127.0.0.1:8080:8080 headscale/headscale:x.x.x headscale serve
docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale serve
```
6. If you used tailscale.com before on your nodes, make sure you clear the tailscaled data folder
```shell
systemctl stop tailscaled
rm -fr /var/lib/tailscale
@ -106,7 +115,11 @@ Suggestions/PRs welcomed!
```
or docker:
```shell
docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
```
or if your server is already running in docker:
```shell
docker exec <container_name> headscale -n myfirstnamespace node register YOURMACHINEKEY
```
Alternatively, you can use Auth Keys to register your machines:
@ -117,7 +130,11 @@ Alternatively, you can use Auth Keys to register your machines:
```
or docker:
```shell
docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
```
or if your server is already running in docker:
```shell
docker exec <container_name> headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
```
2. Use the authkey from your machine to register it
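A sketch of that step on the node itself, assuming the URL and key below are placeholders you substitute with your headscale endpoint and the pre-auth key generated above:
```shell
tailscale up --login-server https://your-headscale.example.com --authkey YOURAUTHKEY
```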

View File

@ -1,3 +1,4 @@
//go:build integration
// +build integration
package headscale
@ -5,6 +6,7 @@ package headscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
@ -19,36 +21,62 @@ import (
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"tailscale.com/ipn/ipnstate"
"inet.af/netaddr"
)
var integrationTmpDir string
var ih Headscale
var (
integrationTmpDir string
ih Headscale
)
var pool dockertest.Pool
var network dockertest.Network
var headscale dockertest.Resource
var tailscaleCount int = 25
var tailscales map[string]dockertest.Resource
var (
pool dockertest.Pool
network dockertest.Network
headscale dockertest.Resource
)
var tailscaleVersions = []string{"1.14.3", "1.12.3"}
type TestNamespace struct {
count int
tailscales map[string]dockertest.Resource
}
type IntegrationTestSuite struct {
suite.Suite
stats *suite.SuiteInformation
namespaces map[string]TestNamespace
}
func TestIntegrationTestSuite(t *testing.T) {
s := new(IntegrationTestSuite)
s.namespaces = map[string]TestNamespace{
"main": {
count: 20,
tailscales: make(map[string]dockertest.Resource),
},
"shared": {
count: 5,
tailscales: make(map[string]dockertest.Resource),
},
}
suite.Run(t, s)
// HandleStats, which allows us to check if we passed and save logs
// is called after TearDown, so we cannot tear down containers before
// we have potentially saved the logs.
for _, tailscale := range tailscales {
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
}
if !s.stats.Passed() {
err := saveLog(&headscale, "test_output")
@ -119,12 +147,12 @@ func saveLog(resource *dockertest.Resource, basePath string) error {
fmt.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0644)
err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0o644)
if err != nil {
return err
}
err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stdout.String()), 0644)
err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stdout.String()), 0o644)
if err != nil {
return err
}
@ -140,6 +168,32 @@ func dockerRestartPolicy(config *docker.HostConfig) {
}
}
func tailscaleContainer(namespace, identifier, version string) (string, *dockertest.Resource) {
tailscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: ".",
BuildArgs: []docker.BuildArg{
{
Name: "TAILSCALE_VERSION",
Value: version,
},
},
}
hostname := fmt.Sprintf("%s-tailscale-%s-%s", namespace, strings.Replace(version, ".", "-", -1), identifier)
tailscaleOptions := &dockertest.RunOptions{
Name: hostname,
Networks: []*dockertest.Network{&network},
Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
}
pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy)
if err != nil {
log.Fatalf("Could not start resource: %s", err)
}
fmt.Printf("Created %s container\n", hostname)
return hostname, pts
}
func (s *IntegrationTestSuite) SetupSuite() {
var err error
h = Headscale{
@ -164,11 +218,6 @@ func (s *IntegrationTestSuite) SetupSuite() {
ContextDir: ".",
}
tailscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: ".",
}
currentPath, err := os.Getwd()
if err != nil {
log.Fatalf("Could not determine current path: %s", err)
@ -183,7 +232,7 @@ func (s *IntegrationTestSuite) SetupSuite() {
Networks: []*dockertest.Network{&network},
Cmd: []string{"headscale", "serve"},
PortBindings: map[docker.Port][]docker.PortBinding{
"8080/tcp": []docker.PortBinding{{HostPort: "8080"}},
"8080/tcp": {{HostPort: "8080"}},
},
}
@ -196,21 +245,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
fmt.Println("Created headscale container")
fmt.Println("Creating tailscale containers")
tailscales = make(map[string]dockertest.Resource)
for i := 0; i < tailscaleCount; i++ {
hostname := fmt.Sprintf("tailscale%d", i)
tailscaleOptions := &dockertest.RunOptions{
Name: hostname,
Networks: []*dockertest.Network{&network},
Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
}
for namespace, scales := range s.namespaces {
for i := 0; i < scales.count; i++ {
version := tailscaleVersions[i%len(tailscaleVersions)]
if pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy); err == nil {
tailscales[hostname] = *pts
} else {
log.Fatalf("Could not start resource: %s", err)
hostname, container := tailscaleContainer(namespace, fmt.Sprint(i), version)
scales.tailscales[hostname] = *container
}
fmt.Printf("Created %s container\n", hostname)
}
fmt.Println("Waiting for headscale to be ready")
@ -231,29 +272,31 @@ func (s *IntegrationTestSuite) SetupSuite() {
}
fmt.Println("headscale container is ready")
fmt.Println("Creating headscale namespace")
for namespace, scales := range s.namespaces {
fmt.Printf("Creating headscale namespace: %s\n", namespace)
result, err := executeCommand(
&headscale,
[]string{"headscale", "namespaces", "create", "test"},
[]string{"headscale", "namespaces", "create", namespace},
)
assert.Nil(s.T(), err)
fmt.Println("headscale create namespace result: ", result)
fmt.Println("Creating pre auth key")
fmt.Printf("Creating pre auth key for %s\n", namespace)
authKey, err := executeCommand(
&headscale,
[]string{"headscale", "-n", "test", "preauthkeys", "create", "--reusable", "--expiration", "24h"},
[]string{"headscale", "--namespace", namespace, "preauthkeys", "create", "--reusable", "--expiration", "24h"},
)
assert.Nil(s.T(), err)
headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))
fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
for hostname, tailscale := range tailscales {
for hostname, tailscale := range scales.tailscales {
command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname}
fmt.Println("Join command:", command)
fmt.Printf("Running join command for %s\n", hostname)
result, err = executeCommand(
result, err := executeCommand(
&tailscale,
command,
)
@ -261,6 +304,7 @@ func (s *IntegrationTestSuite) SetupSuite() {
assert.Nil(s.T(), err)
fmt.Printf("%s joined\n", hostname)
}
}
// The nodes need a bit of time to get their updated maps from headscale
// TODO: See if we can have a more deterministic wait here.
@ -275,10 +319,11 @@ func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteI
}
func (s *IntegrationTestSuite) TestListNodes() {
for namespace, scales := range s.namespaces {
fmt.Println("Listing nodes")
result, err := executeCommand(
&headscale,
[]string{"headscale", "-n", "test", "nodes", "list"},
[]string{"headscale", "--namespace", namespace, "nodes", "list"},
)
assert.Nil(s.T(), err)
@ -286,19 +331,21 @@ func (s *IntegrationTestSuite) TestListNodes() {
// Check that the correct number of hosts is present in the node list
lines := strings.Split(result, "\n")
assert.Equal(s.T(), len(tailscales), len(lines)-2)
assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)
for hostname, _ := range tailscales {
for hostname := range scales.tailscales {
assert.Contains(s.T(), result, hostname)
}
}
}
func (s *IntegrationTestSuite) TestGetIpAddresses() {
for _, scales := range s.namespaces {
ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
ips, err := getIPs()
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname, _ := range tailscales {
for hostname := range scales.tailscales {
s.T().Run(hostname, func(t *testing.T) {
ip := ips[hostname]
@ -311,15 +358,17 @@ func (s *IntegrationTestSuite) TestGetIpAddresses() {
ips[hostname] = ip
})
}
}
}
func (s *IntegrationTestSuite) TestStatus() {
ips, err := getIPs()
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname, tailscale := range tailscales {
for hostname, tailscale := range scales.tailscales {
s.T().Run(hostname, func(t *testing.T) {
command := []string{"tailscale", "status"}
command := []string{"tailscale", "status", "--json"}
fmt.Printf("Getting status for %s\n", hostname)
result, err := executeCommand(
@ -327,28 +376,47 @@ func (s *IntegrationTestSuite) TestStatus() {
command,
)
assert.Nil(t, err)
// fmt.Printf("Status for %s: %s", hostname, result)
var status ipnstate.Status
err = json.Unmarshal([]byte(result), &status)
assert.Nil(s.T(), err)
// TODO(kradalby): Replace this check with peer length of SAME namespace
// Check if we have as many nodes in status
// as we have IPs/tailscales
lines := strings.Split(result, "\n")
assert.Equal(t, len(ips), len(lines)-1)
assert.Equal(t, len(tailscales), len(lines)-1)
// lines := strings.Split(result, "\n")
// assert.Equal(t, len(ips), len(lines)-1)
// assert.Equal(t, len(scales.tailscales), len(lines)-1)
peerIps := getIPsfromIPNstate(status)
// Check that all hosts are present in each host's status
for ipHostname, ip := range ips {
assert.Contains(t, result, ip.String())
assert.Contains(t, result, ipHostname)
if hostname != ipHostname {
assert.Contains(t, peerIps, ip)
}
}
})
}
}
}
func getIPsfromIPNstate(status ipnstate.Status) []netaddr.IP {
ips := make([]netaddr.IP, 0)
for _, peer := range status.Peer {
ips = append(ips, peer.TailscaleIPs...)
}
return ips
}
func (s *IntegrationTestSuite) TestPingAllPeers() {
ips, err := getIPs()
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname, tailscale := range tailscales {
for hostname, tailscale := range scales.tailscales {
for peername, ip := range ips {
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
// We currently can't ping ourselves, so skip that.
@ -375,9 +443,93 @@ func (s *IntegrationTestSuite) TestPingAllPeers() {
})
}
}
}
}
func getIPs() (map[string]netaddr.IP, error) {
func (s *IntegrationTestSuite) TestSharedNodes() {
main := s.namespaces["main"]
shared := s.namespaces["shared"]
result, err := executeCommand(
&headscale,
[]string{"headscale", "nodes", "list", "-o", "json", "--namespace", "shared"},
)
assert.Nil(s.T(), err)
var machineList []Machine
err = json.Unmarshal([]byte(result), &machineList)
assert.Nil(s.T(), err)
for _, machine := range machineList {
result, err := executeCommand(
&headscale,
[]string{"headscale", "nodes", "share", "--namespace", "shared", fmt.Sprint(machine.ID), "main"},
)
assert.Nil(s.T(), err)
fmt.Println("Shared node with result: ", result)
}
result, err = executeCommand(
&headscale,
[]string{"headscale", "nodes", "list", "--namespace", "main"},
)
assert.Nil(s.T(), err)
fmt.Println("Nodelist after sharing", result)
// Check that the correct number of hosts is present in the node list
lines := strings.Split(result, "\n")
assert.Equal(s.T(), len(main.tailscales)+len(shared.tailscales), len(lines)-2)
for hostname := range main.tailscales {
assert.Contains(s.T(), result, hostname)
}
for hostname := range shared.tailscales {
assert.Contains(s.T(), result, hostname)
}
// TODO(kradalby): Figure out why these connections are not set up
// // TODO: See if we can have a more deterministic wait here.
// time.Sleep(100 * time.Second)
// mainIps, err := getIPs(main.tailscales)
// assert.Nil(s.T(), err)
// sharedIps, err := getIPs(shared.tailscales)
// assert.Nil(s.T(), err)
// for hostname, tailscale := range main.tailscales {
// for peername, ip := range sharedIps {
// s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
// // We currently cant ping ourselves, so skip that.
// if peername != hostname {
// // We are only interested in "direct ping" which means what we
// // might need a couple of more attempts before reaching the node.
// command := []string{
// "tailscale", "ping",
// "--timeout=1s",
// "--c=20",
// "--until-direct=true",
// ip.String(),
// }
// fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, mainIps[hostname], peername, ip)
// result, err := executeCommand(
// &tailscale,
// command,
// )
// assert.Nil(t, err)
// fmt.Printf("Result for %s: %s\n", hostname, result)
// assert.Contains(t, result, "pong")
// }
// })
// }
// }
}
func getIPs(tailscales map[string]dockertest.Resource) (map[string]netaddr.IP, error) {
ips := make(map[string]netaddr.IP)
for hostname, tailscale := range tailscales {
command := []string{"tailscale", "ip"}