mirror of https://github.com/juanfont/headscale.git
synced 2024-12-24 05:05:53 -05:00

Added integration tests for the embedded DERP server

parent 88378c22fb
commit e9eb90fa76
@@ -3,6 +3,7 @@
 // development
 integration_test.go
 integration_test/
+!integration_test/etc_embedded_derp/tls/server.crt
 
 Dockerfile*
 docker-compose*
@@ -7,5 +7,10 @@ RUN apt-get update \
     && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
     && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
     && apt-get update \
-    && apt-get install -y tailscale=${TAILSCALE_VERSION} dnsutils \
+    && apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
     && rm -rf /var/lib/apt/lists/*
+
+ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
+RUN chmod 644 /usr/local/share/ca-certificates/server.crt
+
+RUN update-ca-certificates
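
The added layers install headscale's self-signed certificate into the image's trust store, so clients inside the container can speak plain HTTPS to headscale without disabling verification. A minimal Go sketch of the idea (illustrative only, not part of the commit; the hostname and port mirror the test setup below):

	package main

	import (
		"fmt"
		"log"
		"net/http"
	)

	func main() {
		// With server.crt installed via update-ca-certificates, the system
		// roots already trust headscale, so a default client verifies fine.
		resp, err := http.Get("https://headscale:8443/health")
		if err != nil {
			log.Fatalf("TLS verification failed: %s", err)
		}
		defer resp.Body.Close()
		fmt.Println("status:", resp.Status)
	}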

Makefile (3 changes)
@@ -23,6 +23,9 @@ test_integration:
 test_integration_cli:
 	go test -tags integration -v integration_cli_test.go integration_common_test.go
 
+test_integration_derp:
+	go test -tags integration -v integration_embedded_derp_test.go integration_common_test.go
+
 coverprofile_func:
 	go tool cover -func=coverage.out
 
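
The new target runs only the embedded-DERP suite. From the repository root:

	make test_integration_derp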

integration_embedded_derp_test.go (new file, 384 lines)
@@ -0,0 +1,384 @@
//go:build integration

package headscale

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"strings"
	"sync"
	"testing"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/ory/dockertest/v3"
	"github.com/ory/dockertest/v3/docker"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"
)

const (
	headscaleHostname = "headscale-derp"
	namespaceName     = "derpnamespace"
	totalContainers   = 3
)
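
// IntegrationDERPTestSuite drives one headscale container and several
// tailscale containers. Each tailscale client sits on its own Docker network,
// so peers have no direct path to one another and all traffic has to relay
// through headscale's embedded DERP server.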
type IntegrationDERPTestSuite struct {
	suite.Suite
	stats *suite.SuiteInformation

	pool      dockertest.Pool
	networks  map[int]dockertest.Network // so we keep the containers isolated
	headscale dockertest.Resource

	tailscales    map[string]dockertest.Resource
	joinWaitGroup sync.WaitGroup
}

func TestDERPIntegrationTestSuite(t *testing.T) {
	s := new(IntegrationDERPTestSuite)

	s.tailscales = make(map[string]dockertest.Resource)
	s.networks = make(map[int]dockertest.Network)

	suite.Run(t, s)

	// HandleStats, which allows us to check if we passed and save logs,
	// is called after TearDown, so we cannot tear down containers before
	// we have potentially saved the logs.
	for _, tailscale := range s.tailscales {
		if err := s.pool.Purge(&tailscale); err != nil {
			log.Printf("Could not purge resource: %s\n", err)
		}
	}

	if !s.stats.Passed() {
		err := s.saveLog(&s.headscale, "test_output")
		if err != nil {
			log.Printf("Could not save log: %s\n", err)
		}
	}
	if err := s.pool.Purge(&s.headscale); err != nil {
		log.Printf("Could not purge resource: %s\n", err)
	}

	for _, network := range s.networks {
		if err := network.Close(); err != nil {
			log.Printf("Could not close network: %s\n", err)
		}
	}
}

func (s *IntegrationDERPTestSuite) SetupSuite() {
	if ppool, err := dockertest.NewPool(""); err == nil {
		s.pool = *ppool
	} else {
		log.Fatalf("Could not connect to docker: %s", err)
	}

	for i := 0; i < totalContainers; i++ {
		if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
			s.networks[i] = *pnetwork
		} else {
			log.Fatalf("Could not create network: %s", err)
		}
	}

	headscaleBuildOptions := &dockertest.BuildOptions{
		Dockerfile: "Dockerfile",
		ContextDir: ".",
	}

	currentPath, err := os.Getwd()
	if err != nil {
		log.Fatalf("Could not determine current path: %s", err)
	}
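
	// Expose 8443/tcp (headscale's HTTPS listener) and 3478/udp (the STUN
	// port used by the embedded DERP server) to the host.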
	headscaleOptions := &dockertest.RunOptions{
		Name: headscaleHostname,
		Mounts: []string{
			fmt.Sprintf("%s/integration_test/etc_embedded_derp:/etc/headscale", currentPath),
		},
		Cmd:          []string{"headscale", "serve"},
		ExposedPorts: []string{"8443/tcp", "3478/udp"},
		PortBindings: map[docker.Port][]docker.PortBinding{
			"8443/tcp": {{HostPort: "8443"}},
			"3478/udp": {{HostPort: "3478"}},
		},
	}

	log.Println("Creating headscale container")
	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
		s.headscale = *pheadscale
	} else {
		log.Fatalf("Could not start resource: %s", err)
	}
	log.Println("Created headscale container to test DERP")

	log.Println("Creating tailscale containers")

	for i := 0; i < totalContainers; i++ {
		version := tailscaleVersions[i%len(tailscaleVersions)]
		hostname, container := s.tailscaleContainer(
			fmt.Sprint(i),
			version,
			s.networks[i],
		)
		s.tailscales[hostname] = *container
	}
log.Println("Waiting for headscale to be ready")
|
||||
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8443/tcp"))
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("https://%s/health", hostEndpoint)
|
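
		// The probe runs on the host, where headscale's self-signed
		// certificate is not in the trust store, so verification is
		// skipped for this health check only.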
		insecureTransport := http.DefaultTransport.(*http.Transport).Clone()
		insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		client := &http.Client{Transport: insecureTransport}
		resp, err := client.Get(url)
		if err != nil {
			return err
		}

		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("status code not OK")
		}

		return nil
	}); err != nil {
		// TODO(kradalby): If we cannot access headscale, or any other fatal error during
		// test setup, we need to abort and tear down. However, testify does not seem to
		// support that at the moment:
		// https://github.com/stretchr/testify/issues/849
		return // fmt.Errorf("Could not connect to headscale: %s", err)
	}
	log.Println("headscale container is ready")

	log.Printf("Creating headscale namespace: %s\n", namespaceName)
	result, err := ExecuteCommand(
		&s.headscale,
		[]string{"headscale", "namespaces", "create", namespaceName},
		[]string{},
	)
	log.Println("headscale create namespace result: ", result)
	assert.Nil(s.T(), err)

	log.Printf("Creating pre auth key for %s\n", namespaceName)
	preAuthResult, err := ExecuteCommand(
		&s.headscale,
		[]string{
			"headscale",
			"--namespace",
			namespaceName,
			"preauthkeys",
			"create",
			"--reusable",
			"--expiration",
			"24h",
			"--output",
			"json",
		},
		[]string{"LOG_LEVEL=error"},
	)
	assert.Nil(s.T(), err)

	var preAuthKey v1.PreAuthKey
	err = json.Unmarshal([]byte(preAuthResult), &preAuthKey)
	assert.Nil(s.T(), err)
	assert.True(s.T(), preAuthKey.Reusable)

	headscaleEndpoint := fmt.Sprintf("https://headscale:%s", s.headscale.GetPort("8443/tcp"))

	log.Printf(
		"Joining tailscale containers to headscale at %s\n",
		headscaleEndpoint,
	)
	for hostname, tailscale := range s.tailscales {
		s.joinWaitGroup.Add(1)
		go s.Join(headscaleEndpoint, preAuthKey.Key, hostname, tailscale)
	}

	s.joinWaitGroup.Wait()

	// The nodes need a bit of time to get their updated maps from headscale.
	// TODO: See if we can have a more deterministic wait here.
	time.Sleep(60 * time.Second)
}
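
// Join runs "tailscale up" inside a tailscale container, pointing it at the
// headscale endpoint with the shared pre-auth key, and signals joinWaitGroup
// once the node has joined.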
func (s *IntegrationDERPTestSuite) Join(
	endpoint, key, hostname string,
	tailscale dockertest.Resource,
) {
	defer s.joinWaitGroup.Done()

	command := []string{
		"tailscale",
		"up",
		"--login-server",
		endpoint,
		"--authkey",
		key,
		"--hostname",
		hostname,
	}

	log.Println("Join command:", command)
	log.Printf("Running join command for %s\n", hostname)
	_, err := ExecuteCommand(
		&tailscale,
		command,
		[]string{},
	)
	assert.Nil(s.T(), err)
	log.Printf("%s joined\n", hostname)
}
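
// tailscaleContainer builds and starts a single tailscale container on the
// given (isolated) network, returning its hostname and resource handle.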
func (s *IntegrationDERPTestSuite) tailscaleContainer(
	identifier, version string,
	network dockertest.Network,
) (string, *dockertest.Resource) {
	tailscaleBuildOptions := &dockertest.BuildOptions{
		Dockerfile: "Dockerfile.tailscale",
		ContextDir: ".",
		BuildArgs: []docker.BuildArg{
			{
				Name:  "TAILSCALE_VERSION",
				Value: version,
			},
		},
	}
	hostname := fmt.Sprintf(
		"tailscale-%s-%s",
		strings.Replace(version, ".", "-", -1),
		identifier,
	)
	tailscaleOptions := &dockertest.RunOptions{
		Name:     hostname,
		Networks: []*dockertest.Network{&network},
		Cmd: []string{
			"tailscaled", "--tun=tsdev",
		},

		// Expose the host IP address, so we can access it from inside the
		// container; "headscale" also resolves to the host gateway, which is
		// how the isolated clients reach the control server.
		ExtraHosts: []string{"host.docker.internal:host-gateway", "headscale:host-gateway"},
	}

	pts, err := s.pool.BuildAndRunWithBuildOptions(
		tailscaleBuildOptions,
		tailscaleOptions,
		DockerRestartPolicy,
		DockerAllowLocalIPv6,
		DockerAllowNetworkAdministration,
	)
	if err != nil {
		log.Fatalf("Could not start resource: %s", err)
	}
	log.Printf("Created %s container\n", hostname)

	return hostname, pts
}
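
// TearDownSuite is intentionally empty: containers and networks are purged in
// TestDERPIntegrationTestSuite after suite.Run returns, so that HandleStats
// can save logs first.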
func (s *IntegrationDERPTestSuite) TearDownSuite() {
}

func (s *IntegrationDERPTestSuite) HandleStats(
	suiteName string,
	stats *suite.SuiteInformation,
) {
	s.stats = stats
}

func (s *IntegrationDERPTestSuite) saveLog(
	resource *dockertest.Resource,
	basePath string,
) error {
	err := os.MkdirAll(basePath, os.ModePerm)
	if err != nil {
		return err
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer

	err = s.pool.Client.Logs(
		docker.LogsOptions{
			Context:      context.TODO(),
			Container:    resource.Container.ID,
			OutputStream: &stdout,
			ErrorStream:  &stderr,
			Tail:         "all",
			RawTerminal:  false,
			Stdout:       true,
			Stderr:       true,
			Follow:       false,
			Timestamps:   false,
		},
	)
	if err != nil {
		return err
	}

	log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)

	err = ioutil.WriteFile(
		path.Join(basePath, resource.Container.Name+".stdout.log"),
		[]byte(stdout.String()),
		0o644,
	)
	if err != nil {
		return err
	}

	err = ioutil.WriteFile(
		path.Join(basePath, resource.Container.Name+".stderr.log"),
		[]byte(stderr.String()),
		0o644,
	)
	if err != nil {
		return err
	}

	return nil
}

func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
	ips, err := getIPs(s.tailscales)
	assert.Nil(s.T(), err)
	for hostname, tailscale := range s.tailscales {
		for peername := range ips {
			if peername == hostname {
				continue
			}
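
			// With --until-direct=false the ping may stay on the relay rather
			// than upgrading to a direct connection, so the assertion below
			// can check that replies really came "via DERP".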
			s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
				command := []string{
					"tailscale", "ping",
					"--timeout=10s",
					"--c=5",
					"--until-direct=false",
					peername,
				}

				log.Printf(
					"Pinging using hostname from %s to %s\n",
					hostname,
					peername,
				)
				log.Println(command)
				result, err := ExecuteCommand(
					&tailscale,
					command,
					[]string{},
				)
				assert.Nil(t, err)
				log.Printf("Result for %s: %s\n", hostname, result)
				assert.Contains(t, result, "via DERP")
			})
		}
	}
}

integration_test/etc_embedded_derp/tls/server.crt (new file, 18 lines)
@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC8jCCAdqgAwIBAgIULbu+UbSTMG/LtxooLLh7BgSEyqEwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJaGVhZHNjYWxlMCAXDTIyMDMwNTE2NDgwM1oYDzI1MjEx
MTA0MTY0ODAzWjAUMRIwEAYDVQQDDAloZWFkc2NhbGUwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDqcfpToLZUF0rlNwXkkt3lbyw4Cl4TJdx36o2PKaOK
U+tze/IjRsCWeMwrcR1o9TNZcxsD+c2J48D1WATuQJlMeg+2UJXGaTGRKkkbPMy3
5m7AFf/Q16UEOgm2NYjZaQ8faRGIMYURG/6sXmNeETJvBixpBev9yKJuVXgqHNS4
NpEkNwdOCuAZXrmw0HCbiusawJOay4tFvhH14rav8Uimonl8UTNVXufMzyUOuoaQ
TGflmzYX3hIoswRnTPlIWFoqObvx2Q8H+of3uQJXy0m8I6OrIoXLNxnqYMfFls79
9SYgVc2jPsCbh5fwyRbx2Hof7sIZ1K/mNgxJRG1E3ZiLAgMBAAGjOjA4MBQGA1Ud
EQQNMAuCCWhlYWRzY2FsZTALBgNVHQ8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUH
AwEwDQYJKoZIhvcNAQELBQADggEBANGlVN7NCsJaKz0k0nhlRGK+tcxn2p1PXN/i
Iy+JX8ahixPC4ocRwOhrXgb390ZXLLwq08HrWYRB/Wi1VUzCp5d8dVxvrR43dJ+v
L2EOBiIKgcu2C3pWW1qRR46/EoXUU9kSH2VNBvIhNufi32kEOidoDzxtQf6qVCoF
guUt1JkAqrynv1UvR/2ZRM/WzM/oJ8qfECwrwDxyYhkqU5Z5jCWg0C6kPIBvNdzt
B0eheWS+ZxVwkePTR4e17kIafwknth3lo+orxVrq/xC+OVM1bGrt2ZyD64ZvEqQl
w6kgbzBdLScAQptWOFThwhnJsg0UbYKimZsnYmjVEuN59TJv92M=
-----END CERTIFICATE-----