General fixups discovered by checking errors
There were a lot of tests that threw errors and did not pass all the way because we didn't check everything. This commit should fix all of these cases.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
parent f8a58aa15b
commit 1766e6b5df
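For context before the diff below: the direction of these fixes is to assert every returned error and every piece of command output explicitly instead of discarding them. A minimal sketch of the fail-fast helper pattern the tests lean on (assertNoErr is assumed to behave like the existing helper in the integration package; the real implementation may differ, and assertContains is added later in this diff):

package integration

import "testing"

// assertNoErr fails the test immediately instead of letting it continue
// half-checked. Sketch only; the wording of the real helper may differ.
func assertNoErr(t *testing.T, err error) {
    t.Helper()
    if err != nil {
        t.Fatalf("unexpected error: %s", err)
    }
}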
@@ -1,7 +1,7 @@
 # DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
 # To regenerate, run "go generate" in cmd/gh-action-integration-generator/
 
-name: Integration Test v2 - TestSSHOneUserAllToAll
+name: Integration Test v2 - TestSSHOneUserToAll
 
 on: [pull_request]
 
@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  TestSSHOneUserAllToAll:
+  TestSSHOneUserToAll:
     runs-on: ubuntu-latest
 
     steps:
@@ -34,7 +34,7 @@ jobs:
             integration_test/
             config-example.yaml
 
-      - name: Run TestSSHOneUserAllToAll
+      - name: Run TestSSHOneUserToAll
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
@@ -50,7 +50,7 @@ jobs:
            -failfast \
            -timeout 120m \
            -parallel 1 \
-           -run "^TestSSHOneUserAllToAll$"
+           -run "^TestSSHOneUserToAll$"
 
      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
@@ -1,7 +1,7 @@
 # DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
 # To regenerate, run "go generate" in cmd/gh-action-integration-generator/
 
-name: Integration Test v2 - TestSSUserOnlyIsolation
+name: Integration Test v2 - TestSSHUserOnlyIsolation
 
 on: [pull_request]
 
@@ -10,7 +10,7 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  TestSSUserOnlyIsolation:
+  TestSSHUserOnlyIsolation:
     runs-on: ubuntu-latest
 
    steps:
@@ -34,7 +34,7 @@ jobs:
            integration_test/
            config-example.yaml
 
-      - name: Run TestSSUserOnlyIsolation
+      - name: Run TestSSHUserOnlyIsolation
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
@@ -50,7 +50,7 @@ jobs:
            -failfast \
            -timeout 120m \
            -parallel 1 \
-           -run "^TestSSUserOnlyIsolation$"
+           -run "^TestSSHUserOnlyIsolation$"
 
      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
@@ -30,6 +30,7 @@ linters:
     - exhaustruct
     - nolintlint
     - musttag # causes issues with imported libs
+    - depguard
 
     # deprecated
     - structcheck # replaced by unused
@@ -480,6 +480,8 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
        baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
    }
 
+   log.Trace().Interface("dns_config", dnsConfig).Msg("DNS configuration loaded")
+
    return dnsConfig, baseDomain
 }
 
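The added log.Trace call uses the zerolog-style logger used elsewhere in headscale; trace-level events are only emitted when the global level is lowered, so the new line is invisible under the default level. A minimal, self-contained illustration (not part of the commit; the dnsConfig value here is a stand-in):

package main

import (
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
)

func main() {
    // Trace is below the default level, so this is needed for the
    // "DNS configuration loaded" message to appear at all.
    zerolog.SetGlobalLevel(zerolog.TraceLevel)

    dnsConfig := map[string]any{"magic_dns": false}
    log.Trace().Interface("dns_config", dnsConfig).Msg("DNS configuration loaded")
}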
@@ -61,6 +61,10 @@ func aclScenario(
 
    err = scenario.CreateHeadscaleEnv(spec,
        []tsic.Option{
+           // Alpine containers dont have ip6tables set up, which causes
+           // tailscaled to stop configuring the wgengine, causing it
+           // to not configure DNS.
+           tsic.WithNetfilter("off"),
            tsic.WithDockerEntrypoint([]string{
                "/bin/sh",
                "-c",
@@ -21,6 +21,7 @@ import (
    "github.com/ory/dockertest/v3"
    "github.com/ory/dockertest/v3/docker"
    "github.com/samber/lo"
+   "github.com/stretchr/testify/assert"
 )
 
 const (
@@ -90,6 +91,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
    t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
 }
 
+// This test is really flaky.
 func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
    IntegrationSkip(t)
    t.Parallel()
@@ -99,13 +101,15 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
    baseScenario, err := NewScenario()
    assertNoErr(t, err)
 
+   baseScenario.pool.MaxWait = 5 * time.Minute
+
    scenario := AuthOIDCScenario{
        Scenario: baseScenario,
    }
    defer scenario.Shutdown()
 
    spec := map[string]int{
-       "user1": len(MustTestVersions),
+       "user1": 3,
    }
 
    oidcConfig, err := scenario.runMockOIDC(shortAccessTTL)
@@ -143,9 +147,13 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
    success := pingAllHelper(t, allClients, allAddrs)
    t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))
 
-   // await all nodes being logged out after OIDC token expiry
-   err = scenario.WaitForTailscaleLogout()
-   assertNoErrLogout(t, err)
+   // This is not great, but this sadly is a time dependent test, so the
+   // safe thing to do is wait out the whole TTL time before checking if
+   // the clients have logged out. The Wait function cant do it itself
+   // as it has an upper bound of 1 min.
+   time.Sleep(shortAccessTTL)
+
+   assertTailscaleNodesLogout(t, allClients)
 }
 
 func (s *AuthOIDCScenario) CreateHeadscaleEnv(
@@ -296,13 +304,15 @@ func (s *AuthOIDCScenario) runTailscaleUp(
 
        log.Printf("%s login url: %s\n", c.Hostname(), loginURL.String())
 
+       if err := s.pool.Retry(func() error {
+           log.Printf("%s logging in with url", c.Hostname())
            httpClient := &http.Client{Transport: insecureTransport}
            ctx := context.Background()
            req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
            resp, err := httpClient.Do(req)
            if err != nil {
                log.Printf(
-                   "%s failed to get login url %s: %s",
+                   "%s failed to login using url %s: %s",
                    c.Hostname(),
                    loginURL,
                    err,
@@ -311,6 +321,12 @@ func (s *AuthOIDCScenario) runTailscaleUp(
                return err
            }
 
+           if resp.StatusCode != http.StatusOK {
+               log.Printf("%s response code of oidc login request was %s", c.Hostname(), resp.Status)
+
+               return errStatusCodeNotOK
+           }
+
            defer resp.Body.Close()
 
            _, err = io.ReadAll(resp.Body)
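The new status-code guard returns errStatusCodeNotOK, which is referenced but not defined anywhere in this diff. Presumably it is (or could be) a package-level sentinel error along these lines; this definition is hypothetical and only named to match the usage above:

package integration

import "errors"

// Hypothetical sentinel matching the errStatusCodeNotOK referenced above;
// returning a stable error value lets pool.Retry keep retrying the OIDC
// login until the endpoint finally answers with 200 OK.
var errStatusCodeNotOK = errors.New("status code not OK")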
@@ -320,6 +336,11 @@ func (s *AuthOIDCScenario) runTailscaleUp(
                return err
            }
 
+           return nil
+       }); err != nil {
+           return err
+       }
+
        log.Printf("Finished request for %s to join tailnet", c.Hostname())
 
        return nil
@@ -357,3 +378,14 @@ func (s *AuthOIDCScenario) Shutdown() {
 
    s.Scenario.Shutdown()
 }
+
+func assertTailscaleNodesLogout(t *testing.T, clients []TailscaleClient) {
+   t.Helper()
+
+   for _, client := range clients {
+       status, err := client.Status()
+       assertNoErr(t, err)
+
+       assert.Equal(t, "NeedsLogin", status.BackendState)
+   }
+}
@@ -78,14 +78,14 @@ var (
    )
 
    // MustTestVersions is the minimum set of versions we should test.
-   // At the moment, this is arbitrarily choosen as:
+   // At the moment, this is arbitrarily chosen as:
    //
    // - Two unstable (HEAD and unstable)
    // - Two latest versions
-   // - Two oldest versions
+   // - Two oldest versions.
    MustTestVersions = append(
        tailscaleVersions2021[0:4],
-       tailscaleVersions2019[len(tailscaleVersions2019)-2:len(tailscaleVersions2019)]...,
+       tailscaleVersions2019[len(tailscaleVersions2019)-2:]...,
    )
 )
 
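The MustTestVersions change is purely cosmetic: omitting the high bound of a slice expression defaults it to len(s), so both spellings select the same trailing two elements. A quick self-contained check (the version strings here are made-up placeholders, not the actual tailscaleVersions2019 values):

package main

import "fmt"

func main() {
    versions := []string{"head", "unstable", "1.40", "1.38", "1.36", "1.34"}

    // Equivalent slice expressions: the high index defaults to len(versions).
    long := versions[len(versions)-2 : len(versions)]
    short := versions[len(versions)-2:]

    fmt.Println(long, short) // [1.36 1.34] [1.36 1.34]
}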
@@ -573,7 +573,7 @@ func (s *Scenario) WaitForTailscaleLogout() error {
        for _, client := range user.Clients {
            c := client
            user.syncWaitGroup.Go(func() error {
-               return c.WaitForLogout()
+               return c.WaitForNeedsLogin()
            })
        }
        if err := user.syncWaitGroup.Wait(); err != nil {
@@ -2,6 +2,7 @@ package integration
 
 import (
    "fmt"
+   "log"
    "strings"
    "testing"
    "time"
@@ -53,10 +54,16 @@ func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Sc
 
    err = scenario.CreateHeadscaleEnv(spec,
        []tsic.Option{
+           tsic.WithSSH(),
+
+           // Alpine containers dont have ip6tables set up, which causes
+           // tailscaled to stop configuring the wgengine, causing it
+           // to not configure DNS.
+           tsic.WithNetfilter("off"),
            tsic.WithDockerEntrypoint([]string{
                "/bin/sh",
                "-c",
-               "/bin/sleep 3 ; apk add openssh ; update-ca-certificates ; tailscaled --tun=tsdev",
+               "/bin/sleep 3 ; apk add openssh ; adduser ssh-it-user ; update-ca-certificates ; tailscaled --tun=tsdev",
            }),
            tsic.WithDockerWorkdir("/"),
        },
@@ -77,7 +84,7 @@ func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Sc
    return scenario
 }
 
-func TestSSHOneUserAllToAll(t *testing.T) {
+func TestSSHOneUserToAll(t *testing.T) {
    IntegrationSkip(t)
    t.Parallel()
 
@@ -97,7 +104,7 @@ func TestSSHOneUserAllToAll(t *testing.T) {
            {
                Action: "accept",
                Sources: []string{"group:integration-test"},
-               Destinations: []string{"group:integration-test"},
+               Destinations: []string{"*"},
                Users: []string{"ssh-it-user"},
            },
        },
@@ -109,13 +116,19 @@ func TestSSHOneUserAllToAll(t *testing.T) {
    allClients, err := scenario.ListTailscaleClients()
    assertNoErrListClients(t, err)
 
+   user1Clients, err := scenario.ListTailscaleClients("user1")
+   assertNoErrListClients(t, err)
+
+   user2Clients, err := scenario.ListTailscaleClients("user2")
+   assertNoErrListClients(t, err)
+
    err = scenario.WaitForTailscaleSync()
    assertNoErrSync(t, err)
 
    _, err = scenario.ListTailscaleClientsFQDNs()
    assertNoErrListFQDN(t, err)
 
-   for _, client := range allClients {
+   for _, client := range user1Clients {
        for _, peer := range allClients {
            if client.Hostname() == peer.Hostname() {
                continue
@@ -124,6 +137,16 @@ func TestSSHOneUserAllToAll(t *testing.T) {
            assertSSHHostname(t, client, peer)
        }
    }
+
+   for _, client := range user2Clients {
+       for _, peer := range allClients {
+           if client.Hostname() == peer.Hostname() {
+               continue
+           }
+
+           assertSSHPermissionDenied(t, client, peer)
+       }
+   }
 }
 
 func TestSSHMultipleUsersAllToAll(t *testing.T) {
@@ -270,7 +293,7 @@ func TestSSHIsBlockedInACL(t *testing.T) {
    }
 }
 
-func TestSSUserOnlyIsolation(t *testing.T) {
+func TestSSHUserOnlyIsolation(t *testing.T) {
    IntegrationSkip(t)
    t.Parallel()
 
@@ -365,11 +388,14 @@ func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string,
    peerFQDN, _ := peer.FQDN()
 
    command := []string{
-       "ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
+       "/usr/bin/ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
        fmt.Sprintf("%s@%s", "ssh-it-user", peerFQDN),
        "'hostname'",
    }
 
+   log.Printf("Running from %s to %s", client.Hostname(), peer.Hostname())
+   log.Printf("Command: %s", strings.Join(command, " "))
+
    return retry(10, 1*time.Second, func() (string, string, error) {
        return client.Execute(command)
    })
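doSSH wraps the command in a retry helper that is not part of this diff. A plausible shape for such a helper, inferred only from the call site retry(10, 1*time.Second, ...) above; this is an assumption, not the actual implementation:

package integration

import "time"

// retry runs fn up to attempts times, sleeping between tries, and returns
// the last stdout/stderr/error. Sketch inferred from the call site; the
// real helper in the integration package may differ.
func retry(attempts int, sleep time.Duration, fn func() (string, string, error)) (string, string, error) {
    var stdout, stderr string
    var err error
    for i := 0; i < attempts; i++ {
        stdout, stderr, err = fn()
        if err == nil {
            return stdout, stderr, nil
        }
        time.Sleep(sleep)
    }
    return stdout, stderr, err
}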
@@ -381,27 +407,28 @@ func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClien
    result, _, err := doSSH(t, client, peer)
    assertNoErr(t, err)
 
-   assert.Contains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
+   assertContains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
 }
 
 func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
    t.Helper()
 
-   result, stderr, err := doSSH(t, client, peer)
-   assert.Error(t, err)
+   result, stderr, _ := doSSH(t, client, peer)
 
    assert.Empty(t, result)
 
-   assert.Contains(t, stderr, "Permission denied (tailscale)")
+   assertContains(t, stderr, "Permission denied (tailscale)")
 }
 
 func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {
    t.Helper()
 
-   result, stderr, err := doSSH(t, client, peer)
-   assertNoErr(t, err)
+   result, stderr, _ := doSSH(t, client, peer)
 
    assert.Empty(t, result)
 
-   assert.Contains(t, stderr, "Connection timed out")
+   if !strings.Contains(stderr, "Connection timed out") &&
+       !strings.Contains(stderr, "Operation timed out") {
+       t.Fatalf("connection did not time out")
+   }
 }
@@ -26,7 +26,6 @@ type TailscaleClient interface {
    Status() (*ipnstate.Status, error)
    WaitForNeedsLogin() error
    WaitForRunning() error
-   WaitForLogout() error
    WaitForPeers(expected int) error
    Ping(hostnameOrIP string, opts ...tsic.PingOption) error
    Curl(url string, opts ...tsic.CurlOption) (string, error)
@@ -35,7 +35,6 @@ var (
    errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey")
    errTailscaleNotConnected = errors.New("tailscale not connected")
    errTailscaledNotReadyForLogin = errors.New("tailscaled not ready for login")
-   errTailscaleNotLoggedOut = errors.New("tailscale not logged out")
 )
 
 func errTailscaleStatus(hostname string, err error) error {
@@ -64,6 +63,7 @@ type TailscaleInContainer struct {
    withEntrypoint []string
    withExtraHosts []string
    workdir string
+   netfilter string
 }
 
 // Option represent optional settings that can be given to a
@@ -148,6 +148,15 @@ func WithDockerEntrypoint(args []string) Option {
    }
 }
 
+// WithNetfilter configures Tailscales parameter --netfilter-mode
+// allowing us to turn of modifying ip[6]tables/nftables.
+// It takes: "on", "off", "nodivert".
+func WithNetfilter(state string) Option {
+   return func(tsic *TailscaleInContainer) {
+       tsic.netfilter = state
+   }
+}
+
 // New returns a new TailscaleInContainer instance.
 func New(
    pool *dockertest.Pool,
@@ -340,6 +349,10 @@ func (t *TailscaleInContainer) Login(
        command = append(command, "--ssh")
    }
 
+   if t.netfilter != "" {
+       command = append(command, "--netfilter-mode="+t.netfilter)
+   }
+
    if len(t.withTags) > 0 {
        command = append(command,
            fmt.Sprintf(`--advertise-tags=%s`, strings.Join(t.withTags, ",")),
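Taken together, these tsic changes follow the existing functional-options pattern: WithNetfilter only records the desired mode on the container struct, and Login later translates it into the tailscale CLI flag. A condensed, self-contained sketch of that flow (the field, option, and flag names match the diff; the surrounding types are simplified stand-ins for illustration):

package main

import "fmt"

type tailscaleInContainer struct {
    netfilter string
}

type option func(*tailscaleInContainer)

// withNetfilter mirrors tsic.WithNetfilter: it only stores the mode.
func withNetfilter(state string) option {
    return func(t *tailscaleInContainer) { t.netfilter = state }
}

func main() {
    t := &tailscaleInContainer{}
    for _, opt := range []option{withNetfilter("off")} {
        opt(t)
    }

    // Login-time translation into the CLI flag, as in the diff above.
    command := []string{"tailscale", "up"}
    if t.netfilter != "" {
        command = append(command, "--netfilter-mode="+t.netfilter)
    }
    fmt.Println(command) // [tailscale up --netfilter-mode=off]
}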
@@ -513,22 +526,6 @@ func (t *TailscaleInContainer) WaitForRunning() error {
    })
 }
 
-// WaitForLogout blocks until the Tailscale instance has logged out.
-func (t *TailscaleInContainer) WaitForLogout() error {
-   return fmt.Errorf("%s err: %w", t.hostname, t.pool.Retry(func() error {
-       status, err := t.Status()
-       if err != nil {
-           return errTailscaleStatus(t.hostname, err)
-       }
-
-       if status.CurrentTailnet == nil {
-           return nil
-       }
-
-       return errTailscaleNotLoggedOut
-   }))
-}
-
 // WaitForPeers blocks until N number of peers is present in the
 // Peer list of the Tailscale instance.
 func (t *TailscaleInContainer) WaitForPeers(expected int) error {
@@ -1,6 +1,7 @@
 package integration
 
 import (
+   "strings"
    "testing"
    "time"
 
@@ -59,6 +60,13 @@ func assertNoErrLogout(t *testing.T, err error) {
    assertNoErrf(t, "failed to log out tailscale nodes: %s", err)
 }
 
+func assertContains(t *testing.T, str, subStr string) {
+   t.Helper()
+   if !strings.Contains(str, subStr) {
+       t.Fatalf("%#v does not contain %#v", str, subStr)
+   }
+}
+
 func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {
    t.Helper()
    success := 0