2022-10-18 03:59:43 -06:00
package integration
import (
	"encoding/json"
	"fmt"
	"net/netip"
	"slices"
	"strings"
	"testing"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/rs/zerolog/log"
	"github.com/samber/lo"
	"github.com/stretchr/testify/assert"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/types/key"
)
2022-10-21 06:08:04 -06:00
func TestPingAllByIP ( t * testing . T ) {
2022-10-18 03:59:43 -06:00
IntegrationSkip ( t )
2022-11-22 04:05:58 -07:00
t . Parallel ( )
2022-10-18 03:59:43 -06:00
scenario , err := NewScenario ( )
2023-08-29 00:33:33 -06:00
assertNoErr ( t , err )
defer scenario . Shutdown ( )
2022-10-18 03:59:43 -06:00
2024-02-08 09:28:19 -07:00
// TODO(kradalby): it does not look like the user thing works, only second
// get created? maybe only when many?
2022-10-18 03:59:43 -06:00
spec := map [ string ] int {
2023-08-31 06:57:43 -06:00
"user1" : len ( MustTestVersions ) ,
"user2" : len ( MustTestVersions ) ,
2022-10-18 03:59:43 -06:00
}
2024-02-08 23:26:41 -07:00
headscaleConfig := map [ string ] string {
"HEADSCALE_DERP_URLS" : "" ,
"HEADSCALE_DERP_SERVER_ENABLED" : "true" ,
"HEADSCALE_DERP_SERVER_REGION_ID" : "999" ,
"HEADSCALE_DERP_SERVER_REGION_CODE" : "headscale" ,
"HEADSCALE_DERP_SERVER_REGION_NAME" : "Headscale Embedded DERP" ,
"HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR" : "0.0.0.0:3478" ,
"HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH" : "/tmp/derp.key" ,
// Envknob for enabling DERP debug logs
"DERP_DEBUG_LOGS" : "true" ,
"DERP_PROBER_DEBUG_LOGS" : "true" ,
}
err = scenario . CreateHeadscaleEnv ( spec ,
[ ] tsic . Option { } ,
hsic . WithTestName ( "pingallbyip" ) ,
hsic . WithConfigEnv ( headscaleConfig ) ,
hsic . WithTLS ( ) ,
hsic . WithHostnameAsServerURL ( ) ,
)
2023-08-29 00:33:33 -06:00
assertNoErrHeadscaleEnv ( t , err )
2022-10-18 03:59:43 -06:00
2022-10-23 04:41:35 -06:00
allClients , err := scenario . ListTailscaleClients ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClients ( t , err )
2022-10-18 03:59:43 -06:00
2022-10-23 04:41:35 -06:00
allIps , err := scenario . ListTailscaleClientsIPs ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClientIPs ( t , err )
2022-10-18 03:59:43 -06:00
2022-10-18 04:19:43 -06:00
err = scenario . WaitForTailscaleSync ( )
2023-08-29 00:33:33 -06:00
assertNoErrSync ( t , err )
2022-10-18 03:59:43 -06:00
2024-02-23 02:59:24 -07:00
// assertClientsState(t, allClients)
2024-02-08 23:26:41 -07:00
allAddrs := lo . Map ( allIps , func ( x netip . Addr , index int ) string {
return x . String ( )
} )
success := pingAllHelper ( t , allClients , allAddrs )
t . Logf ( "%d successful pings out of %d" , success , len ( allClients ) * len ( allIps ) )
}
func TestPingAllByIPPublicDERP ( t * testing . T ) {
IntegrationSkip ( t )
t . Parallel ( )
scenario , err := NewScenario ( )
assertNoErr ( t , err )
defer scenario . Shutdown ( )
spec := map [ string ] int {
"user1" : len ( MustTestVersions ) ,
"user2" : len ( MustTestVersions ) ,
}
err = scenario . CreateHeadscaleEnv ( spec ,
[ ] tsic . Option { } ,
hsic . WithTestName ( "pingallbyippubderp" ) ,
)
assertNoErrHeadscaleEnv ( t , err )
allClients , err := scenario . ListTailscaleClients ( )
assertNoErrListClients ( t , err )
allIps , err := scenario . ListTailscaleClientsIPs ( )
assertNoErrListClientIPs ( t , err )
err = scenario . WaitForTailscaleSync ( )
assertNoErrSync ( t , err )
2024-02-23 02:59:24 -07:00
// assertClientsState(t, allClients)
2024-02-08 23:26:41 -07:00
2023-02-02 02:14:33 -07:00
allAddrs := lo . Map ( allIps , func ( x netip . Addr , index int ) string {
return x . String ( )
} )
2022-10-18 03:59:43 -06:00
2023-02-02 02:14:33 -07:00
success := pingAllHelper ( t , allClients , allAddrs )
2022-10-18 03:59:43 -06:00
t . Logf ( "%d successful pings out of %d" , success , len ( allClients ) * len ( allIps ) )
}
2022-10-21 06:08:14 -06:00
2022-12-22 08:41:49 -07:00
func TestAuthKeyLogoutAndRelogin ( t * testing . T ) {
IntegrationSkip ( t )
t . Parallel ( )
scenario , err := NewScenario ( )
2023-08-29 00:33:33 -06:00
assertNoErr ( t , err )
defer scenario . Shutdown ( )
2022-12-22 08:41:49 -07:00
spec := map [ string ] int {
2023-08-31 06:57:43 -06:00
"user1" : len ( MustTestVersions ) ,
"user2" : len ( MustTestVersions ) ,
2022-12-22 08:41:49 -07:00
}
err = scenario . CreateHeadscaleEnv ( spec , [ ] tsic . Option { } , hsic . WithTestName ( "pingallbyip" ) )
2023-08-29 00:33:33 -06:00
assertNoErrHeadscaleEnv ( t , err )
2022-12-22 08:41:49 -07:00
allClients , err := scenario . ListTailscaleClients ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClients ( t , err )
2022-12-22 08:41:49 -07:00
err = scenario . WaitForTailscaleSync ( )
2023-08-29 00:33:33 -06:00
assertNoErrSync ( t , err )
2022-12-22 08:41:49 -07:00
2024-02-23 02:59:24 -07:00
// assertClientsState(t, allClients)
2024-02-08 23:26:41 -07:00
2022-12-22 08:41:49 -07:00
clientIPs := make ( map [ TailscaleClient ] [ ] netip . Addr )
for _ , client := range allClients {
ips , err := client . IPs ( )
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to get IPs for client %s: %s" , client . Hostname ( ) , err )
2022-12-22 08:41:49 -07:00
}
clientIPs [ client ] = ips
}
for _ , client := range allClients {
err := client . Logout ( )
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to logout client %s: %s" , client . Hostname ( ) , err )
2022-12-22 08:41:49 -07:00
}
}
2023-08-29 00:33:33 -06:00
err = scenario . WaitForTailscaleLogout ( )
assertNoErrLogout ( t , err )
2022-12-22 08:41:49 -07:00
t . Logf ( "all clients logged out" )
headscale , err := scenario . Headscale ( )
2023-08-29 00:33:33 -06:00
assertNoErrGetHeadscale ( t , err )
2022-12-22 08:41:49 -07:00
2023-01-17 09:43:44 -07:00
for userName := range spec {
key , err := scenario . CreatePreAuthKey ( userName , true , false )
2022-12-22 08:41:49 -07:00
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to create pre-auth key for user %s: %s" , userName , err )
2022-12-22 08:41:49 -07:00
}
2023-01-17 09:43:44 -07:00
err = scenario . RunTailscaleUp ( userName , headscale . GetEndpoint ( ) , key . GetKey ( ) )
2022-12-22 08:41:49 -07:00
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to run tailscale up for user %s: %s" , userName , err )
2022-12-22 08:41:49 -07:00
}
}
err = scenario . WaitForTailscaleSync ( )
2023-08-29 00:33:33 -06:00
assertNoErrSync ( t , err )
2022-12-22 08:41:49 -07:00
2024-02-23 02:59:24 -07:00
// assertClientsState(t, allClients)
2024-02-08 23:26:41 -07:00
2022-12-22 08:41:49 -07:00
allClients , err = scenario . ListTailscaleClients ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClients ( t , err )
2022-12-22 08:41:49 -07:00
allIps , err := scenario . ListTailscaleClientsIPs ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClientIPs ( t , err )
2022-12-22 08:41:49 -07:00
2023-02-02 02:14:33 -07:00
allAddrs := lo . Map ( allIps , func ( x netip . Addr , index int ) string {
return x . String ( )
} )
2022-12-22 08:41:49 -07:00
2023-02-02 02:14:33 -07:00
success := pingAllHelper ( t , allClients , allAddrs )
2022-12-22 08:41:49 -07:00
t . Logf ( "%d successful pings out of %d" , success , len ( allClients ) * len ( allIps ) )
for _ , client := range allClients {
ips , err := client . IPs ( )
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to get IPs for client %s: %s" , client . Hostname ( ) , err )
2022-12-22 08:41:49 -07:00
}
// lets check if the IPs are the same
if len ( ips ) != len ( clientIPs [ client ] ) {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "IPs changed for client %s" , client . Hostname ( ) )
2022-12-22 08:41:49 -07:00
}
for _ , ip := range ips {
found := false
for _ , oldIP := range clientIPs [ client ] {
if ip == oldIP {
found = true
break
}
}
if ! found {
2023-08-29 00:33:33 -06:00
t . Fatalf (
2023-02-02 02:14:33 -07:00
"IPs changed for client %s. Used to be %v now %v" ,
client . Hostname ( ) ,
clientIPs [ client ] ,
ips ,
)
2022-12-22 08:41:49 -07:00
}
}
}
}
2022-12-27 12:05:21 -07:00
func TestEphemeral ( t * testing . T ) {
IntegrationSkip ( t )
t . Parallel ( )
scenario , err := NewScenario ( )
2023-08-29 00:33:33 -06:00
assertNoErr ( t , err )
defer scenario . Shutdown ( )
2022-12-27 12:05:21 -07:00
spec := map [ string ] int {
2023-08-31 06:57:43 -06:00
"user1" : len ( MustTestVersions ) ,
"user2" : len ( MustTestVersions ) ,
2022-12-27 12:05:21 -07:00
}
headscale , err := scenario . Headscale ( hsic . WithTestName ( "ephemeral" ) )
2023-08-29 00:33:33 -06:00
assertNoErrHeadscaleEnv ( t , err )
2022-12-27 12:05:21 -07:00
2023-01-17 09:43:44 -07:00
for userName , clientCount := range spec {
err = scenario . CreateUser ( userName )
2022-12-27 12:05:21 -07:00
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to create user %s: %s" , userName , err )
2022-12-27 12:05:21 -07:00
}
2023-01-17 09:43:44 -07:00
err = scenario . CreateTailscaleNodesInUser ( userName , "all" , clientCount , [ ] tsic . Option { } ... )
2022-12-27 12:05:21 -07:00
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to create tailscale nodes in user %s: %s" , userName , err )
2022-12-27 12:05:21 -07:00
}
2023-01-17 09:43:44 -07:00
key , err := scenario . CreatePreAuthKey ( userName , true , true )
2022-12-27 12:05:21 -07:00
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to create pre-auth key for user %s: %s" , userName , err )
2022-12-27 12:05:21 -07:00
}
2023-01-17 09:43:44 -07:00
err = scenario . RunTailscaleUp ( userName , headscale . GetEndpoint ( ) , key . GetKey ( ) )
2022-12-27 12:05:21 -07:00
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to run tailscale up for user %s: %s" , userName , err )
2022-12-27 12:05:21 -07:00
}
}
err = scenario . WaitForTailscaleSync ( )
2023-08-29 00:33:33 -06:00
assertNoErrSync ( t , err )
2022-12-27 12:05:21 -07:00
allClients , err := scenario . ListTailscaleClients ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClients ( t , err )
2022-12-27 12:05:21 -07:00
allIps , err := scenario . ListTailscaleClientsIPs ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClientIPs ( t , err )
2022-12-27 12:05:21 -07:00
2023-02-02 02:14:33 -07:00
allAddrs := lo . Map ( allIps , func ( x netip . Addr , index int ) string {
return x . String ( )
} )
2022-12-27 12:05:21 -07:00
2023-02-02 02:14:33 -07:00
success := pingAllHelper ( t , allClients , allAddrs )
2022-12-27 12:05:21 -07:00
t . Logf ( "%d successful pings out of %d" , success , len ( allClients ) * len ( allIps ) )
for _ , client := range allClients {
err := client . Logout ( )
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to logout client %s: %s" , client . Hostname ( ) , err )
2022-12-27 12:05:21 -07:00
}
}
2023-08-29 00:33:33 -06:00
err = scenario . WaitForTailscaleLogout ( )
assertNoErrLogout ( t , err )
2022-12-27 12:05:21 -07:00
t . Logf ( "all clients logged out" )
2023-01-17 09:43:44 -07:00
for userName := range spec {
2023-09-24 05:42:05 -06:00
nodes , err := headscale . ListNodesInUser ( userName )
2022-12-27 12:05:21 -07:00
if err != nil {
log . Error ( ) .
Err ( err ) .
2023-01-17 09:43:44 -07:00
Str ( "user" , userName ) .
2023-09-24 05:42:05 -06:00
Msg ( "Error listing nodes in user" )
2022-12-27 12:05:21 -07:00
return
}
2023-09-24 05:42:05 -06:00
if len ( nodes ) != 0 {
t . Fatalf ( "expected no nodes, got %d in user %s" , len ( nodes ) , userName )
2022-12-27 12:05:21 -07:00
}
}
}
2022-10-21 06:08:14 -06:00
func TestPingAllByHostname ( t * testing . T ) {
IntegrationSkip ( t )
2022-11-22 04:05:58 -07:00
t . Parallel ( )
2022-10-21 06:08:14 -06:00
scenario , err := NewScenario ( )
2023-08-29 00:33:33 -06:00
assertNoErr ( t , err )
defer scenario . Shutdown ( )
2022-10-21 06:08:14 -06:00
spec := map [ string ] int {
2023-12-09 10:09:24 -07:00
"user3" : len ( MustTestVersions ) ,
"user4" : len ( MustTestVersions ) ,
2022-10-21 06:08:14 -06:00
}
2022-11-08 08:10:03 -07:00
err = scenario . CreateHeadscaleEnv ( spec , [ ] tsic . Option { } , hsic . WithTestName ( "pingallbyname" ) )
2023-08-29 00:33:33 -06:00
assertNoErrHeadscaleEnv ( t , err )
2022-10-21 06:08:14 -06:00
2022-10-23 04:41:35 -06:00
allClients , err := scenario . ListTailscaleClients ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClients ( t , err )
2022-10-21 06:08:14 -06:00
err = scenario . WaitForTailscaleSync ( )
2023-08-29 00:33:33 -06:00
assertNoErrSync ( t , err )
2022-10-21 06:08:14 -06:00
2024-02-23 02:59:24 -07:00
// assertClientsState(t, allClients)
2024-02-08 23:26:41 -07:00
2022-10-23 04:41:35 -06:00
allHostnames , err := scenario . ListTailscaleClientsFQDNs ( )
2023-08-29 00:33:33 -06:00
assertNoErrListFQDN ( t , err )
2022-10-21 09:44:40 -06:00
2023-02-02 02:14:33 -07:00
success := pingAllHelper ( t , allClients , allHostnames )
2022-10-21 06:08:14 -06:00
t . Logf ( "%d successful pings out of %d" , success , len ( allClients ) * len ( allClients ) )
}
2022-10-23 06:13:22 -06:00
2022-12-01 06:01:06 -07:00
// If subtests are parallel, then they will start before setup is run.
// This might mean we approach setup slightly wrong, but for now, ignore
// the linter
// nolint:tparallel
2022-10-23 06:13:22 -06:00
func TestTaildrop ( t * testing . T ) {
IntegrationSkip ( t )
2022-11-22 04:05:58 -07:00
t . Parallel ( )
2022-10-23 06:13:22 -06:00
retry := func ( times int , sleepInverval time . Duration , doWork func ( ) error ) error {
var err error
for attempts := 0 ; attempts < times ; attempts ++ {
err = doWork ( )
if err == nil {
return nil
}
time . Sleep ( sleepInverval )
}
return err
}
scenario , err := NewScenario ( )
2023-08-29 00:33:33 -06:00
assertNoErr ( t , err )
defer scenario . Shutdown ( )
2022-10-23 06:13:22 -06:00
spec := map [ string ] int {
2023-12-09 10:09:24 -07:00
"taildrop" : len ( MustTestVersions ) ,
2022-10-23 06:13:22 -06:00
}
2022-11-08 08:10:03 -07:00
err = scenario . CreateHeadscaleEnv ( spec , [ ] tsic . Option { } , hsic . WithTestName ( "taildrop" ) )
2023-08-29 00:33:33 -06:00
assertNoErrHeadscaleEnv ( t , err )
2022-10-23 06:13:22 -06:00
allClients , err := scenario . ListTailscaleClients ( )
2023-08-29 00:33:33 -06:00
assertNoErrListClients ( t , err )
2022-10-23 06:13:22 -06:00
err = scenario . WaitForTailscaleSync ( )
2023-08-29 00:33:33 -06:00
assertNoErrSync ( t , err )
2022-10-23 06:13:22 -06:00
// This will essentially fetch and cache all the FQDNs
_ , err = scenario . ListTailscaleClientsFQDNs ( )
2023-08-29 00:33:33 -06:00
assertNoErrListFQDN ( t , err )
2022-10-23 06:13:22 -06:00
2023-12-09 10:09:24 -07:00
for _ , client := range allClients {
if ! strings . Contains ( client . Hostname ( ) , "head" ) {
command := [ ] string { "apk" , "add" , "curl" }
_ , _ , err := client . Execute ( command )
if err != nil {
t . Fatalf ( "failed to install curl on %s, err: %s" , client . Hostname ( ) , err )
}
}
2024-02-08 09:28:19 -07:00
curlCommand := [ ] string {
"curl" ,
"--unix-socket" ,
"/var/run/tailscale/tailscaled.sock" ,
"http://local-tailscaled.sock/localapi/v0/file-targets" ,
}
2023-12-09 10:09:24 -07:00
err = retry ( 10 , 1 * time . Second , func ( ) error {
result , _ , err := client . Execute ( curlCommand )
if err != nil {
return err
}
var fts [ ] apitype . FileTarget
err = json . Unmarshal ( [ ] byte ( result ) , & fts )
if err != nil {
return err
}
if len ( fts ) != len ( allClients ) - 1 {
ftStr := fmt . Sprintf ( "FileTargets for %s:\n" , client . Hostname ( ) )
for _ , ft := range fts {
ftStr += fmt . Sprintf ( "\t%s\n" , ft . Node . Name )
}
2024-02-08 09:28:19 -07:00
return fmt . Errorf (
"client %s does not have all its peers as FileTargets, got %d, want: %d\n%s" ,
client . Hostname ( ) ,
len ( fts ) ,
len ( allClients ) - 1 ,
ftStr ,
)
2023-12-09 10:09:24 -07:00
}
return err
} )
if err != nil {
2024-02-08 09:28:19 -07:00
t . Errorf (
"failed to query localapi for filetarget on %s, err: %s" ,
client . Hostname ( ) ,
err ,
)
2023-12-09 10:09:24 -07:00
}
}
2022-10-23 06:13:22 -06:00
for _ , client := range allClients {
command := [ ] string { "touch" , fmt . Sprintf ( "/tmp/file_from_%s" , client . Hostname ( ) ) }
2022-11-03 10:00:23 -06:00
if _ , _ , err := client . Execute ( command ) ; err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to create taildrop file on %s, err: %s" , client . Hostname ( ) , err )
2022-10-23 06:13:22 -06:00
}
for _ , peer := range allClients {
if client . Hostname ( ) == peer . Hostname ( ) {
continue
}
// It is safe to ignore this error as we handled it when caching it
peerFQDN , _ := peer . FQDN ( )
t . Run ( fmt . Sprintf ( "%s-%s" , client . Hostname ( ) , peer . Hostname ( ) ) , func ( t * testing . T ) {
command := [ ] string {
"tailscale" , "file" , "cp" ,
fmt . Sprintf ( "/tmp/file_from_%s" , client . Hostname ( ) ) ,
fmt . Sprintf ( "%s:" , peerFQDN ) ,
}
err := retry ( 10 , 1 * time . Second , func ( ) error {
t . Logf (
"Sending file from %s to %s\n" ,
client . Hostname ( ) ,
peer . Hostname ( ) ,
)
2022-11-03 10:00:23 -06:00
_ , _ , err := client . Execute ( command )
2022-10-23 06:13:22 -06:00
return err
} )
if err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf (
2023-12-09 10:09:24 -07:00
"failed to send taildrop file on %s with command %q, err: %s" ,
2022-10-23 06:13:22 -06:00
client . Hostname ( ) ,
2023-12-09 10:09:24 -07:00
strings . Join ( command , " " ) ,
2022-10-23 06:13:22 -06:00
err ,
)
}
} )
}
}
for _ , client := range allClients {
command := [ ] string {
"tailscale" , "file" ,
"get" ,
"/tmp/" ,
}
2022-11-03 10:00:23 -06:00
if _ , _ , err := client . Execute ( command ) ; err != nil {
2023-08-29 00:33:33 -06:00
t . Fatalf ( "failed to get taildrop file on %s, err: %s" , client . Hostname ( ) , err )
2022-10-23 06:13:22 -06:00
}
for _ , peer := range allClients {
if client . Hostname ( ) == peer . Hostname ( ) {
continue
}
t . Run ( fmt . Sprintf ( "%s-%s" , client . Hostname ( ) , peer . Hostname ( ) ) , func ( t * testing . T ) {
command := [ ] string {
"ls" ,
fmt . Sprintf ( "/tmp/file_from_%s" , peer . Hostname ( ) ) ,
}
log . Printf (
"Checking file in %s from %s\n" ,
client . Hostname ( ) ,
peer . Hostname ( ) ,
)
2022-11-03 10:00:23 -06:00
result , _ , err := client . Execute ( command )
2023-08-29 00:33:33 -06:00
assertNoErrf ( t , "failed to execute command to ls taildrop: %s" , err )
2022-10-23 06:13:22 -06:00
log . Printf ( "Result for %s: %s\n" , peer . Hostname ( ) , result )
if fmt . Sprintf ( "/tmp/file_from_%s\n" , peer . Hostname ( ) ) != result {
2023-08-29 00:33:33 -06:00
t . Fatalf (
2022-10-23 06:13:22 -06:00
"taildrop result is not correct %s, wanted %s" ,
result ,
fmt . Sprintf ( "/tmp/file_from_%s\n" , peer . Hostname ( ) ) ,
)
}
} )
}
}
}
2022-10-24 06:59:14 -06:00
2024-02-08 23:26:41 -07:00
func TestResolveMagicDNS ( t * testing . T ) {
IntegrationSkip ( t )
t . Parallel ( )
scenario , err := NewScenario ( )
assertNoErr ( t , err )
defer scenario . Shutdown ( )
spec := map [ string ] int {
"magicdns1" : len ( MustTestVersions ) ,
"magicdns2" : len ( MustTestVersions ) ,
}
err = scenario . CreateHeadscaleEnv ( spec , [ ] tsic . Option { } , hsic . WithTestName ( "magicdns" ) )
assertNoErrHeadscaleEnv ( t , err )
allClients , err := scenario . ListTailscaleClients ( )
assertNoErrListClients ( t , err )
err = scenario . WaitForTailscaleSync ( )
assertNoErrSync ( t , err )
2024-02-23 02:59:24 -07:00
// assertClientsState(t, allClients)
2024-02-08 23:26:41 -07:00
// Poor mans cache
_ , err = scenario . ListTailscaleClientsFQDNs ( )
assertNoErrListFQDN ( t , err )
_ , err = scenario . ListTailscaleClientsIPs ( )
assertNoErrListClientIPs ( t , err )
for _ , client := range allClients {
for _ , peer := range allClients {
// It is safe to ignore this error as we handled it when caching it
peerFQDN , _ := peer . FQDN ( )
command := [ ] string {
"tailscale" ,
"ip" , peerFQDN ,
}
result , _ , err := client . Execute ( command )
if err != nil {
t . Fatalf (
"failed to execute resolve/ip command %s from %s: %s" ,
peerFQDN ,
client . Hostname ( ) ,
err ,
)
}
ips , err := peer . IPs ( )
if err != nil {
t . Fatalf (
"failed to get ips for %s: %s" ,
peer . Hostname ( ) ,
err ,
)
}
for _ , ip := range ips {
if ! strings . Contains ( result , ip . String ( ) ) {
t . Fatalf ( "ip %s is not found in \n%s\n" , ip . String ( ) , result )
}
}
}
}
}
2023-02-02 08:05:52 -07:00
// TestExpireNode expires one node via the headscale CLI and verifies that
// all peers see it as expired (and cannot ping it), while the expired node
// itself transitions to the NeedsLogin state.
func TestExpireNode(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario()
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("expirenode"))
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	// Baseline: everything must be reachable before we expire anything.
	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps))

	for _, client := range allClients {
		status, err := client.Status()
		assertNoErr(t, err)

		// Assert that we have the original count - self
		assert.Len(t, status.Peers(), spec["user1"]-1)
	}

	headscale, err := scenario.Headscale()
	assertNoErr(t, err)

	// TODO(kradalby): This is Headscale specific and would not play nicely
	// with other implementations of the ControlServer interface
	result, err := headscale.Execute([]string{
		"headscale", "nodes", "expire", "--identifier", "1", "--output", "json",
	})
	assertNoErr(t, err)

	var node v1.Node
	err = json.Unmarshal([]byte(result), &node)
	assertNoErr(t, err)

	var expiredNodeKey key.NodePublic
	err = expiredNodeKey.UnmarshalText([]byte(node.GetNodeKey()))
	assertNoErr(t, err)

	t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String())

	// Give the expiry time to propagate to all clients before checking.
	// NOTE(review): a fixed 2m sleep is timing-sensitive — confirm whether
	// a poll-until-condition would be more reliable here.
	time.Sleep(2 * time.Minute)

	now := time.Now()

	// Verify that the expired node has been marked in all peers list.
	for _, client := range allClients {
		status, err := client.Status()
		assertNoErr(t, err)

		if client.Hostname() != node.GetName() {
			t.Logf("available peers of %s: %v", client.Hostname(), status.Peers())

			// Ensures that the node is present, and that it is expired.
			if peerStatus, ok := status.Peer[expiredNodeKey]; ok {
				assertNotNil(t, peerStatus.Expired)
				assert.NotNil(t, peerStatus.KeyExpiry)

				t.Logf(
					"node %q should have a key expire before %s, was %s",
					peerStatus.HostName,
					now.String(),
					peerStatus.KeyExpiry,
				)
				if peerStatus.KeyExpiry != nil {
					assert.Truef(
						t,
						peerStatus.KeyExpiry.Before(now),
						"node %q should have a key expire before %s, was %s",
						peerStatus.HostName,
						now.String(),
						peerStatus.KeyExpiry,
					)
				}

				assert.Truef(
					t,
					peerStatus.Expired,
					"node %q should be expired, expired is %v",
					peerStatus.HostName,
					peerStatus.Expired,
				)

				// Pinging an expired node must be rejected by the client.
				_, stderr, _ := client.Execute([]string{"tailscale", "ping", node.GetName()})
				if !strings.Contains(stderr, "node key has expired") {
					t.Errorf(
						"expected to be unable to ping expired host %q from %q",
						node.GetName(),
						client.Hostname(),
					)
				}
			} else {
				t.Errorf("failed to find node %q with nodekey (%s) in mapresponse, should be present even if it is expired", node.GetName(), expiredNodeKey)
			}
		} else {
			// This branch is the expired node itself.
			if status.Self.KeyExpiry != nil {
				assert.Truef(t, status.Self.KeyExpiry.Before(now), "node %q should have a key expire before %s, was %s", status.Self.HostName, now.String(), status.Self.KeyExpiry)
			}

			// NeedsLogin means that the node has understood that it is no longer
			// valid.
			assert.Equalf(t, "NeedsLogin", status.BackendState, "checking node %q", status.Self.HostName)
		}
	}
}
2024-02-23 02:59:24 -07:00
// TestNodeOnlineStatus verifies, over an extended period, that all nodes
// stay marked online both in headscale's node list and in every client's
// peer status (regression check for long-lived connections; see #1561).
func TestNodeOnlineStatus(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario()
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("online"))
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps))

	for _, client := range allClients {
		status, err := client.Status()
		assertNoErr(t, err)

		// Assert that we have the original count - self
		assert.Len(t, status.Peers(), len(MustTestVersions)-1)
	}

	headscale, err := scenario.Headscale()
	assertNoErr(t, err)

	// Duration is chosen arbitrarily, 10m is reported in #1561
	testDuration := 12 * time.Minute
	start := time.Now()
	end := start.Add(testDuration)

	log.Printf("Starting online test from %v to %v", start, end)

	for {
		// Let the test run continuously for X minutes to verify
		// all nodes stay connected and has the expected status over time.
		if end.Before(time.Now()) {
			return
		}

		result, err := headscale.Execute([]string{
			"headscale", "nodes", "list", "--output", "json",
		})
		assertNoErr(t, err)

		var nodes []*v1.Node
		err = json.Unmarshal([]byte(result), &nodes)
		assertNoErr(t, err)

		// Verify that headscale reports the nodes as online
		for _, node := range nodes {
			// All nodes should be online
			assert.Truef(
				t,
				node.GetOnline(),
				"expected %s to have online status in Headscale, marked as offline %s after start",
				node.GetName(),
				time.Since(start),
			)
		}

		// Verify that all nodes report all nodes to be online
		for _, client := range allClients {
			status, err := client.Status()
			assertNoErr(t, err)

			for _, peerKey := range status.Peers() {
				peerStatus := status.Peer[peerKey]

				// .Online is only available from CapVer 16, which
				// is not present in 1.18 which is the lowest we
				// test.
				if strings.Contains(client.Hostname(), "1-18") {
					continue
				}

				// All peers of this node are reporting to be
				// connected to the control server
				assert.Truef(
					t,
					peerStatus.Online,
					"expected node %s to be marked as online in %s peer list, marked as offline %s after start",
					peerStatus.HostName,
					client.Hostname(),
					time.Since(start),
				)
			}
		}

		// Check maximum once per second
		time.Sleep(time.Second)
	}
}
2024-02-23 02:59:24 -07:00
// TestPingAllByIPManyUpDown is a variant of the PingAll
// test which will take the tailscale node up and down
// three times ensuring they are able to re-establish connectivity.
func TestPingAllByIPManyUpDown(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario()
	assertNoErr(t, err)
	defer scenario.Shutdown()

	// TODO(kradalby): it does not look like the user thing works, only second
	// get created? maybe only when many?
	spec := map[string]int{
		"user1": len(MustTestVersions),
		"user2": len(MustTestVersions),
	}

	// Same embedded-DERP configuration as TestPingAllByIP so relayed paths
	// are exercised through the up/down cycles as well.
	headscaleConfig := map[string]string{
		"HEADSCALE_DERP_URLS":                    "",
		"HEADSCALE_DERP_SERVER_ENABLED":          "true",
		"HEADSCALE_DERP_SERVER_REGION_ID":        "999",
		"HEADSCALE_DERP_SERVER_REGION_CODE":      "headscale",
		"HEADSCALE_DERP_SERVER_REGION_NAME":      "Headscale Embedded DERP",
		"HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR": "0.0.0.0:3478",
		"HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH": "/tmp/derp.key",
		// Envknob for enabling DERP debug logs
		"DERP_DEBUG_LOGS":        "true",
		"DERP_PROBER_DEBUG_LOGS": "true",
	}

	err = scenario.CreateHeadscaleEnv(spec,
		[]tsic.Option{},
		hsic.WithTestName("pingallbyip"),
		hsic.WithConfigEnv(headscaleConfig),
		hsic.WithTLS(),
		hsic.WithHostnameAsServerURL(),
	)
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	// Baseline connectivity check before any down/up cycling.
	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

	for run := range 3 {
		t.Logf("Starting DownUpPing run %d", run+1)

		// NOTE(review): return values of Down()/Up() are discarded here —
		// confirm whether they can fail and should be asserted.
		for _, client := range allClients {
			t.Logf("taking down %q", client.Hostname())
			client.Down()
		}

		time.Sleep(5 * time.Second)

		for _, client := range allClients {
			t.Logf("bringing up %q", client.Hostname())
			client.Up()
		}

		time.Sleep(5 * time.Second)

		err = scenario.WaitForTailscaleSync()
		assertNoErrSync(t, err)

		// Connectivity must be fully restored after every cycle.
		success := pingAllHelper(t, allClients, allAddrs)
		t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
	}
}