node selfupdate and fix subnet router when ACL is enabled (#1673)
Fixes #1604
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
parent 65376e2842 · commit 1e22f17f36
9 changed files with 506 additions and 0 deletions
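At a high level, the change does two things, both visible in the hunks below: when a route is enabled (or newly advertised during polling), headscale now sends the advertising node a StateSelfUpdate so that node regenerates its own map response and packet filter, and ReduceFilterRules keeps rule destinations that cover a node's advertised RoutableIPs so a subnet router is not filtered away from its own routes. The following toy program only illustrates the self-update pattern with stand-in types; it is not the headscale API (the real types live in hscontrol/types and hscontrol/notifier, as shown in the diff):

package main

import "fmt"

// Illustrative stand-ins for headscale's types; simplified on purpose.
type StateUpdateType int

const StateSelfUpdate StateUpdateType = iota

type Node struct{ MachineKey string }

type StateUpdate struct {
    Type        StateUpdateType
    ChangeNodes []*Node
}

// Valid loosely mimics StateUpdate.Valid: only non-empty updates are sent.
func (u StateUpdate) Valid() bool { return len(u.ChangeNodes) > 0 }

// notifyByMachineKey mimics notifier.NotifyByMachineKey: the update is
// delivered only to the node that triggered it, so that node refreshes
// its own map response (and packet filter).
func notifyByMachineKey(update StateUpdate, machineKey string) {
    fmt.Printf("self-update for %s carrying %d node(s)\n", machineKey, len(update.ChangeNodes))
}

func main() {
    node := &Node{MachineKey: "mkey:example"}

    selfUpdate := StateUpdate{
        Type:        StateSelfUpdate,
        ChangeNodes: []*Node{node},
    }
    if selfUpdate.Valid() {
        notifyByMachineKey(selfUpdate, node.MachineKey)
    }
}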
.github/workflows/test-integration-v2-TestSubnetRouteACL.yaml (new file, vendored, 67 additions)
@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestSubnetRouteACL

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  TestSubnetRouteACL:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - uses: DeterminateSystems/nix-installer-action@main
      - uses: DeterminateSystems/magic-nix-cache-action@main
      - uses: satackey/action-docker-layer-caching@main
        continue-on-error: true

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - name: Run TestSubnetRouteACL
        uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
        with:
          attempt_limit: 5
          command: |
            nix develop --command -- docker run \
              --tty --rm \
              --volume ~/.cache/hs-integration-go:/go \
              --name headscale-test-suite \
              --volume $PWD:$PWD -w $PWD/integration \
              --volume /var/run/docker.sock:/var/run/docker.sock \
              --volume $PWD/control_logs:/tmp/control \
              golang:1 \
              go run gotest.tools/gotestsum@latest -- ./... \
                -failfast \
                -timeout 120m \
                -parallel 1 \
                -run "^TestSubnetRouteACL$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: pprof
          path: "control_logs/*.pprof.tar"
@@ -739,6 +739,19 @@ func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) error {
            stateUpdate, node.MachineKey.String())
    }

    // Send an update to the node itself to ensure it
    // has an updated packetfilter allowing the new route
    // if it is defined in the ACL.
    selfUpdate := types.StateUpdate{
        Type:        types.StateSelfUpdate,
        ChangeNodes: types.Nodes{node},
    }
    if selfUpdate.Valid() {
        hsdb.notifier.NotifyByMachineKey(
            selfUpdate,
            node.MachineKey)
    }

    return nil
}
@@ -278,6 +278,18 @@ func (m *Mapper) LiteMapResponse(
        return nil, err
    }

    rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
        pol,
        node,
        nodeMapToList(m.peers),
    )
    if err != nil {
        return nil, err
    }

    resp.PacketFilter = policy.ReduceFilterRules(node, rules)
    resp.SSHPolicy = sshPolicy

    return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}
@@ -250,6 +250,21 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.FilterRule {
            if node.IPAddresses.InIPSet(expanded) {
                dests = append(dests, dest)
            }

            // If the node exposes routes, ensure they are not removed
            // when the filters are reduced.
            if node.Hostinfo != nil {
                // TODO(kradalby): Evaluate if we should only keep
                // the routes if the route is enabled. This will
                // require database access in this part of the code.
                if len(node.Hostinfo.RoutableIPs) > 0 {
                    for _, routableIP := range node.Hostinfo.RoutableIPs {
                        if expanded.ContainsPrefix(routableIP) {
                            dests = append(dests, dest)
                        }
                    }
                }
            }
        }

        if len(dests) > 0 {
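The new branch above keeps a rule destination when a node's advertised routes fall inside it, even though the node's own Tailscale addresses do not. A small, self-contained sketch of that distinction, assuming the go4.org/netipx package (the source of the IP-set ContainsPrefix method used above):

package main

import (
    "fmt"
    "net/netip"

    "go4.org/netipx"
)

func main() {
    // Destination of an ACL rule ("10.33.0.0/16:*"), expanded to an IP set,
    // loosely mirroring the "expanded" set in ReduceFilterRules.
    var b netipx.IPSetBuilder
    b.AddPrefix(netip.MustParsePrefix("10.33.0.0/16"))
    expanded, err := b.IPSet()
    if err != nil {
        panic(err)
    }

    // The subnet router's own Tailscale address is not in the destination,
    // so reducing the filter on addresses alone would drop this rule...
    fmt.Println(expanded.Contains(netip.MustParseAddr("100.64.0.1"))) // false

    // ...but the route the node advertises is contained in it, so the
    // destination must be preserved for the subnet router to pass traffic.
    fmt.Println(expanded.ContainsPrefix(netip.MustParsePrefix("10.33.0.0/16"))) // true
}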
@@ -1901,6 +1901,81 @@ func TestReduceFilterRules(t *testing.T) {
            },
            want: []tailcfg.FilterRule{},
        },
        {
            name: "1604-subnet-routers-are-preserved",
            pol: ACLPolicy{
                Groups: Groups{
                    "group:admins": {"user1"},
                },
                ACLs: []ACL{
                    {
                        Action:       "accept",
                        Sources:      []string{"group:admins"},
                        Destinations: []string{"group:admins:*"},
                    },
                    {
                        Action:       "accept",
                        Sources:      []string{"group:admins"},
                        Destinations: []string{"10.33.0.0/16:*"},
                    },
                },
            },
            node: &types.Node{
                IPAddresses: types.NodeAddresses{
                    netip.MustParseAddr("100.64.0.1"),
                    netip.MustParseAddr("fd7a:115c:a1e0::1"),
                },
                User: types.User{Name: "user1"},
                Hostinfo: &tailcfg.Hostinfo{
                    RoutableIPs: []netip.Prefix{
                        netip.MustParsePrefix("10.33.0.0/16"),
                    },
                },
            },
            peers: types.Nodes{
                &types.Node{
                    IPAddresses: types.NodeAddresses{
                        netip.MustParseAddr("100.64.0.2"),
                        netip.MustParseAddr("fd7a:115c:a1e0::2"),
                    },
                    User: types.User{Name: "user1"},
                },
            },
            want: []tailcfg.FilterRule{
                {
                    SrcIPs: []string{
                        "100.64.0.1/32",
                        "100.64.0.2/32",
                        "fd7a:115c:a1e0::1/128",
                        "fd7a:115c:a1e0::2/128",
                    },
                    DstPorts: []tailcfg.NetPortRange{
                        {
                            IP:    "100.64.0.1/32",
                            Ports: tailcfg.PortRangeAny,
                        },
                        {
                            IP:    "fd7a:115c:a1e0::1/128",
                            Ports: tailcfg.PortRangeAny,
                        },
                    },
                },
                {
                    SrcIPs: []string{
                        "100.64.0.1/32",
                        "100.64.0.2/32",
                        "fd7a:115c:a1e0::1/128",
                        "fd7a:115c:a1e0::2/128",
                    },
                    DstPorts: []tailcfg.NetPortRange{
                        {
                            IP:    "10.33.0.0/16",
                            Ports: tailcfg.PortRangeAny,
                        },
                    },
                },
            },
        },
    }

    for _, tt := range tests {
@@ -153,6 +153,8 @@ func (h *Headscale) handlePoll(
        return
    }

    // Send an update to all peers to propagate the new routes
    // available.
    stateUpdate := types.StateUpdate{
        Type:        types.StatePeerChanged,
        ChangeNodes: types.Nodes{node},
@@ -164,6 +166,19 @@ func (h *Headscale) handlePoll(
            node.MachineKey.String())
    }

    // Send an update to the node itself to ensure it
    // has an updated packetfilter allowing the new route
    // if it is defined in the ACL.
    selfUpdate := types.StateUpdate{
        Type:        types.StateSelfUpdate,
        ChangeNodes: types.Nodes{node},
    }
    if selfUpdate.Valid() {
        h.nodeNotifier.NotifyByMachineKey(
            selfUpdate,
            node.MachineKey)
    }

        return
    }
}
@@ -378,6 +393,16 @@ func (h *Headscale) handlePoll(
    var data []byte
    var err error

    // Ensure the node object is updated, for example, there
    // might have been a hostinfo update in a sidechannel
    // which contains data needed to generate a map response.
    node, err = h.db.GetNodeByMachineKey(node.MachineKey)
    if err != nil {
        logErr(err, "Could not get machine from db")

        return
    }

    switch update.Type {
    case types.StateFullUpdate:
        logInfo("Sending Full MapResponse")
@@ -9,11 +9,15 @@ import (
    "testing"
    "time"

    "github.com/google/go-cmp/cmp"
    v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
    "github.com/juanfont/headscale/hscontrol/policy"
    "github.com/juanfont/headscale/hscontrol/util"
    "github.com/juanfont/headscale/integration/hsic"
    "github.com/juanfont/headscale/integration/tsic"
    "github.com/stretchr/testify/assert"
    "tailscale.com/types/ipproto"
    "tailscale.com/wgengine/filter"
)

// This test is both testing the routes command and the propagation of
@@ -921,3 +925,271 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) {
    assert.Equal(t, true, reAdvertisedRoutes[0].GetEnabled())
    assert.Equal(t, true, reAdvertisedRoutes[0].GetIsPrimary())
}

// TestSubnetRouteACL verifies that Subnet routes are distributed
// as expected when ACLs are activated.
// It implements the issue from
// https://github.com/juanfont/headscale/issues/1604
func TestSubnetRouteACL(t *testing.T) {
    IntegrationSkip(t)
    t.Parallel()

    user := "subnet-route-acl"

    scenario, err := NewScenario()
    assertNoErrf(t, "failed to create scenario: %s", err)
    defer scenario.Shutdown()

    spec := map[string]int{
        user: 2,
    }

    err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
        &policy.ACLPolicy{
            Groups: policy.Groups{
                "group:admins": {user},
            },
            ACLs: []policy.ACL{
                {
                    Action:       "accept",
                    Sources:      []string{"group:admins"},
                    Destinations: []string{"group:admins:*"},
                },
                {
                    Action:       "accept",
                    Sources:      []string{"group:admins"},
                    Destinations: []string{"10.33.0.0/16:*"},
                },
                // {
                //     Action:       "accept",
                //     Sources:      []string{"group:admins"},
                //     Destinations: []string{"0.0.0.0/0:*"},
                // },
            },
        },
    ))
    assertNoErrHeadscaleEnv(t, err)

    allClients, err := scenario.ListTailscaleClients()
    assertNoErrListClients(t, err)

    err = scenario.WaitForTailscaleSync()
    assertNoErrSync(t, err)

    headscale, err := scenario.Headscale()
    assertNoErrGetHeadscale(t, err)

    expectedRoutes := map[string]string{
        "1": "10.33.0.0/16",
    }

    // Sort nodes by ID
    sort.SliceStable(allClients, func(i, j int) bool {
        statusI, err := allClients[i].Status()
        if err != nil {
            return false
        }

        statusJ, err := allClients[j].Status()
        if err != nil {
            return false
        }

        return statusI.Self.ID < statusJ.Self.ID
    })

    subRouter1 := allClients[0]

    client := allClients[1]

    // advertise HA route on node 1 and 2
    // ID 1 will be primary
    // ID 2 will be secondary
    for _, client := range allClients {
        status, err := client.Status()
        assertNoErr(t, err)

        if route, ok := expectedRoutes[string(status.Self.ID)]; ok {
            command := []string{
                "tailscale",
                "set",
                "--advertise-routes=" + route,
            }
            _, _, err = client.Execute(command)
            assertNoErrf(t, "failed to advertise route: %s", err)
        }
    }

    err = scenario.WaitForTailscaleSync()
    assertNoErrSync(t, err)

    var routes []*v1.Route
    err = executeAndUnmarshal(
        headscale,
        []string{
            "headscale",
            "routes",
            "list",
            "--output",
            "json",
        },
        &routes,
    )

    assertNoErr(t, err)
    assert.Len(t, routes, 1)

    for _, route := range routes {
        assert.Equal(t, true, route.GetAdvertised())
        assert.Equal(t, false, route.GetEnabled())
        assert.Equal(t, false, route.GetIsPrimary())
    }

    // Verify that no routes have been sent to the clients;
    // they are not yet enabled.
    for _, client := range allClients {
        status, err := client.Status()
        assertNoErr(t, err)

        for _, peerKey := range status.Peers() {
            peerStatus := status.Peer[peerKey]

            assert.Nil(t, peerStatus.PrimaryRoutes)
        }
    }

    // Enable all routes
    for _, route := range routes {
        _, err = headscale.Execute(
            []string{
                "headscale",
                "routes",
                "enable",
                "--route",
                strconv.Itoa(int(route.GetId())),
            })
        assertNoErr(t, err)
    }

    time.Sleep(5 * time.Second)

    var enablingRoutes []*v1.Route
    err = executeAndUnmarshal(
        headscale,
        []string{
            "headscale",
            "routes",
            "list",
            "--output",
            "json",
        },
        &enablingRoutes,
    )
    assertNoErr(t, err)
    assert.Len(t, enablingRoutes, 1)

    // Node 1 has active route
    assert.Equal(t, true, enablingRoutes[0].GetAdvertised())
    assert.Equal(t, true, enablingRoutes[0].GetEnabled())
    assert.Equal(t, true, enablingRoutes[0].GetIsPrimary())

    // Verify that the client has routes from the primary machine
    srs1, _ := subRouter1.Status()

    clientStatus, err := client.Status()
    assertNoErr(t, err)

    srs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey]

    assertNotNil(t, srs1PeerStatus.PrimaryRoutes)

    t.Logf("subnet1 has following routes: %v", srs1PeerStatus.PrimaryRoutes.AsSlice())
    assert.Len(t, srs1PeerStatus.PrimaryRoutes.AsSlice(), 1)
    assert.Contains(
        t,
        srs1PeerStatus.PrimaryRoutes.AsSlice(),
        netip.MustParsePrefix(expectedRoutes[string(srs1.Self.ID)]),
    )

    clientNm, err := client.Netmap()
    assertNoErr(t, err)

    wantClientFilter := []filter.Match{
        {
            IPProto: []ipproto.Proto{
                ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
            },
            Srcs: []netip.Prefix{
                netip.MustParsePrefix("100.64.0.1/32"),
                netip.MustParsePrefix("100.64.0.2/32"),
                netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
                netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
            },
            Dsts: []filter.NetPortRange{
                {
                    Net:   netip.MustParsePrefix("100.64.0.2/32"),
                    Ports: filter.PortRange{0, 0xffff},
                },
                {
                    Net:   netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
                    Ports: filter.PortRange{0, 0xffff},
                },
            },
            Caps: []filter.CapMatch{},
        },
    }

    if diff := cmp.Diff(wantClientFilter, clientNm.PacketFilter, util.PrefixComparer); diff != "" {
        t.Errorf("Client (%s) filter, unexpected result (-want +got):\n%s", client.Hostname(), diff)
    }

    subnetNm, err := subRouter1.Netmap()
    assertNoErr(t, err)

    wantSubnetFilter := []filter.Match{
        {
            IPProto: []ipproto.Proto{
                ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
            },
            Srcs: []netip.Prefix{
                netip.MustParsePrefix("100.64.0.1/32"),
                netip.MustParsePrefix("100.64.0.2/32"),
                netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
                netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
            },
            Dsts: []filter.NetPortRange{
                {
                    Net:   netip.MustParsePrefix("100.64.0.1/32"),
                    Ports: filter.PortRange{0, 0xffff},
                },
                {
                    Net:   netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
                    Ports: filter.PortRange{0, 0xffff},
                },
            },
            Caps: []filter.CapMatch{},
        },
        {
            IPProto: []ipproto.Proto{
                ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
            },
            Srcs: []netip.Prefix{
                netip.MustParsePrefix("100.64.0.1/32"),
                netip.MustParsePrefix("100.64.0.2/32"),
                netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
                netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
            },
            Dsts: []filter.NetPortRange{
                {
                    Net:   netip.MustParsePrefix("10.33.0.0/16"),
                    Ports: filter.PortRange{0, 0xffff},
                },
            },
            Caps: []filter.CapMatch{},
        },
    }

    if diff := cmp.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.PrefixComparer); diff != "" {
        t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff)
    }
}
@@ -7,6 +7,7 @@ import (
    "github.com/juanfont/headscale/integration/dockertestutil"
    "github.com/juanfont/headscale/integration/tsic"
    "tailscale.com/ipn/ipnstate"
    "tailscale.com/types/netmap"
)

// nolint
@@ -26,6 +27,7 @@ type TailscaleClient interface {
    IPs() ([]netip.Addr, error)
    FQDN() (string, error)
    Status() (*ipnstate.Status, error)
    Netmap() (*netmap.NetworkMap, error)
    WaitForNeedsLogin() error
    WaitForRunning() error
    WaitForPeers(expected int) error
@@ -17,6 +17,7 @@ import (
    "github.com/ory/dockertest/v3"
    "github.com/ory/dockertest/v3/docker"
    "tailscale.com/ipn/ipnstate"
    "tailscale.com/types/netmap"
)

const (
@@ -519,6 +520,30 @@ func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
    return &status, err
}

// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
// Only works with Tailscale 1.56.1 and newer.
func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
    command := []string{
        "tailscale",
        "debug",
        "netmap",
    }

    result, stderr, err := t.Execute(command)
    if err != nil {
        fmt.Printf("stderr: %s\n", stderr)

        return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err)
    }

    var nm netmap.NetworkMap
    err = json.Unmarshal([]byte(result), &nm)
    if err != nil {
        return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err)
    }

    return &nm, err
}

// FQDN returns the FQDN as a string of the Tailscale instance.
func (t *TailscaleInContainer) FQDN() (string, error) {
    if t.fqdn != "" {
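For reference, the new Netmap helper simply shells out to "tailscale debug netmap" and decodes the JSON it prints, as the hunk above shows. A rough standalone equivalent, assuming a local Tailscale 1.56.1+ binary on PATH and the tailscale.com Go module, might look like:

package main

import (
    "encoding/json"
    "fmt"
    "os/exec"

    "tailscale.com/types/netmap"
)

func main() {
    // Run the same command the helper wraps inside the container.
    out, err := exec.Command("tailscale", "debug", "netmap").Output()
    if err != nil {
        panic(err)
    }

    // Decode the JSON netmap, exactly as Netmap() does.
    var nm netmap.NetworkMap
    if err := json.Unmarshal(out, &nm); err != nil {
        panic(err)
    }

    // Inspect the packet filter the node received, as TestSubnetRouteACL does.
    fmt.Printf("packet filter rules: %d\n", len(nm.PacketFilter))
}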