diff --git a/.github/workflows/test-integration-v2-TestSubnetRouteACL.yaml b/.github/workflows/test-integration-v2-TestSubnetRouteACL.yaml
new file mode 100644
index 0000000..3cb3f11
--- /dev/null
+++ b/.github/workflows/test-integration-v2-TestSubnetRouteACL.yaml
@@ -0,0 +1,67 @@
+# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
+# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
+
+name: Integration Test v2 - TestSubnetRouteACL
+
+on: [pull_request]
+
+concurrency:
+  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  TestSubnetRouteACL:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - uses: DeterminateSystems/nix-installer-action@main
+      - uses: DeterminateSystems/magic-nix-cache-action@main
+      - uses: satackey/action-docker-layer-caching@main
+        continue-on-error: true
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@v34
+        with:
+          files: |
+            *.nix
+            go.*
+            **/*.go
+            integration_test/
+            config-example.yaml
+
+      - name: Run TestSubnetRouteACL
+        uses: Wandalen/wretry.action@master
+        if: steps.changed-files.outputs.any_changed == 'true'
+        with:
+          attempt_limit: 5
+          command: |
+            nix develop --command -- docker run \
+              --tty --rm \
+              --volume ~/.cache/hs-integration-go:/go \
+              --name headscale-test-suite \
+              --volume $PWD:$PWD -w $PWD/integration \
+              --volume /var/run/docker.sock:/var/run/docker.sock \
+              --volume $PWD/control_logs:/tmp/control \
+              golang:1 \
+              go run gotest.tools/gotestsum@latest -- ./... \
+                -failfast \
+                -timeout 120m \
+                -parallel 1 \
+                -run "^TestSubnetRouteACL$"
+
+      - uses: actions/upload-artifact@v3
+        if: always() && steps.changed-files.outputs.any_changed == 'true'
+        with:
+          name: logs
+          path: "control_logs/*.log"
+
+      - uses: actions/upload-artifact@v3
+        if: always() && steps.changed-files.outputs.any_changed == 'true'
+        with:
+          name: pprof
+          path: "control_logs/*.pprof.tar"
diff --git a/hscontrol/db/node.go b/hscontrol/db/node.go
index ce535b9..880a0e1 100644
--- a/hscontrol/db/node.go
+++ b/hscontrol/db/node.go
@@ -739,6 +739,19 @@ func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) erro
 			stateUpdate, node.MachineKey.String())
 	}
 
+	// Send an update to the node itself to ensure it
+	// has an updated packet filter allowing the new route
+	// if it is defined in the ACL.
+	selfUpdate := types.StateUpdate{
+		Type:        types.StateSelfUpdate,
+		ChangeNodes: types.Nodes{node},
+	}
+	if selfUpdate.Valid() {
+		hsdb.notifier.NotifyByMachineKey(
+			selfUpdate,
+			node.MachineKey)
+	}
+
 	return nil
 }
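The db/node.go hunk above is one half of the fix: besides the StatePeerChanged broadcast that informs peers about the newly enabled route, the advertising node itself now receives a targeted StateSelfUpdate so that its own packet filter is regenerated. A minimal sketch of this two-step fan-out, as a simplified model with toy types rather than headscale's actual notifier API:

```go
package main

import "fmt"

type UpdateType int

const (
	StatePeerChanged UpdateType = iota
	StateSelfUpdate
)

type StateUpdate struct {
	Type UpdateType
	Node string
}

// Notifier is a toy stand-in for headscale's notifier: one update
// channel per connected node, keyed by machine key.
type Notifier struct {
	nodes map[string]chan StateUpdate
}

// NotifyAll models the broadcast that tells every peer about the
// newly enabled route.
func (n *Notifier) NotifyAll(u StateUpdate) {
	for _, ch := range n.nodes {
		ch <- u
	}
}

// NotifyByMachineKey models the targeted update that makes the
// advertising node itself regenerate its packet filter.
func (n *Notifier) NotifyByMachineKey(u StateUpdate, key string) {
	if ch, ok := n.nodes[key]; ok {
		ch <- u
	}
}

func main() {
	n := &Notifier{nodes: map[string]chan StateUpdate{
		"router": make(chan StateUpdate, 2),
		"client": make(chan StateUpdate, 2),
	}}

	n.NotifyAll(StateUpdate{Type: StatePeerChanged, Node: "router"})
	n.NotifyByMachineKey(StateUpdate{Type: StateSelfUpdate, Node: "router"}, "router")

	// The client sees one update, the router sees two.
	fmt.Println(len(n.nodes["client"]), len(n.nodes["router"])) // 1 2
}
```

The self-update matters because the subnet router has to accept forwarded traffic for 10.33.0.0/16; without a refreshed filter it would drop packets that the ACL actually permits.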
diff --git a/hscontrol/mapper/mapper.go b/hscontrol/mapper/mapper.go
index d6404ce..9998f12 100644
--- a/hscontrol/mapper/mapper.go
+++ b/hscontrol/mapper/mapper.go
@@ -278,6 +278,18 @@ func (m *Mapper) LiteMapResponse(
 		return nil, err
 	}
 
+	rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
+		pol,
+		node,
+		nodeMapToList(m.peers),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	resp.PacketFilter = policy.ReduceFilterRules(node, rules)
+	resp.SSHPolicy = sshPolicy
+
 	return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
 }
diff --git a/hscontrol/policy/acls.go b/hscontrol/policy/acls.go
index 4798d81..1dd664c 100644
--- a/hscontrol/policy/acls.go
+++ b/hscontrol/policy/acls.go
@@ -250,6 +250,21 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F
 			if node.IPAddresses.InIPSet(expanded) {
 				dests = append(dests, dest)
 			}
+
+			// If the node exposes routes, ensure they are not removed
+			// when the filters are reduced.
+			if node.Hostinfo != nil {
+				// TODO(kradalby): Evaluate if we should only keep
+				// the routes if the route is enabled. This will
+				// require database access in this part of the code.
+				if len(node.Hostinfo.RoutableIPs) > 0 {
+					for _, routableIP := range node.Hostinfo.RoutableIPs {
+						if expanded.ContainsPrefix(routableIP) {
+							dests = append(dests, dest)
+						}
+					}
+				}
+			}
 		}
 
 		if len(dests) > 0 {
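The acls.go hunk above is the core of the fix for #1604: ReduceFilterRules trims the global filter down to the rules relevant for one node, and previously a destination like 10.33.0.0/16 was dropped on the subnet router because none of the router's own Tailscale IPs fall inside it. A self-contained sketch of the idea using plain net/netip; keepDestination and its arguments are hypothetical names, and the real code checks containment against an expanded IP set rather than a single prefix:

```go
package main

import (
	"fmt"
	"net/netip"
)

// keepDestination reports whether a filter destination should be kept
// for a node: either the node owns an address inside the destination,
// or the destination covers one of the routes the node advertises.
func keepDestination(dest netip.Prefix, nodeAddrs []netip.Addr, routableIPs []netip.Prefix) bool {
	for _, addr := range nodeAddrs {
		if dest.Contains(addr) {
			return true
		}
	}
	for _, route := range routableIPs {
		// Keep the rule when the destination covers the advertised
		// route, e.g. 10.33.0.0/16 covering 10.33.0.0/16. For
		// prefixes, overlap plus a shorter-or-equal mask means
		// the destination contains the route.
		if dest.Overlaps(route) && dest.Bits() <= route.Bits() {
			return true
		}
	}
	return false
}

func main() {
	dest := netip.MustParsePrefix("10.33.0.0/16")
	nodeAddrs := []netip.Addr{netip.MustParseAddr("100.64.0.1")}
	routes := []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}

	// Without the route check this destination would be dropped,
	// since 100.64.0.1 is not inside 10.33.0.0/16.
	fmt.Println(keepDestination(dest, nodeAddrs, nil))    // false
	fmt.Println(keepDestination(dest, nodeAddrs, routes)) // true
}
```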
diff --git a/hscontrol/policy/acls_test.go b/hscontrol/policy/acls_test.go
index c048778..4a74bda 100644
--- a/hscontrol/policy/acls_test.go
+++ b/hscontrol/policy/acls_test.go
@@ -1901,6 +1901,81 @@ func TestReduceFilterRules(t *testing.T) {
 			},
 			want: []tailcfg.FilterRule{},
 		},
+		{
+			name: "1604-subnet-routers-are-preserved",
+			pol: ACLPolicy{
+				Groups: Groups{
+					"group:admins": {"user1"},
+				},
+				ACLs: []ACL{
+					{
+						Action:       "accept",
+						Sources:      []string{"group:admins"},
+						Destinations: []string{"group:admins:*"},
+					},
+					{
+						Action:       "accept",
+						Sources:      []string{"group:admins"},
+						Destinations: []string{"10.33.0.0/16:*"},
+					},
+				},
+			},
+			node: &types.Node{
+				IPAddresses: types.NodeAddresses{
+					netip.MustParseAddr("100.64.0.1"),
+					netip.MustParseAddr("fd7a:115c:a1e0::1"),
+				},
+				User: types.User{Name: "user1"},
+				Hostinfo: &tailcfg.Hostinfo{
+					RoutableIPs: []netip.Prefix{
+						netip.MustParsePrefix("10.33.0.0/16"),
+					},
+				},
+			},
+			peers: types.Nodes{
+				&types.Node{
+					IPAddresses: types.NodeAddresses{
+						netip.MustParseAddr("100.64.0.2"),
+						netip.MustParseAddr("fd7a:115c:a1e0::2"),
+					},
+					User: types.User{Name: "user1"},
+				},
+			},
+			want: []tailcfg.FilterRule{
+				{
+					SrcIPs: []string{
+						"100.64.0.1/32",
+						"100.64.0.2/32",
+						"fd7a:115c:a1e0::1/128",
+						"fd7a:115c:a1e0::2/128",
+					},
+					DstPorts: []tailcfg.NetPortRange{
+						{
+							IP:    "100.64.0.1/32",
+							Ports: tailcfg.PortRangeAny,
+						},
+						{
+							IP:    "fd7a:115c:a1e0::1/128",
+							Ports: tailcfg.PortRangeAny,
+						},
+					},
+				},
+				{
+					SrcIPs: []string{
+						"100.64.0.1/32",
+						"100.64.0.2/32",
+						"fd7a:115c:a1e0::1/128",
+						"fd7a:115c:a1e0::2/128",
+					},
+					DstPorts: []tailcfg.NetPortRange{
+						{
+							IP:    "10.33.0.0/16",
+							Ports: tailcfg.PortRangeAny,
+						},
+					},
+				},
+			},
+		},
 	}
 
 	for _, tt := range tests {
diff --git a/hscontrol/poll.go b/hscontrol/poll.go
index b4ac6b5..c867f26 100644
--- a/hscontrol/poll.go
+++ b/hscontrol/poll.go
@@ -153,6 +153,8 @@ func (h *Headscale) handlePoll(
 			return
 		}
 
+		// Send an update to all peers to propagate the new routes
+		// available.
 		stateUpdate := types.StateUpdate{
 			Type:        types.StatePeerChanged,
 			ChangeNodes: types.Nodes{node},
@@ -164,6 +166,19 @@ func (h *Headscale) handlePoll(
 				node.MachineKey.String())
 		}
 
+		// Send an update to the node itself to ensure it
+		// has an updated packet filter allowing the new route
+		// if it is defined in the ACL.
+		selfUpdate := types.StateUpdate{
+			Type:        types.StateSelfUpdate,
+			ChangeNodes: types.Nodes{node},
+		}
+		if selfUpdate.Valid() {
+			h.nodeNotifier.NotifyByMachineKey(
+				selfUpdate,
+				node.MachineKey)
+		}
+
 		return
 	}
 }
@@ -378,6 +393,16 @@ func (h *Headscale) handlePoll(
 			var data []byte
 			var err error
 
+			// Ensure the node object is updated, for example, there
+			// might have been a hostinfo update in a side channel
+			// which contains data needed to generate a map response.
+			node, err = h.db.GetNodeByMachineKey(node.MachineKey)
+			if err != nil {
+				logErr(err, "Could not get machine from db")
+
+				return
+			}
+
 			switch update.Type {
 			case types.StateFullUpdate:
 				logInfo("Sending Full MapResponse")
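The last poll.go hunk addresses staleness: the long-poll goroutine may hold a node object that was read before a hostinfo change arrived through another request path, so the node is re-read from the database before every map response. A toy illustration of the hazard, with all names hypothetical:

```go
package main

import "fmt"

// Node is a hypothetical, minimal stand-in for types.Node.
type Node struct{ RoutableIPs []string }

// db stands in for persistent state that other handlers mutate,
// e.g. when a client reports new hostinfo on a side channel.
var db = map[string]*Node{"router": {}}

func main() {
	// The poll handler captured this copy when the stream started.
	stale := *db["router"]

	// Meanwhile another request path records a newly advertised route.
	db["router"].RoutableIPs = append(db["router"].RoutableIPs, "10.33.0.0/16")

	// A map response built from the captured copy misses the route;
	// re-reading from the database picks it up.
	fmt.Println("stale:", stale.RoutableIPs)        // stale: []
	fmt.Println("fresh:", db["router"].RoutableIPs) // fresh: [10.33.0.0/16]
}
```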
diff --git a/integration/route_test.go b/integration/route_test.go
index 3edab6a..741ba24 100644
--- a/integration/route_test.go
+++ b/integration/route_test.go
@@ -9,11 +9,15 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
 	"github.com/juanfont/headscale/hscontrol/policy"
+	"github.com/juanfont/headscale/hscontrol/util"
 	"github.com/juanfont/headscale/integration/hsic"
 	"github.com/juanfont/headscale/integration/tsic"
 	"github.com/stretchr/testify/assert"
+	"tailscale.com/types/ipproto"
+	"tailscale.com/wgengine/filter"
 )
 
 // This test is both testing the routes command and the propagation of
@@ -921,3 +925,271 @@ func TestEnableDisableAutoApprovedRoute(t *testing.T) {
 	assert.Equal(t, true, reAdvertisedRoutes[0].GetEnabled())
 	assert.Equal(t, true, reAdvertisedRoutes[0].GetIsPrimary())
 }
+
+// TestSubnetRouteACL verifies that subnet routes are distributed
+// as expected when ACLs are activated.
+// It covers the scenario described in
+// https://github.com/juanfont/headscale/issues/1604
+func TestSubnetRouteACL(t *testing.T) {
+	IntegrationSkip(t)
+	t.Parallel()
+
+	user := "subnet-route-acl"
+
+	scenario, err := NewScenario()
+	assertNoErrf(t, "failed to create scenario: %s", err)
+	defer scenario.Shutdown()
+
+	spec := map[string]int{
+		user: 2,
+	}
+
+	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
+		&policy.ACLPolicy{
+			Groups: policy.Groups{
+				"group:admins": {user},
+			},
+			ACLs: []policy.ACL{
+				{
+					Action:       "accept",
+					Sources:      []string{"group:admins"},
+					Destinations: []string{"group:admins:*"},
+				},
+				{
+					Action:       "accept",
+					Sources:      []string{"group:admins"},
+					Destinations: []string{"10.33.0.0/16:*"},
+				},
+				// {
+				// 	Action:       "accept",
+				// 	Sources:      []string{"group:admins"},
+				// 	Destinations: []string{"0.0.0.0/0:*"},
+				// },
+			},
+		},
+	))
+	assertNoErrHeadscaleEnv(t, err)
+
+	allClients, err := scenario.ListTailscaleClients()
+	assertNoErrListClients(t, err)
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	headscale, err := scenario.Headscale()
+	assertNoErrGetHeadscale(t, err)
+
+	expectedRoutes := map[string]string{
+		"1": "10.33.0.0/16",
+	}
+
+	// Sort nodes by ID
+	sort.SliceStable(allClients, func(i, j int) bool {
+		statusI, err := allClients[i].Status()
+		if err != nil {
+			return false
+		}
+
+		statusJ, err := allClients[j].Status()
+		if err != nil {
+			return false
+		}
+
+		return statusI.Self.ID < statusJ.Self.ID
+	})
+
+	subRouter1 := allClients[0]
+
+	client := allClients[1]
+
+	// Advertise the route on node 1, which will act as the
+	// subnet router. Node 2 stays a plain client.
+	for _, client := range allClients {
+		status, err := client.Status()
+		assertNoErr(t, err)
+
+		if route, ok := expectedRoutes[string(status.Self.ID)]; ok {
+			command := []string{
+				"tailscale",
+				"set",
+				"--advertise-routes=" + route,
+			}
+			_, _, err = client.Execute(command)
+			assertNoErrf(t, "failed to advertise route: %s", err)
+		}
+	}
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	var routes []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&routes,
+	)
+
+	assertNoErr(t, err)
+	assert.Len(t, routes, 1)
+
+	for _, route := range routes {
+		assert.Equal(t, true, route.GetAdvertised())
+		assert.Equal(t, false, route.GetEnabled())
+		assert.Equal(t, false, route.GetIsPrimary())
+	}
+
+	// Verify that no routes have been sent to the clients,
+	// as they are not yet enabled.
+	for _, client := range allClients {
+		status, err := client.Status()
+		assertNoErr(t, err)
+
+		for _, peerKey := range status.Peers() {
+			peerStatus := status.Peer[peerKey]
+
+			assert.Nil(t, peerStatus.PrimaryRoutes)
+		}
+	}
+
+	// Enable all routes
+	for _, route := range routes {
+		_, err = headscale.Execute(
+			[]string{
+				"headscale",
+				"routes",
+				"enable",
+				"--route",
+				strconv.Itoa(int(route.GetId())),
+			})
+		assertNoErr(t, err)
+	}
+
+	time.Sleep(5 * time.Second)
+
+	var enablingRoutes []*v1.Route
+	err = executeAndUnmarshal(
+		headscale,
+		[]string{
+			"headscale",
+			"routes",
+			"list",
+			"--output",
+			"json",
+		},
+		&enablingRoutes,
+	)
+	assertNoErr(t, err)
+	assert.Len(t, enablingRoutes, 1)
+
+	// Node 1 has active route
+	assert.Equal(t, true, enablingRoutes[0].GetAdvertised())
+	assert.Equal(t, true, enablingRoutes[0].GetEnabled())
+	assert.Equal(t, true, enablingRoutes[0].GetIsPrimary())
+
+	// Verify that the client has routes from the primary machine
+	srs1, _ := subRouter1.Status()
+
+	clientStatus, err := client.Status()
+	assertNoErr(t, err)
+
+	srs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey]
+
+	assertNotNil(t, srs1PeerStatus.PrimaryRoutes)
+
+	t.Logf("subnet1 has following routes: %v", srs1PeerStatus.PrimaryRoutes.AsSlice())
+	assert.Len(t, srs1PeerStatus.PrimaryRoutes.AsSlice(), 1)
+	assert.Contains(
+		t,
+		srs1PeerStatus.PrimaryRoutes.AsSlice(),
+		netip.MustParsePrefix(expectedRoutes[string(srs1.Self.ID)]),
+	)
+
+	clientNm, err := client.Netmap()
+	assertNoErr(t, err)
+
+	wantClientFilter := []filter.Match{
+		{
+			IPProto: []ipproto.Proto{
+				ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
+			},
+			Srcs: []netip.Prefix{
+				netip.MustParsePrefix("100.64.0.1/32"),
+				netip.MustParsePrefix("100.64.0.2/32"),
+				netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+				netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+			},
+			Dsts: []filter.NetPortRange{
+				{
+					Net:   netip.MustParsePrefix("100.64.0.2/32"),
+					Ports: filter.PortRange{0, 0xffff},
+				},
+				{
+					Net:   netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+					Ports: filter.PortRange{0, 0xffff},
+				},
+			},
+			Caps: []filter.CapMatch{},
+		},
+	}
+
+	if diff := cmp.Diff(wantClientFilter, clientNm.PacketFilter, util.PrefixComparer); diff != "" {
+		t.Errorf("Client (%s) filter, unexpected result (-want +got):\n%s", client.Hostname(), diff)
+	}
+
+	subnetNm, err := subRouter1.Netmap()
+	assertNoErr(t, err)
+
+	wantSubnetFilter := []filter.Match{
+		{
+			IPProto: []ipproto.Proto{
+				ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
+			},
+			Srcs: []netip.Prefix{
+				netip.MustParsePrefix("100.64.0.1/32"),
+				netip.MustParsePrefix("100.64.0.2/32"),
+				netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+				netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+			},
+			Dsts: []filter.NetPortRange{
+				{
+					Net:   netip.MustParsePrefix("100.64.0.1/32"),
+					Ports: filter.PortRange{0, 0xffff},
+				},
+				{
+					Net:   netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+					Ports: filter.PortRange{0, 0xffff},
+				},
+			},
+			Caps: []filter.CapMatch{},
+		},
+		{
+			IPProto: []ipproto.Proto{
+				ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
+			},
+			Srcs: []netip.Prefix{
+				netip.MustParsePrefix("100.64.0.1/32"),
+				netip.MustParsePrefix("100.64.0.2/32"),
+				netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
+				netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
+			},
+			Dsts: []filter.NetPortRange{
+				{
+					Net:   netip.MustParsePrefix("10.33.0.0/16"),
+					Ports: filter.PortRange{0, 0xffff},
+				},
+			},
+			Caps: []filter.CapMatch{},
+		},
+	}
+
+	if diff := cmp.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.PrefixComparer); diff != "" {
+		t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff)
+	}
+}
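Both cmp.Diff calls above pass util.PrefixComparer, because go-cmp refuses to compare netip.Prefix values on its own (the type has only unexported fields). The helper itself is not part of this diff; a minimal equivalent would presumably look like the following, assuming it is a plain cmp.Comparer:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/google/go-cmp/cmp"
)

// PrefixComparer is a guess at what util.PrefixComparer looks like:
// netip.Prefix is a comparable value type, so == is a sound equality.
var PrefixComparer = cmp.Comparer(func(x, y netip.Prefix) bool {
	return x == y
})

func main() {
	a := []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}
	b := []netip.Prefix{netip.MustParsePrefix("10.33.0.0/16")}

	// Prints nothing: the slices are equal under the comparer.
	fmt.Println(cmp.Diff(a, b, PrefixComparer))
}
```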
diff --git a/integration/tailscale.go b/integration/tailscale.go
index e7bf71b..7187a81 100644
--- a/integration/tailscale.go
+++ b/integration/tailscale.go
@@ -7,6 +7,7 @@ import (
 	"github.com/juanfont/headscale/integration/dockertestutil"
 	"github.com/juanfont/headscale/integration/tsic"
 	"tailscale.com/ipn/ipnstate"
+	"tailscale.com/types/netmap"
 )
 
 // nolint
@@ -26,6 +27,7 @@ type TailscaleClient interface {
 	IPs() ([]netip.Addr, error)
 	FQDN() (string, error)
 	Status() (*ipnstate.Status, error)
+	Netmap() (*netmap.NetworkMap, error)
 	WaitForNeedsLogin() error
 	WaitForRunning() error
 	WaitForPeers(expected int) error
diff --git a/integration/tsic/tsic.go b/integration/tsic/tsic.go
index 7404f6e..c30118d 100644
--- a/integration/tsic/tsic.go
+++ b/integration/tsic/tsic.go
@@ -17,6 +17,7 @@ import (
 	"github.com/ory/dockertest/v3"
 	"github.com/ory/dockertest/v3/docker"
 	"tailscale.com/ipn/ipnstate"
+	"tailscale.com/types/netmap"
 )
 
 const (
@@ -519,6 +520,30 @@ func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
 	return &status, err
 }
 
+// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
+// Only works with Tailscale 1.56.1 and newer.
+func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
+	command := []string{
+		"tailscale",
+		"debug",
+		"netmap",
+	}
+
+	result, stderr, err := t.Execute(command)
+	if err != nil {
+		fmt.Printf("stderr: %s\n", stderr)
+
+		return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err)
+	}
+
+	var nm netmap.NetworkMap
+	err = json.Unmarshal([]byte(result), &nm)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err)
+	}
+
+	return &nm, err
+}
+
 // FQDN returns the FQDN as a string of the Tailscale instance.
 func (t *TailscaleInContainer) FQDN() (string, error) {
 	if t.fqdn != "" {