Compare commits


No commits in common. "6b635b3566b5fd8c05df1687563dfe1d508e1aad" and "d017339af4b13a6eae1e49499f2ba4ae4d991431" have entirely different histories.

120 changed files with 3123 additions and 5411 deletions

@@ -26,7 +26,7 @@ jobs:
          key: ${{ github.ref }}
          path: .cache
      - name: Setup dependencies
-       run: pip install -r docs/requirements.txt
+       run: pip install mkdocs-material pillow cairosvg mkdocs-minify-plugin
      - name: Build docs
        run: mkdocs build --strict
      - name: Upload artifact

@@ -12,10 +12,10 @@ jobs:
    steps:
      - uses: actions/stale@v5
        with:
-         days-before-issue-stale: 90
-         days-before-issue-close: 7
+         days-before-issue-stale: 180
+         days-before-issue-close: 14
          stale-issue-label: "stale"
-         stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
+         stale-issue-message: "This issue is stale because it has been open for 180 days with no activity."
          close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
          days-before-pr-stale: -1
          days-before-pr-close: -1

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLAllowStarDst
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \
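
This same change repeats for every per-test integration workflow below: the base side wraps the docker invocation in Wandalen/wretry.action with attempt_limit: 5 and a command: block, while the head side runs the command directly and passes -tags ts2019 to gotestsum. Reassembled for orientation only (indentation and the lines elided between the two hunks are taken from the analogous full workflow files shown further down in this compare, so treat it as a sketch rather than a verbatim quote), the head-side step looks roughly like this:

      - name: Run TestACLAllowStarDst
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
              -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \
              -run "^TestACLAllowStarDst$"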

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLAllowUser80Dst
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLAllowUserDst
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLDenyAllPort80
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLDevice1CanAccessDevice2
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLHostsInNetMapTable
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLNamedHostsCanReach
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestACLNamedHostsCanReachBySubnet
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestApiKeyCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestAuthKeyLogoutAndRelogin
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestAuthWebFlowAuthenticationPingAll
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestAuthWebFlowLogoutAndRelogin
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestCreateTailscale
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestDERPServerScenario
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestEnablingRoutes
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestEphemeral
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestExpireNode
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestHASubnetRouterFailover
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestHASubnetRouterFailover:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestHASubnetRouterFailover
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestHASubnetRouterFailover$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestHeadscale
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagNoACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagNoACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagNoACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagNoACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagWithACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagWithACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagWithACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagWithACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestNodeCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestNodeExpireCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestNodeMoveCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeOnlineLastSeenStatus
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeOnlineLastSeenStatus:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeOnlineLastSeenStatus
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeOnlineLastSeenStatus$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestNodeRenameCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestNodeTagCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestOIDCAuthenticationPingAll
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestOIDCExpireNodesBasedOnTokenExpiry
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestPingAllByHostname
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestPingAllByIP
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestPreAuthKeyCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestPreAuthKeyCommandReusableEphemeral
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestPreAuthKeyCommandWithoutExpiry
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestResolveMagicDNS
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestSSHIsBlockedInACL
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestSSHMultipleUsersAllToAll
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestSSHNoSSHConfigured
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestSSHOneUserToAll
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestSSHUserOnlyIsolation
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestTaildrop
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestTailscaleNodesJoiningHeadcale
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
            config-example.yaml
      - name: Run TestUserCommand
-       uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
-       with:
-         attempt_limit: 5
-         command: |
+       run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
+             -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \

.gitignore

@@ -1,6 +1,5 @@
 ignored/
 tailscale/
-.vscode/
 # Binaries for programs and plugins
 *.exe

@@ -23,18 +23,11 @@ after improving the test harness as part of adopting [#1460](https://github.com/
 ### BREAKING
 - Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
 - API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
-- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
-  - The latest supported client is 1.36
-- Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)
-  - If no DERP is configured, the server will fail to start, this can be because it cannot load the DERPMap from file or url.
-- Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)
-  - Add a filepath entry to [`derp.server.private_key_path`](https://github.com/juanfont/headscale/blob/b35993981297e18393706b2c963d6db882bba6aa/config-example.yaml#L95)
 ### Changes
-Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644)
 Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484)
 Allow use of the username OIDC claim [#1287](https://github.com/juanfont/headscale/pull/1287)
 SSH support [#1487](https://github.com/juanfont/headscale/pull/1487)
@@ -43,7 +36,6 @@ Use error group handling to ensure tests actually pass [#1535](https://github.co
 Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) taken from [#1480](https://github.com/juanfont/headscale/pull/1480)
 Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)
 Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)
-Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
 Add `oidc.groups_claim`, `oidc.email_claim`, and `oidc.username_claim` to allow setting those claim names [#1594](https://github.com/juanfont/headscale/pull/1594)
 ## 0.22.3 (2023-05-12)
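
The removed BREAKING entry above points at `derp.server.private_key_path` in config-example.yaml. As a rough sketch only (the key name comes from the changelog entry itself; the surrounding keys and the default path are assumptions, not taken from this compare), the corresponding section of the config would look something like:

  derp:
    server:
      # Run the embedded DERP server on this headscale instance
      enabled: false
      # Private key for the embedded DERP server (assumed default path)
      private_key_path: /var/lib/headscale/derp_server_private.key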

@@ -9,7 +9,7 @@ RUN go mod download
 COPY . .
-RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN strip /go/bin/headscale
 RUN test -e /go/bin/headscale

@@ -9,7 +9,7 @@ RUN go mod download
 COPY . .
-RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN test -e /go/bin/headscale
 # Debug image

@@ -10,6 +10,8 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
 else
 endif
+TAGS = -tags ts2019
 # GO_SOURCES = $(wildcard *.go)
 # PROTO_SOURCES = $(wildcard **/*.proto)
 GO_SOURCES = $(call rwildcard,,*.go)
@@ -22,7 +24,7 @@ build:
 dev: lint test build
 test:
-	gotestsum -- -short -coverprofile=coverage.out ./...
+	gotestsum -- $(TAGS) -short -coverprofile=coverage.out ./...
 test_integration:
 	docker run \
@@ -32,7 +34,7 @@ test_integration:
 		-v $$PWD:$$PWD -w $$PWD/integration \
 		-v /var/run/docker.sock:/var/run/docker.sock \
 		golang:1 \
-		go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8
+		go run gotest.tools/gotestsum@latest -- $(TAGS) -failfast ./... -timeout 120m -parallel 8
 lint:
 	golangci-lint run --fix --timeout 10m

@@ -466,13 +466,6 @@ make build
            <sub style="font-size:14px"><b>unreality</b></sub>
        </a>
    </td>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/vsychov>
-            <img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/>
-            <br />
-            <sub style="font-size:14px"><b>MichaelKo</b></sub>
-        </a>
-    </td>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/kevin1sMe>
            <img src=https://avatars.githubusercontent.com/u/6886076?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kevinlin/>
@@ -480,8 +473,6 @@ make build
            <sub style="font-size:14px"><b>kevinlin</b></sub>
        </a>
    </td>
-  </tr>
-  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/QZAiXH>
            <img src=https://avatars.githubusercontent.com/u/23068780?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Snack/>
@@ -489,6 +480,8 @@ make build
            <sub style="font-size:14px"><b>Snack</b></sub>
        </a>
    </td>
+  </tr>
+  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/artemklevtsov>
            <img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/>
@@ -524,8 +517,6 @@ make build
            <sub style="font-size:14px"><b>LIU HANCHENG</b></sub>
        </a>
    </td>
-  </tr>
-  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/motiejus>
            <img src=https://avatars.githubusercontent.com/u/107720?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Motiejus Jakštys/>
@@ -533,6 +524,8 @@ make build
            <sub style="font-size:14px"><b>Motiejus Jakštys</b></sub>
        </a>
    </td>
+  </tr>
+  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/pvinis>
            <img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/>
@@ -554,6 +547,13 @@ make build
            <sub style="font-size:14px"><b>Steven Honson</b></sub>
        </a>
    </td>
+    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+        <a href=https://github.com/vsychov>
+            <img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/>
+            <br />
+            <sub style="font-size:14px"><b>MichaelKo</b></sub>
+        </a>
+    </td>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/ratsclub>
            <img src=https://avatars.githubusercontent.com/u/25647735?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Victor Freire/>
@@ -577,13 +577,6 @@ make build
            <sub style="font-size:14px"><b>thomas</b></sub>
        </a>
    </td>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/puzpuzpuz>
-            <img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
-            <br />
-            <sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
-        </a>
-    </td>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/linsomniac>
            <img src=https://avatars.githubusercontent.com/u/466380?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sean Reifschneider/>
@@ -605,6 +598,13 @@ make build
            <sub style="font-size:14px"><b>Albert Copeland</b></sub>
        </a>
    </td>
+    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+        <a href=https://github.com/puzpuzpuz>
+            <img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
+            <br />
+            <sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
+        </a>
+    </td>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/theryecatcher>
            <img src=https://avatars.githubusercontent.com/u/16442416?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anoop Sundaresh/>
@@ -658,13 +658,6 @@ make build
    </td>
  </tr>
  <tr>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/winterheart>
-            <img src=https://avatars.githubusercontent.com/u/81112?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azamat H. Hackimov/>
-            <br />
-            <sub style="font-size:14px"><b>Azamat H. Hackimov</b></sub>
-        </a>
-    </td>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/stensonb>
            <img src=https://avatars.githubusercontent.com/u/933389?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Bryan Stenson/>
@@ -700,8 +693,6 @@ make build
            <sub style="font-size:14px"><b>Felix Kronlage-Dammers</b></sub>
        </a>
    </td>
-  </tr>
-  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/felixonmars>
            <img src=https://avatars.githubusercontent.com/u/1006477?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Yan/>
@@ -709,6 +700,8 @@ make build
            <sub style="font-size:14px"><b>Felix Yan</b></sub>
        </a>
    </td>
+  </tr>
+  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/gabe565>
            <img src=https://avatars.githubusercontent.com/u/7717888?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Gabe Cook/>
@@ -730,13 +723,6 @@ make build
            <sub style="font-size:14px"><b>hrtkpf</b></sub>
        </a>
    </td>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/jessebot>
-            <img src=https://avatars.githubusercontent.com/u/2389292?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JesseBot/>
-            <br />
-            <sub style="font-size:14px"><b>JesseBot</b></sub>
-        </a>
-    </td>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/jimt>
            <img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/>
@@ -744,8 +730,6 @@ make build
            <sub style="font-size:14px"><b>Jim Tittsler</b></sub>
        </a>
    </td>
-  </tr>
-  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/jsiebens>
            <img src=https://avatars.githubusercontent.com/u/499769?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Johan Siebens/>
@@ -760,6 +744,8 @@ make build
            <sub style="font-size:14px"><b>John Axel Eriksson</b></sub>
        </a>
    </td>
+  </tr>
+  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/ShadowJonathan>
            <img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/>
@@ -788,8 +774,6 @@ make build
            <sub style="font-size:14px"><b>Lucalux</b></sub>
        </a>
    </td>
-  </tr>
-  <tr>
    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
        <a href=https://github.com/foxtrot>
            <img src=https://avatars.githubusercontent.com/u/4153572?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Marc/>
@@ -804,6 +788,8 @@ make build
            <sub style="font-size:14px"><b>Mesar Hameed</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mikejsavage> <a href=https://github.com/mikejsavage>
<img src=https://avatars.githubusercontent.com/u/579299?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael Savage/> <img src=https://avatars.githubusercontent.com/u/579299?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael Savage/>
@ -832,8 +818,6 @@ make build
<sub style="font-size:14px"><b>Pontus N</b></sub> <sub style="font-size:14px"><b>Pontus N</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/nnsee> <a href=https://github.com/nnsee>
<img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/> <img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/>
@ -848,6 +832,8 @@ make build
<sub style="font-size:14px"><b>rcursaru</b></sub> <sub style="font-size:14px"><b>rcursaru</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/renovate-bot> <a href=https://github.com/renovate-bot>
<img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/> <img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/>
@ -864,9 +850,9 @@ make build
</td> </td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/muzy> <a href=https://github.com/muzy>
<img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian/> <img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian Muszytowski/>
<br /> <br />
<sub style="font-size:14px"><b>Sebastian</b></sub> <sub style="font-size:14px"><b>Sebastian Muszytowski</b></sub>
</a> </a>
</td> </td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@ -876,8 +862,6 @@ make build
<sub style="font-size:14px"><b>Shaanan Cohney</b></sub> <sub style="font-size:14px"><b>Shaanan Cohney</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/6ixfalls> <a href=https://github.com/6ixfalls>
<img src=https://avatars.githubusercontent.com/u/23470032?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Six/> <img src=https://avatars.githubusercontent.com/u/23470032?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Six/>
@ -892,6 +876,8 @@ make build
<sub style="font-size:14px"><b>Stefan VanBuren</b></sub> <sub style="font-size:14px"><b>Stefan VanBuren</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/sophware> <a href=https://github.com/sophware>
<img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/> <img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/>
@ -920,8 +906,6 @@ make build
<sub style="font-size:14px"><b>The Gitter Badger</b></sub> <sub style="font-size:14px"><b>The Gitter Badger</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/tianon> <a href=https://github.com/tianon>
<img src=https://avatars.githubusercontent.com/u/161631?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tianon Gravi/> <img src=https://avatars.githubusercontent.com/u/161631?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tianon Gravi/>
@ -936,6 +920,8 @@ make build
<sub style="font-size:14px"><b>Till Hoffmann</b></sub> <sub style="font-size:14px"><b>Till Hoffmann</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/woudsma> <a href=https://github.com/woudsma>
<img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/> <img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/>
@ -964,8 +950,6 @@ make build
<sub style="font-size:14px"><b>Zachary Newell</b></sub> <sub style="font-size:14px"><b>Zachary Newell</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/zekker6> <a href=https://github.com/zekker6>
<img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/> <img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/>
@ -980,6 +964,8 @@ make build
<sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub> <sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Bpazy> <a href=https://github.com/Bpazy>
<img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/> <img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/>
@ -1008,8 +994,6 @@ make build
<sub style="font-size:14px"><b>dnaq</b></sub> <sub style="font-size:14px"><b>dnaq</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/nning> <a href=https://github.com/nning>
<img src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/> <img src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/>
@ -1024,6 +1008,8 @@ make build
<sub style="font-size:14px"><b>ignoramous</b></sub> <sub style="font-size:14px"><b>ignoramous</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jimyag> <a href=https://github.com/jimyag>
<img src=https://avatars.githubusercontent.com/u/69233189?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=jimyag/> <img src=https://avatars.githubusercontent.com/u/69233189?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=jimyag/>
@ -1052,8 +1038,6 @@ make build
<sub style="font-size:14px"><b>ma6174</b></sub> <sub style="font-size:14px"><b>ma6174</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/manju-rn> <a href=https://github.com/manju-rn>
<img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/> <img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/>
@ -1068,6 +1052,8 @@ make build
<sub style="font-size:14px"><b>nicholas-yap</b></sub> <sub style="font-size:14px"><b>nicholas-yap</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/pernila> <a href=https://github.com/pernila>
<img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/> <img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/>
@ -1096,8 +1082,6 @@ make build
<sub style="font-size:14px"><b>zy</b></sub> <sub style="font-size:14px"><b>zy</b></sub>
</a> </a>
</td> </td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0"> <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/atorregrosa-smd> <a href=https://github.com/atorregrosa-smd>
<img src=https://avatars.githubusercontent.com/u/78434679?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Àlex Torregrosa/> <img src=https://avatars.githubusercontent.com/u/78434679?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Àlex Torregrosa/>
View file
@ -0,0 +1,47 @@
package main

import (
	"log"

	"github.com/juanfont/headscale/integration"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/ory/dockertest/v3"
)

func main() {
	log.Printf("creating docker pool")
	pool, err := dockertest.NewPool("")
	if err != nil {
		log.Fatalf("could not connect to docker: %s", err)
	}

	log.Printf("creating docker network")
	network, err := pool.CreateNetwork("docker-integration-net")
	if err != nil {
		log.Fatalf("failed to create or get network: %s", err)
	}

	for _, version := range integration.AllVersions {
		log.Printf("creating container image for Tailscale (%s)", version)
		tsClient, err := tsic.New(
			pool,
			version,
			network,
		)
		if err != nil {
			log.Fatalf("failed to create tailscale node: %s", err)
		}

		err = tsClient.Shutdown()
		if err != nil {
			log.Fatalf("failed to shut down container: %s", err)
		}
	}

	network.Close()

	err = pool.RemoveNetwork(network)
	if err != nil {
		log.Fatalf("failed to remove network: %s", err)
	}
}
View file
@ -56,11 +56,8 @@ jobs:
config-example.yaml config-example.yaml
- name: Run {{.Name}} - name: Run {{.Name}}
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true' if: steps.changed-files.outputs.any_changed == 'true'
with: run: |
attempt_limit: 5
command: |
nix develop --command -- docker run \ nix develop --command -- docker run \
--tty --rm \ --tty --rm \
--volume ~/.cache/hs-integration-go:/go \ --volume ~/.cache/hs-integration-go:/go \
@ -70,6 +67,7 @@ jobs:
--volume $PWD/control_logs:/tmp/control \ --volume $PWD/control_logs:/tmp/control \
golang:1 \ golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \ go run gotest.tools/gotestsum@latest -- ./... \
-tags ts2019 \
-failfast \ -failfast \
-timeout 120m \ -timeout 120m \
-parallel 1 \ -parallel 1 \
View file
@ -67,7 +67,7 @@ var listAPIKeys = &cobra.Command{
} }
if output != "" { if output != "" {
SuccessOutput(response.GetApiKeys(), "", output) SuccessOutput(response.ApiKeys, "", output)
return return
} }
@ -75,11 +75,11 @@ var listAPIKeys = &cobra.Command{
tableData := pterm.TableData{ tableData := pterm.TableData{
{"ID", "Prefix", "Expiration", "Created"}, {"ID", "Prefix", "Expiration", "Created"},
} }
for _, key := range response.GetApiKeys() { for _, key := range response.ApiKeys {
expiration := "-" expiration := "-"
if key.GetExpiration() != nil { if key.GetExpiration() != nil {
expiration = ColourTime(key.GetExpiration().AsTime()) expiration = ColourTime(key.Expiration.AsTime())
} }
tableData = append(tableData, []string{ tableData = append(tableData, []string{
@ -155,7 +155,7 @@ If you loose a key, create a new one and revoke (expire) the old one.`,
return return
} }
SuccessOutput(response.GetApiKey(), response.GetApiKey(), output) SuccessOutput(response.ApiKey, response.ApiKey, output)
}, },
} }
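One column of this apikeys diff reads protobuf fields directly (response.ApiKeys, key.Expiration) while the other goes through the generated getters (response.GetApiKeys(), key.GetExpiration()). A minimal, self-contained sketch of why the getter form is the safer habit (hypothetical Response type here, not headscale's generated code):

```go
package main

import "fmt"

// Response mimics a protobuf-generated message. The generated getter is
// nil-safe, while direct field access panics on a nil message.
// (Hypothetical type for illustration, not headscale's generated code.)
type Response struct {
	ApiKeys []string
}

func (r *Response) GetApiKeys() []string {
	if r == nil {
		return nil
	}
	return r.ApiKeys
}

func main() {
	var resp *Response // nil, e.g. when an RPC returned an error

	fmt.Println(len(resp.GetApiKeys())) // prints 0: getter is safe on nil
	// fmt.Println(len(resp.ApiKeys))   // would panic: nil pointer dereference
}
```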
View file
@ -4,10 +4,10 @@ import (
"fmt" "fmt"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1" v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"tailscale.com/types/key"
) )
const ( const (
@ -93,13 +93,11 @@ var createNodeCmd = &cobra.Command{
return return
} }
if !util.NodePublicKeyRegex.Match([]byte(machineKey)) {
var mkey key.MachinePublic err = errPreAuthKeyMalformed
err = mkey.UnmarshalText([]byte(machineKey))
if err != nil {
ErrorOutput( ErrorOutput(
err, err,
fmt.Sprintf("Failed to parse machine key from flag: %s", err), fmt.Sprintf("Error: %s", err),
output, output,
) )
@ -135,6 +133,6 @@ var createNodeCmd = &cobra.Command{
return return
} }
SuccessOutput(response.GetNode(), "Node created", output) SuccessOutput(response.Node, "Node created", output)
}, },
} }
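For reference, a runnable sketch of the UnmarshalText-based machine key check shown in one column of the createNodeCmd diff above. The key value and its "mkey:" text form are assumptions made for this sketch; the real value arrives via the command's flag.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"tailscale.com/types/key"
)

func main() {
	// Hypothetical input: an all-zero machine key in textual form.
	// The "mkey:" prefix and the hex length are assumptions here.
	machineKey := "mkey:" + strings.Repeat("0", 64)

	var mkey key.MachinePublic
	if err := mkey.UnmarshalText([]byte(machineKey)); err != nil {
		log.Fatalf("failed to parse machine key from flag: %s", err)
	}

	fmt.Println("machine key parsed:", mkey.ShortString())
}
```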
View file
@ -152,8 +152,8 @@ var registerNodeCmd = &cobra.Command{
} }
SuccessOutput( SuccessOutput(
response.GetNode(), response.Node,
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output) fmt.Sprintf("Node %s registered", response.Node.GivenName), output)
}, },
} }
@ -196,12 +196,12 @@ var listNodesCmd = &cobra.Command{
} }
if output != "" { if output != "" {
SuccessOutput(response.GetNodes(), "", output) SuccessOutput(response.Nodes, "", output)
return return
} }
tableData, err := nodesToPtables(user, showTags, response.GetNodes()) tableData, err := nodesToPtables(user, showTags, response.Nodes)
if err != nil { if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output) ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
@ -262,7 +262,7 @@ var expireNodeCmd = &cobra.Command{
return return
} }
SuccessOutput(response.GetNode(), "Node expired", output) SuccessOutput(response.Node, "Node expired", output)
}, },
} }
@ -310,7 +310,7 @@ var renameNodeCmd = &cobra.Command{
return return
} }
SuccessOutput(response.GetNode(), "Node renamed", output) SuccessOutput(response.Node, "Node renamed", output)
}, },
} }
@ -364,7 +364,7 @@ var deleteNodeCmd = &cobra.Command{
prompt := &survey.Confirm{ prompt := &survey.Confirm{
Message: fmt.Sprintf( Message: fmt.Sprintf(
"Do you want to remove the node %s?", "Do you want to remove the node %s?",
getResponse.GetNode().GetName(), getResponse.GetNode().Name,
), ),
} }
err = survey.AskOne(prompt, &confirm) err = survey.AskOne(prompt, &confirm)
@ -473,7 +473,7 @@ var moveNodeCmd = &cobra.Command{
return return
} }
SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output) SuccessOutput(moveResponse.Node, "Node moved to another user", output)
}, },
} }
@ -493,7 +493,7 @@ func nodesToPtables(
"Ephemeral", "Ephemeral",
"Last seen", "Last seen",
"Expiration", "Expiration",
"Connected", "Online",
"Expired", "Expired",
} }
if showTags { if showTags {
@ -507,21 +507,21 @@ func nodesToPtables(
for _, node := range nodes { for _, node := range nodes {
var ephemeral bool var ephemeral bool
if node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() { if node.PreAuthKey != nil && node.PreAuthKey.Ephemeral {
ephemeral = true ephemeral = true
} }
var lastSeen time.Time var lastSeen time.Time
var lastSeenTime string var lastSeenTime string
if node.GetLastSeen() != nil { if node.LastSeen != nil {
lastSeen = node.GetLastSeen().AsTime() lastSeen = node.LastSeen.AsTime()
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05") lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
} }
var expiry time.Time var expiry time.Time
var expiryTime string var expiryTime string
if node.GetExpiry() != nil { if node.Expiry != nil {
expiry = node.GetExpiry().AsTime() expiry = node.Expiry.AsTime()
expiryTime = expiry.Format("2006-01-02 15:04:05") expiryTime = expiry.Format("2006-01-02 15:04:05")
} else { } else {
expiryTime = "N/A" expiryTime = "N/A"
@ -529,7 +529,7 @@ func nodesToPtables(
var machineKey key.MachinePublic var machineKey key.MachinePublic
err := machineKey.UnmarshalText( err := machineKey.UnmarshalText(
[]byte(node.GetMachineKey()), []byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
) )
if err != nil { if err != nil {
machineKey = key.MachinePublic{} machineKey = key.MachinePublic{}
@ -537,14 +537,14 @@ func nodesToPtables(
var nodeKey key.NodePublic var nodeKey key.NodePublic
err = nodeKey.UnmarshalText( err = nodeKey.UnmarshalText(
[]byte(node.GetNodeKey()), []byte(util.NodePublicKeyEnsurePrefix(node.NodeKey)),
) )
if err != nil { if err != nil {
return nil, err return nil, err
} }
var online string var online string
if node.GetOnline() { if node.Online {
online = pterm.LightGreen("online") online = pterm.LightGreen("online")
} else { } else {
online = pterm.LightRed("offline") online = pterm.LightRed("offline")
@ -558,36 +558,36 @@ func nodesToPtables(
} }
var forcedTags string var forcedTags string
for _, tag := range node.GetForcedTags() { for _, tag := range node.ForcedTags {
forcedTags += "," + tag forcedTags += "," + tag
} }
forcedTags = strings.TrimLeft(forcedTags, ",") forcedTags = strings.TrimLeft(forcedTags, ",")
var invalidTags string var invalidTags string
for _, tag := range node.GetInvalidTags() { for _, tag := range node.InvalidTags {
if !contains(node.GetForcedTags(), tag) { if !contains(node.ForcedTags, tag) {
invalidTags += "," + pterm.LightRed(tag) invalidTags += "," + pterm.LightRed(tag)
} }
} }
invalidTags = strings.TrimLeft(invalidTags, ",") invalidTags = strings.TrimLeft(invalidTags, ",")
var validTags string var validTags string
for _, tag := range node.GetValidTags() { for _, tag := range node.ValidTags {
if !contains(node.GetForcedTags(), tag) { if !contains(node.ForcedTags, tag) {
validTags += "," + pterm.LightGreen(tag) validTags += "," + pterm.LightGreen(tag)
} }
} }
validTags = strings.TrimLeft(validTags, ",") validTags = strings.TrimLeft(validTags, ",")
var user string var user string
if currentUser == "" || (currentUser == node.GetUser().GetName()) { if currentUser == "" || (currentUser == node.User.Name) {
user = pterm.LightMagenta(node.GetUser().GetName()) user = pterm.LightMagenta(node.User.Name)
} else { } else {
// Shared into this user // Shared into this user
user = pterm.LightYellow(node.GetUser().GetName()) user = pterm.LightYellow(node.User.Name)
} }
var IPV4Address string var IPV4Address string
var IPV6Address string var IPV6Address string
for _, addr := range node.GetIpAddresses() { for _, addr := range node.IpAddresses {
if netip.MustParseAddr(addr).Is4() { if netip.MustParseAddr(addr).Is4() {
IPV4Address = addr IPV4Address = addr
} else { } else {
@ -596,8 +596,8 @@ func nodesToPtables(
} }
nodeData := []string{ nodeData := []string{
strconv.FormatUint(node.GetId(), util.Base10), strconv.FormatUint(node.Id, util.Base10),
node.GetName(), node.Name,
node.GetGivenName(), node.GetGivenName(),
machineKey.ShortString(), machineKey.ShortString(),
nodeKey.ShortString(), nodeKey.ShortString(),
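nodesToPtables (in both columns) splits a node's addresses into separate IPv4 and IPv6 table columns via the standard net/netip package; the same split in isolation, with made-up example addresses:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Made-up addresses; a headscale node typically carries one IPv4 and
	// one IPv6 tailnet address.
	addrs := []string{"100.64.0.1", "fd7a:115c:a1e0::1"}

	var ipv4Address, ipv6Address string
	for _, addr := range addrs {
		if netip.MustParseAddr(addr).Is4() {
			ipv4Address = addr
		} else {
			ipv6Address = addr
		}
	}

	fmt.Println("IPv4:", ipv4Address, "IPv6:", ipv6Address)
}
```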
View file
@ -84,7 +84,7 @@ var listPreAuthKeys = &cobra.Command{
} }
if output != "" { if output != "" {
SuccessOutput(response.GetPreAuthKeys(), "", output) SuccessOutput(response.PreAuthKeys, "", output)
return return
} }
@ -101,10 +101,10 @@ var listPreAuthKeys = &cobra.Command{
"Tags", "Tags",
}, },
} }
for _, key := range response.GetPreAuthKeys() { for _, key := range response.PreAuthKeys {
expiration := "-" expiration := "-"
if key.GetExpiration() != nil { if key.GetExpiration() != nil {
expiration = ColourTime(key.GetExpiration().AsTime()) expiration = ColourTime(key.Expiration.AsTime())
} }
var reusable string var reusable string
@ -116,7 +116,7 @@ var listPreAuthKeys = &cobra.Command{
aclTags := "" aclTags := ""
for _, tag := range key.GetAclTags() { for _, tag := range key.AclTags {
aclTags += "," + tag aclTags += "," + tag
} }
@ -214,7 +214,7 @@ var createPreAuthKeyCmd = &cobra.Command{
return return
} }
SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output) SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
}, },
} }
View file
@ -87,12 +87,12 @@ var listRoutesCmd = &cobra.Command{
} }
if output != "" { if output != "" {
SuccessOutput(response.GetRoutes(), "", output) SuccessOutput(response.Routes, "", output)
return return
} }
routes = response.GetRoutes() routes = response.Routes
} else { } else {
response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{ response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
NodeId: machineID, NodeId: machineID,
@ -108,12 +108,12 @@ var listRoutesCmd = &cobra.Command{
} }
if output != "" { if output != "" {
SuccessOutput(response.GetRoutes(), "", output) SuccessOutput(response.Routes, "", output)
return return
} }
routes = response.GetRoutes() routes = response.Routes
} }
tableData := routesToPtables(routes) tableData := routesToPtables(routes)
@ -271,25 +271,25 @@ func routesToPtables(routes []*v1.Route) pterm.TableData {
for _, route := range routes { for _, route := range routes {
var isPrimaryStr string var isPrimaryStr string
prefix, err := netip.ParsePrefix(route.GetPrefix()) prefix, err := netip.ParsePrefix(route.Prefix)
if err != nil { if err != nil {
log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err) log.Printf("Error parsing prefix %s: %s", route.Prefix, err)
continue continue
} }
if prefix == types.ExitRouteV4 || prefix == types.ExitRouteV6 { if prefix == types.ExitRouteV4 || prefix == types.ExitRouteV6 {
isPrimaryStr = "-" isPrimaryStr = "-"
} else { } else {
isPrimaryStr = strconv.FormatBool(route.GetIsPrimary()) isPrimaryStr = strconv.FormatBool(route.IsPrimary)
} }
tableData = append(tableData, tableData = append(tableData,
[]string{ []string{
strconv.FormatUint(route.GetId(), Base10), strconv.FormatUint(route.Id, Base10),
route.GetNode().GetGivenName(), route.Node.GivenName,
route.GetPrefix(), route.Prefix,
strconv.FormatBool(route.GetAdvertised()), strconv.FormatBool(route.Advertised),
strconv.FormatBool(route.GetEnabled()), strconv.FormatBool(route.Enabled),
isPrimaryStr, isPrimaryStr,
}) })
} }
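routesToPtables treats a route as an exit route when its parsed prefix equals types.ExitRouteV4 or types.ExitRouteV6. A self-contained sketch of that check, assuming those values are 0.0.0.0/0 and ::/0 (declared locally here rather than imported):

```go
package main

import (
	"fmt"
	"net/netip"
)

// Assumed to match headscale's types.ExitRouteV4 / types.ExitRouteV6.
var (
	exitRouteV4 = netip.MustParsePrefix("0.0.0.0/0")
	exitRouteV6 = netip.MustParsePrefix("::/0")
)

func main() {
	for _, raw := range []string{"0.0.0.0/0", "10.0.0.0/24", "::/0"} {
		prefix, err := netip.ParsePrefix(raw)
		if err != nil {
			fmt.Printf("error parsing prefix %s: %s\n", raw, err)
			continue
		}

		// netip.Prefix is comparable, so == works directly.
		isExit := prefix == exitRouteV4 || prefix == exitRouteV6
		fmt.Printf("%-12s exit route: %v\n", raw, isExit)
	}
}
```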
View file
@ -67,7 +67,7 @@ var createUserCmd = &cobra.Command{
return return
} }
SuccessOutput(response.GetUser(), "User created", output) SuccessOutput(response.User, "User created", output)
}, },
} }
@ -169,7 +169,7 @@ var listUsersCmd = &cobra.Command{
} }
if output != "" { if output != "" {
SuccessOutput(response.GetUsers(), "", output) SuccessOutput(response.Users, "", output)
return return
} }
@ -236,6 +236,6 @@ var renameUserCmd = &cobra.Command{
return return
} }
SuccessOutput(response.GetUser(), "User renamed", output) SuccessOutput(response.User, "User renamed", output)
}, },
} }
View file
@ -40,12 +40,19 @@ grpc_listen_addr: 127.0.0.1:50443
# are doing. # are doing.
grpc_allow_insecure: false grpc_allow_insecure: false
# Private key used to encrypt the traffic between headscale
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/private.key
# The Noise section includes specific configuration for the # The Noise section includes specific configuration for the
# TS2021 Noise protocol # TS2021 Noise protocol
noise: noise:
# The Noise private key is used to encrypt the # The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when # traffic between headscale and Tailscale clients when
# using the new Noise-based protocol. # using the new Noise-based protocol. It must be different
# from the legacy private key.
private_key_path: /var/lib/headscale/noise_private.key private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from. # List of IP prefixes to allocate tailaddresses from.
@ -88,12 +95,6 @@ derp:
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/ # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478" stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# List of externally available DERP maps encoded in JSON # List of externally available DERP maps encoded in JSON
urls: urls:
- https://controlplane.tailscale.com/derpmap/default - https://controlplane.tailscale.com/derpmap/default
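One column of this config keeps a legacy private_key_path alongside the Noise key and notes that the two must differ; the corresponding runtime check appears later in the hscontrol diff as privateKey.Equal(*noisePrivateKey). A minimal sketch of that constraint with freshly generated keys (not headscale's actual key loading, which reads the keys from the configured paths):

```go
package main

import (
	"fmt"
	"log"

	"tailscale.com/types/key"
)

func main() {
	// Stand-ins for the two configured keys; headscale reads them from
	// private_key_path and noise.private_key_path instead.
	legacyKey := key.NewMachine()
	noiseKey := key.NewMachine()

	if legacyKey.Equal(noiseKey) {
		log.Fatal("private key and noise private key must be different")
	}

	fmt.Println("legacy and Noise private keys are distinct")
}
```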
View file
@ -1,5 +0,0 @@
cairosvg~=2.7.1
mkdocs-material~=9.4.14
mkdocs-minify-plugin~=0.7.1
pillow~=10.1.0
View file
@ -28,7 +28,7 @@ cd ./headscale
touch ./config/db.sqlite touch ./config/db.sqlite
``` ```
3. **(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository. 3. **(Strongly Recommended)** Download a copy of the [example configuration][config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.
Using wget: Using wget:
View file
@ -5,11 +5,11 @@
"systems": "systems" "systems": "systems"
}, },
"locked": { "locked": {
"lastModified": 1701680307, "lastModified": 1694529238,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=", "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
"owner": "numtide", "owner": "numtide",
"repo": "flake-utils", "repo": "flake-utils",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725", "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -20,11 +20,11 @@
}, },
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1701998057, "lastModified": 1699186365,
"narHash": "sha256-gAJGhcTO9cso7XDfAScXUlPcva427AUT2q02qrmXPdo=", "narHash": "sha256-Pxrw5U8mBsL3NlrJ6q1KK1crzvSUcdfwb9083sKDrcU=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "09dc04054ba2ff1f861357d0e7e76d021b273cd7", "rev": "a0b3b06b7a82c965ae0bb1d59f6e386fe755001d",
"type": "github" "type": "github"
}, },
"original": { "original": {
View file
@ -26,12 +26,14 @@
version = headscaleVersion; version = headscaleVersion;
src = pkgs.lib.cleanSource self; src = pkgs.lib.cleanSource self;
tags = ["ts2019"];
# Only run unit tests when testing a build # Only run unit tests when testing a build
checkFlags = ["-short"]; checkFlags = ["-short"];
# When updating go.mod or go.sum, a new sha will need to be calculated, # When updating go.mod or go.sum, a new sha will need to be calculated,
# update this if you have a mismatch after doing a change to thos files. # update this if you have a mismatch after doing a change to thos files.
vendorHash = "sha256-8x4RKaS8vnBYTPlvQTkDKWIAJOgPF99hvPiuRyTMrA8="; vendorSha256 = "sha256-Q6eySc8lXYhkWka7Y+qOM6viv7QhdjFZDX8PttaLfr4=";
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"]; ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
}; };
@ -47,7 +49,7 @@
sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A="; sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A=";
}; };
vendorHash = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18="; vendorSha256 = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
nativeBuildInputs = [pkgs.installShellFiles]; nativeBuildInputs = [pkgs.installShellFiles];
}; };
@ -69,7 +71,7 @@
sha256 = "sha256-lnNdsDCpeSHtl2lC1IhUw11t3cnGF+37qSM7HDvKLls="; sha256 = "sha256-lnNdsDCpeSHtl2lC1IhUw11t3cnGF+37qSM7HDvKLls=";
}; };
vendorHash = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA="; vendorSha256 = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA=";
nativeBuildInputs = [pkgs.installShellFiles]; nativeBuildInputs = [pkgs.installShellFiles];
@ -127,7 +129,15 @@
buildInputs = devDeps; buildInputs = devDeps;
shellHook = '' shellHook = ''
export GOFLAGS=-tags="ts2019"
export PATH="$PWD/result/bin:$PATH" export PATH="$PWD/result/bin:$PATH"
mkdir -p ./ignored
export HEADSCALE_PRIVATE_KEY_PATH="./ignored/private.key"
export HEADSCALE_NOISE_PRIVATE_KEY_PATH="./ignored/noise_private.key"
export HEADSCALE_DB_PATH="./ignored/db.sqlite"
export HEADSCALE_TLS_LETSENCRYPT_CACHE_DIR="./ignored/cache"
export HEADSCALE_UNIX_SOCKET="./ignored/headscale.sock"
''; '';
}; };
208
go.mod
View file
@ -1,181 +1,137 @@
module github.com/juanfont/headscale module github.com/juanfont/headscale
go 1.21.0 go 1.21
toolchain go1.21.4 toolchain go1.21.1
require ( require (
github.com/AlecAivazis/survey/v2 v2.3.7 github.com/AlecAivazis/survey/v2 v2.3.6
github.com/coreos/go-oidc/v3 v3.8.0 github.com/coreos/go-oidc/v3 v3.5.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.4.0 github.com/deckarep/golang-set/v2 v2.3.0
github.com/efekarakus/termcolor v1.0.1 github.com/efekarakus/termcolor v1.0.1
github.com/glebarez/sqlite v1.10.0 github.com/glebarez/sqlite v1.7.0
github.com/go-gormigrate/gormigrate/v2 v2.1.1
github.com/gofrs/uuid/v5 v5.0.0 github.com/gofrs/uuid/v5 v5.0.0
github.com/google/go-cmp v0.6.0 github.com/google/go-cmp v0.5.9
github.com/gorilla/mux v1.8.1 github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2
github.com/klauspost/compress v1.17.3 github.com/klauspost/compress v1.16.7
github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282 github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282
github.com/ory/dockertest/v3 v3.10.0 github.com/ory/dockertest/v3 v3.9.1
github.com/patrickmn/go-cache v2.1.0+incompatible github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/philip-bui/grpc-zerolog v1.0.1 github.com/philip-bui/grpc-zerolog v1.0.1
github.com/pkg/profile v1.7.0 github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_golang v1.15.1
github.com/prometheus/common v0.45.0 github.com/prometheus/common v0.42.0
github.com/pterm/pterm v0.12.71 github.com/pterm/pterm v0.12.58
github.com/puzpuzpuz/xsync/v3 v3.0.2 github.com/puzpuzpuz/xsync/v2 v2.4.0
github.com/rs/zerolog v1.31.0 github.com/rs/zerolog v1.29.0
github.com/samber/lo v1.38.1 github.com/samber/lo v1.38.1
github.com/spf13/cobra v1.8.0 github.com/spf13/cobra v1.7.0
github.com/spf13/viper v1.17.0 github.com/spf13/viper v1.16.0
github.com/stretchr/testify v1.8.4 github.com/stretchr/testify v1.8.4
github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a
github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
go4.org/netipx v0.0.0-20230824141953-6213f710f925 go4.org/netipx v0.0.0-20230728180743-ad4cb58a6516
golang.org/x/crypto v0.16.0 golang.org/x/crypto v0.12.0
golang.org/x/exp v0.0.0-20231127185646-65229373498e golang.org/x/net v0.14.0
golang.org/x/net v0.19.0 golang.org/x/oauth2 v0.7.0
golang.org/x/oauth2 v0.15.0 golang.org/x/sync v0.2.0
golang.org/x/sync v0.5.0 google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 google.golang.org/grpc v1.55.0
google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.30.0
google.golang.org/protobuf v1.31.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/postgres v1.5.4 gorm.io/driver/postgres v1.4.8
gorm.io/gorm v1.25.5 gorm.io/gorm v1.24.6
tailscale.com v1.56.1 tailscale.com v1.50.0
) )
require ( require (
atomicgo.dev/cursor v0.2.0 // indirect atomicgo.dev/cursor v0.1.1 // indirect
atomicgo.dev/keyboard v0.2.9 // indirect atomicgo.dev/keyboard v0.2.9 // indirect
atomicgo.dev/schedule v0.1.0 // indirect
filippo.io/edwards25519 v1.0.0 // indirect filippo.io/edwards25519 v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/akutz/memconn v0.1.0 // indirect github.com/akutz/memconn v0.1.0 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect
github.com/aws/aws-sdk-go-v2 v1.21.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.42 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.40 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect
github.com/aws/aws-sdk-go-v2/service/ssm v1.38.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.14.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 // indirect
github.com/aws/smithy-go v1.14.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/console v1.0.3 // indirect github.com/containerd/console v1.0.3 // indirect
github.com/containerd/continuity v0.4.3 // indirect github.com/containerd/continuity v0.3.0 // indirect
github.com/coreos/go-iptables v0.7.0 // indirect github.com/coreos/go-iptables v0.6.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dblohm7/wingoes v0.0.0-20230821191801-fc76608aecf0 // indirect
github.com/dblohm7/wingoes v0.0.0-20231025182615-65d8b4b5428f // indirect github.com/docker/cli v23.0.5+incompatible // indirect
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect github.com/docker/docker v24.0.4+incompatible // indirect
github.com/docker/cli v24.0.7+incompatible // indirect
github.com/docker/docker v24.0.7+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/fgprof v0.9.3 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fxamacker/cbor/v2 v2.5.0 // indirect github.com/fxamacker/cbor/v2 v2.4.0 // indirect
github.com/glebarez/go-sqlite v1.21.2 // indirect github.com/glebarez/go-sqlite v1.20.3 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-github v17.0.0+incompatible // indirect github.com/google/go-github v17.0.0+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c // indirect github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c // indirect
github.com/google/pprof v0.0.0-20231127191134-f3a68a39ae15 // indirect github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.4.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/gookit/color v1.5.4 // indirect github.com/gookit/color v1.5.3 // indirect
github.com/gorilla/csrf v1.7.1 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hdevalence/ed25519consensus v0.1.0 // indirect github.com/hdevalence/ed25519consensus v0.1.0 // indirect
github.com/illarion/gonotify v1.0.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/insomniacslk/dhcp v0.0.0-20230908212754-65c27093e38a // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.5.0 // indirect github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
github.com/jsimonetti/rtnetlink v1.4.0 // indirect github.com/jsimonetti/rtnetlink v1.3.2 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect
github.com/kr/pretty v0.3.1 // indirect github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect github.com/lib/pq v1.10.7 // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect github.com/lithammer/fuzzysearch v1.1.5 // indirect
github.com/magiconair/properties v1.8.7 // indirect github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/genetlink v1.3.2 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/netlink v1.7.2 // indirect
github.com/mdlayher/sdnotify v1.0.0 // indirect github.com/mdlayher/socket v0.4.1 // indirect
github.com/mdlayher/socket v0.5.0 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/miekg/dns v1.1.57 // indirect github.com/miekg/dns v1.1.55 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/term v0.5.0 // indirect github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/opencontainers/runc v1.1.10 // indirect github.com/opencontainers/runc v1.1.4 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.4 // indirect github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/safchain/ethtool v0.3.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/spf13/afero v1.9.5 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/spf13/cast v1.5.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.6.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
github.com/tailscale/golang-x-crypto v0.0.0-20230713185742-f0b76a10a08e // indirect
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect
github.com/tailscale/setec v0.0.0-20230926024544-07dde05889e7 // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20231213172531-a4fa669015b2 // indirect
github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 // indirect
github.com/tcnksm/go-httpstat v0.2.0 // indirect
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect
github.com/vishvananda/netlink v1.2.1-beta.2 // indirect github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
github.com/vishvananda/netns v0.0.4 // indirect github.com/vishvananda/netns v0.0.4 // indirect
github.com/x448/float16 v0.8.4 // indirect github.com/x448/float16 v0.8.4 // indirect
@ -183,27 +139,23 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
golang.org/x/mod v0.14.0 // indirect golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 // indirect
golang.org/x/sys v0.15.0 // indirect golang.org/x/mod v0.11.0 // indirect
golang.org/x/term v0.15.0 // indirect golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.14.0 // indirect golang.org/x/term v0.11.0 // indirect
golang.org/x/time v0.5.0 // indirect golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.16.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect golang.org/x/tools v0.9.1 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
google.golang.org/appengine v1.6.8 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c // indirect gotest.tools/v3 v3.4.0 // indirect
inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect modernc.org/libc v1.22.2 // indirect
modernc.org/libc v1.34.11 // indirect modernc.org/mathutil v1.5.0 // indirect
modernc.org/mathutil v1.6.0 // indirect modernc.org/memory v1.5.0 // indirect
modernc.org/memory v1.7.2 // indirect modernc.org/sqlite v1.20.3 // indirect
modernc.org/sqlite v1.28.0 // indirect nhooyr.io/websocket v1.8.7 // indirect
nhooyr.io/websocket v1.8.10 // indirect
) )
905
go.sum
File diff suppressed because it is too large
View file
@ -48,7 +48,6 @@ import (
"google.golang.org/grpc/peer" "google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection" "google.golang.org/grpc/reflection"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"tailscale.com/envknob"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/dnstype" "tailscale.com/types/dnstype"
"tailscale.com/types/key" "tailscale.com/types/key"
@ -60,9 +59,6 @@ var (
errUnsupportedLetsEncryptChallengeType = errors.New( errUnsupportedLetsEncryptChallengeType = errors.New(
"unknown value for Lets Encrypt challenge type", "unknown value for Lets Encrypt challenge type",
) )
errEmptyInitialDERPMap = errors.New(
"initial DERPMap is empty, Headscale requries at least one entry",
)
) )
const ( const (
@ -81,6 +77,7 @@ type Headscale struct {
dbString string dbString string
dbType string dbType string
dbDebug bool dbDebug bool
privateKey2019 *key.MachinePrivate
noisePrivateKey *key.MachinePrivate noisePrivateKey *key.MachinePrivate
DERPMap *tailcfg.DERPMap DERPMap *tailcfg.DERPMap
@ -99,23 +96,26 @@ type Headscale struct {
pollNetMapStreamWG sync.WaitGroup pollNetMapStreamWG sync.WaitGroup
} }
var (
profilingEnabled = envknob.Bool("HEADSCALE_PROFILING_ENABLED")
tailsqlEnabled = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED")
tailsqlStateDir = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR")
tailsqlTSKey = envknob.String("TS_AUTHKEY")
)
func NewHeadscale(cfg *types.Config) (*Headscale, error) { func NewHeadscale(cfg *types.Config) (*Headscale, error) {
if profilingEnabled { if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile {
runtime.SetBlockProfileRate(1) runtime.SetBlockProfileRate(1)
} }
privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create private key: %w", err)
}
// TS2021 requires to have a different key from the legacy protocol.
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath) noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err) return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
} }
if privateKey.Equal(*noisePrivateKey) {
return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
}
var dbString string var dbString string
switch cfg.DBtype { switch cfg.DBtype {
case db.Postgres: case db.Postgres:
@ -156,6 +156,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
cfg: cfg, cfg: cfg,
dbType: cfg.DBtype, dbType: cfg.DBtype,
dbString: dbString, dbString: dbString,
privateKey2019: privateKey,
noisePrivateKey: noisePrivateKey, noisePrivateKey: noisePrivateKey,
registrationCache: registrationCache, registrationCache: registrationCache,
pollNetMapStreamWG: sync.WaitGroup{}, pollNetMapStreamWG: sync.WaitGroup{},
@ -198,21 +199,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
} }
if cfg.DERP.ServerEnabled { if cfg.DERP.ServerEnabled {
derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath) // TODO(kradalby): replace this key with a dedicated DERP key.
if err != nil {
return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
}
if derpServerKey.Equal(*noisePrivateKey) {
return nil, fmt.Errorf(
"DERP server private key and noise private key are the same: %w",
err,
)
}
embeddedDERPServer, err := derpServer.NewDERPServer( embeddedDERPServer, err := derpServer.NewDERPServer(
cfg.ServerURL, cfg.ServerURL,
key.NodePrivate(*derpServerKey), key.NodePrivate(*privateKey),
&cfg.DERP, &cfg.DERP,
) )
if err != nil { if err != nil {
@ -273,14 +263,21 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
h.DERPMap.Regions[region.RegionID] = &region h.DERPMap.Regions[region.RegionID] = &region
} }
stateUpdate := types.StateUpdate{ h.nodeNotifier.NotifyAll(types.StateUpdate{
Type: types.StateDERPUpdated, Type: types.StateDERPUpdated,
DERPMap: h.DERPMap, DERPMap: *h.DERPMap,
})
} }
if stateUpdate.Valid() {
h.nodeNotifier.NotifyAll(stateUpdate)
} }
} }
func (h *Headscale) failoverSubnetRoutes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
err := h.db.HandlePrimarySubnetFailover()
if err != nil {
log.Error().Err(err).Msg("failed to handle primary subnet failover")
}
} }
} }
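scheduledDERPMapUpdateWorker and the failoverSubnetRoutes loop in this hunk share the same shape: a background worker driven by time.Ticker. A generic, runnable sketch of that pattern (not headscale's actual workers), including the kind of cancel channel the DERP map updater takes:

```go
package main

import (
	"fmt"
	"time"
)

// periodicWorker runs task on a fixed interval until the cancel channel
// closes. Generic sketch of the worker shape used above.
func periodicWorker(interval time.Duration, cancel <-chan struct{}, task func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-cancel:
			return
		case <-ticker.C:
			task()
		}
	}
}

func main() {
	cancel := make(chan struct{})
	go periodicWorker(500*time.Millisecond, cancel, func() {
		fmt.Println("tick: refresh DERP map / check subnet failover")
	})

	time.Sleep(2 * time.Second)
	close(cancel)
}
```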
@ -452,9 +449,10 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet) router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet) router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{mkey}", h.RegisterWebAPI).Methods(http.MethodGet) router.HandleFunc("/register/{nkey}", h.RegisterWebAPI).Methods(http.MethodGet)
h.addLegacyHandlers(router)
router.HandleFunc("/oidc/register/{mkey}", h.RegisterOIDC).Methods(http.MethodGet) router.HandleFunc("/oidc/register/{nkey}", h.RegisterOIDC).Methods(http.MethodGet)
router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet) router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet) router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig). router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
@ -512,15 +510,13 @@ func (h *Headscale) Serve() error {
go h.scheduledDERPMapUpdateWorker(derpMapCancelChannel) go h.scheduledDERPMapUpdateWorker(derpMapCancelChannel)
} }
if len(h.DERPMap.Regions) == 0 {
return errEmptyInitialDERPMap
}
// TODO(kradalby): These should have cancel channels and be cleaned // TODO(kradalby): These should have cancel channels and be cleaned
// up on shutdown. // up on shutdown.
go h.expireEphemeralNodes(updateInterval) go h.expireEphemeralNodes(updateInterval)
go h.expireExpiredMachines(updateInterval) go h.expireExpiredMachines(updateInterval)
go h.failoverSubnetRoutes(updateInterval)
if zl.GlobalLevel() == zl.TraceLevel { if zl.GlobalLevel() == zl.TraceLevel {
zerolog.RespLog = true zerolog.RespLog = true
} else { } else {
@ -576,10 +572,7 @@ func (h *Headscale) Serve() error {
} }
// Start the local gRPC server without TLS and without authentication // Start the local gRPC server without TLS and without authentication
grpcSocket := grpc.NewServer( grpcSocket := grpc.NewServer(zerolog.UnaryInterceptor())
// Uncomment to debug grpc communication.
// zerolog.UnaryInterceptor(),
)
v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h)) v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h))
reflection.Register(grpcSocket) reflection.Register(grpcSocket)
@ -619,8 +612,7 @@ func (h *Headscale) Serve() error {
grpc.UnaryInterceptor( grpc.UnaryInterceptor(
grpcMiddleware.ChainUnaryServer( grpcMiddleware.ChainUnaryServer(
h.grpcAuthenticationInterceptor, h.grpcAuthenticationInterceptor,
// Uncomment to debug grpc communication. zerolog.NewUnaryServerInterceptor(),
// zerolog.NewUnaryServerInterceptor(),
), ),
), ),
} }
@ -706,18 +698,6 @@ func (h *Headscale) Serve() error {
log.Info(). log.Info().
Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr) Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr)
var tailsqlContext context.Context
if tailsqlEnabled {
if h.cfg.DBtype != db.Sqlite {
log.Fatal().Str("type", h.cfg.DBtype).Msgf("tailsql only support %q", db.Sqlite)
}
if tailsqlTSKey == "" {
log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set")
}
tailsqlContext = context.Background()
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.DBpath)
}
// Handle common process-killing signals so we can gracefully shut down: // Handle common process-killing signals so we can gracefully shut down:
h.shutdownChan = make(chan struct{}) h.shutdownChan = make(chan struct{})
sigc := make(chan os.Signal, 1) sigc := make(chan os.Signal, 1)
@ -783,10 +763,6 @@ func (h *Headscale) Serve() error {
grpcListener.Close() grpcListener.Close()
} }
if tailsqlContext != nil {
tailsqlContext.Done()
}
// Close network listeners // Close network listeners
promHTTPListener.Close() promHTTPListener.Close()
httpListener.Close() httpListener.Close()
@ -924,8 +900,7 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
err = os.WriteFile(path, machineKeyStr, privateKeyFileMode) err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
if err != nil { if err != nil {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"failed to save private key to disk at path %q: %w", "failed to save private key to disk: %w",
path,
err, err,
) )
} }
@ -936,9 +911,16 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
} }
trimmedPrivateKey := strings.TrimSpace(string(privateKey)) trimmedPrivateKey := strings.TrimSpace(string(privateKey))
privateKeyEnsurePrefix := util.PrivateKeyEnsurePrefix(trimmedPrivateKey)
var machineKey key.MachinePrivate var machineKey key.MachinePrivate
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil { if err = machineKey.UnmarshalText([]byte(privateKeyEnsurePrefix)); err != nil {
log.Info().
Str("path", path).
Msg("This might be due to a legacy (headscale pre-0.12) private key. " +
"If the key is in WireGuard format, delete the key and restart headscale. " +
"A new key will automatically be generated. All Tailscale clients will have to be restarted")
return nil, fmt.Errorf("failed to parse private key: %w", err) return nil, fmt.Errorf("failed to parse private key: %w", err)
} }
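
Editor's note: both sides of this hunk share the same read-or-create flow for the machine private key; they differ in the error detail and in the legacy prefix handling. Below is a minimal sketch of that flow against tailscale.com/types/key; the 0o600 file mode, the helper name, and the /tmp path are assumptions of this sketch, not headscale's values.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"strings"

	"tailscale.com/types/key"
)

// readOrCreateMachineKey loads a machine private key from disk, generating and
// persisting a new one when the file does not exist yet.
func readOrCreateMachineKey(path string) (*key.MachinePrivate, error) {
	raw, err := os.ReadFile(path)
	if errors.Is(err, fs.ErrNotExist) {
		machineKey := key.NewMachine()

		machineKeyStr, err := machineKey.MarshalText()
		if err != nil {
			return nil, fmt.Errorf("failed to convert private key to string for saving: %w", err)
		}

		if err := os.WriteFile(path, machineKeyStr, 0o600); err != nil {
			return nil, fmt.Errorf("failed to save private key to disk at path %q: %w", path, err)
		}

		return &machineKey, nil
	} else if err != nil {
		return nil, fmt.Errorf("failed to read private key file: %w", err)
	}

	var machineKey key.MachinePrivate
	if err := machineKey.UnmarshalText([]byte(strings.TrimSpace(string(raw)))); err != nil {
		return nil, fmt.Errorf("failed to parse private key: %w", err)
	}

	return &machineKey, nil
}

func main() {
	k, err := readOrCreateMachineKey("/tmp/machine.key")
	if err != nil {
		panic(err)
	}
	fmt.Println(k.Public().ShortString())
}
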

View file

@ -1,13 +1,13 @@
package hscontrol package hscontrol
import ( import (
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"strings" "strings"
"time" "time"
"github.com/juanfont/headscale/hscontrol/mapper"
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
@ -16,62 +16,22 @@ import (
"tailscale.com/types/key" "tailscale.com/types/key"
) )
func logAuthFunc( // handleRegister is the common logic for registering a client in the legacy and Noise protocols
registerRequest tailcfg.RegisterRequest, //
machineKey key.MachinePublic, // When using Noise, the machineKey is Zero.
) (func(string), func(string), func(error, string)) {
return func(msg string) {
log.Info().
Caller().
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("node", registerRequest.Hostinfo.Hostname).
Str("followup", registerRequest.Followup).
Time("expiry", registerRequest.Expiry).
Msg(msg)
},
func(msg string) {
log.Trace().
Caller().
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("node", registerRequest.Hostinfo.Hostname).
Str("followup", registerRequest.Followup).
Time("expiry", registerRequest.Expiry).
Msg(msg)
},
func(err error, msg string) {
log.Error().
Caller().
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("node", registerRequest.Hostinfo.Hostname).
Str("followup", registerRequest.Followup).
Time("expiry", registerRequest.Expiry).
Err(err).
Msg(msg)
}
}
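
Editor's note: logAuthFunc on one side returns closures that capture the register request once, so the handler body can log with a single call. A trimmed sketch of the same pattern follows, using plain string fields and names of our own rather than headscale's.

package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

// authLoggers pre-binds the request fields and hands back closures, so each
// call site in the handler only needs to pass a message.
func authLoggers(node, machineKey, nodeKey string) (logInfo func(string), logErr func(error, string)) {
	logInfo = func(msg string) {
		log.Info().
			Caller().
			Str("node", node).
			Str("machine_key", machineKey).
			Str("node_key", nodeKey).
			Msg(msg)
	}

	logErr = func(err error, msg string) {
		log.Error().
			Caller().
			Str("node", node).
			Str("machine_key", machineKey).
			Str("node_key", nodeKey).
			Err(err).
			Msg(msg)
	}

	return logInfo, logErr
}

func main() {
	logInfo, logErr := authLoggers("testnode", "mkey:abc", "nodekey:def")
	logInfo("handleRegister called, looking up machine in DB")
	logErr(errors.New("demo error"), "Failed to generate given name for node")
}
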
// handleRegister is the logic for registering a client.
func (h *Headscale) handleRegister( func (h *Headscale) handleRegister(
writer http.ResponseWriter, writer http.ResponseWriter,
req *http.Request, req *http.Request,
registerRequest tailcfg.RegisterRequest, registerRequest tailcfg.RegisterRequest,
machineKey key.MachinePublic, machineKey key.MachinePublic,
isNoise bool,
) { ) {
logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)
now := time.Now().UTC() now := time.Now().UTC()
logTrace("handleRegister called, looking up machine in DB")
node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey) node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey)
logTrace("handleRegister database lookup has returned")
if errors.Is(err, gorm.ErrRecordNotFound) { if errors.Is(err, gorm.ErrRecordNotFound) {
// If the node has AuthKey set, handle registration via PreAuthKeys // If the node has AuthKey set, handle registration via PreAuthKeys
if registerRequest.Auth.AuthKey != "" { if registerRequest.Auth.AuthKey != "" {
h.handleAuthKey(writer, registerRequest, machineKey) h.handleAuthKey(writer, registerRequest, machineKey, isNoise)
return return
} }
@ -85,29 +45,49 @@ func (h *Headscale) handleRegister(
// is that the client will hammer headscale with requests until it gets a // is that the client will hammer headscale with requests until it gets a
// successful RegisterResponse. // successful RegisterResponse.
if registerRequest.Followup != "" { if registerRequest.Followup != "" {
logTrace("register request is a followup") if _, ok := h.registrationCache.Get(util.NodePublicKeyStripPrefix(registerRequest.NodeKey)); ok {
if _, ok := h.registrationCache.Get(machineKey.String()); ok { log.Debug().
logTrace("Node is waiting for interactive login") Caller().
Str("node", registerRequest.Hostinfo.Hostname).
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("follow_up", registerRequest.Followup).
Bool("noise", isNoise).
Msg("Node is waiting for interactive login")
select { select {
case <-req.Context().Done(): case <-req.Context().Done():
return return
case <-time.After(registrationHoldoff): case <-time.After(registrationHoldoff):
h.handleNewNode(writer, registerRequest, machineKey) h.handleNewNode(writer, registerRequest, machineKey, isNoise)
return return
} }
} }
} }
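
Editor's note: both sides park a follow-up request in a select until the client disconnects or a holdoff passes; only the logging and the cache key differ. A stand-alone sketch of that wait follows, with an arbitrary holdoff value.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForLogin mirrors the select above: block until the client goes away or
// the registration holdoff elapses, then report whether to continue.
func waitForLogin(ctx context.Context, holdoff time.Duration) bool {
	select {
	case <-ctx.Done():
		return false // client disconnected; drop the request
	case <-time.After(holdoff):
		return true // holdoff expired; fall through to handleNewNode
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	fmt.Println(waitForLogin(ctx, time.Second)) // prints false: the context wins
}
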
logInfo("Node not found in database, creating new") log.Info().
Caller().
Str("node", registerRequest.Hostinfo.Hostname).
Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
Str("follow_up", registerRequest.Followup).
Bool("noise", isNoise).
Msg("New node not yet in the database")
givenName, err := h.db.GenerateGivenName( givenName, err := h.db.GenerateGivenName(
machineKey, machineKey.String(),
registerRequest.Hostinfo.Hostname, registerRequest.Hostinfo.Hostname,
) )
if err != nil { if err != nil {
logErr(err, "Failed to generate given name for node") log.Error().
Caller().
Str("func", "RegistrationHandler").
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
Err(err).
Msg("Failed to generate given name for node")
return return
} }
@ -117,26 +97,31 @@ func (h *Headscale) handleRegister(
// We create the node and then keep it around until a callback // We create the node and then keep it around until a callback
// happens // happens
newNode := types.Node{ newNode := types.Node{
MachineKey: machineKey, MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
Hostname: registerRequest.Hostinfo.Hostname, Hostname: registerRequest.Hostinfo.Hostname,
GivenName: givenName, GivenName: givenName,
NodeKey: registerRequest.NodeKey, NodeKey: util.NodePublicKeyStripPrefix(registerRequest.NodeKey),
LastSeen: &now, LastSeen: &now,
Expiry: &time.Time{}, Expiry: &time.Time{},
} }
if !registerRequest.Expiry.IsZero() { if !registerRequest.Expiry.IsZero() {
logTrace("Non-zero expiry time requested") log.Trace().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Time("expiry", registerRequest.Expiry).
Msg("Non-zero expiry time requested")
newNode.Expiry = &registerRequest.Expiry newNode.Expiry = &registerRequest.Expiry
} }
h.registrationCache.Set( h.registrationCache.Set(
machineKey.String(), newNode.NodeKey,
newNode, newNode,
registerCacheExpiration, registerCacheExpiration,
) )
h.handleNewNode(writer, registerRequest, machineKey) h.handleNewNode(writer, registerRequest, machineKey, isNoise)
return return
} }
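
Editor's note: the pending node is parked in an expiring in-memory cache until the auth callback arrives; the two sides only disagree on the cache key (machine key vs node key). A small sketch follows, assuming the cache is github.com/patrickmn/go-cache, which is consistent with the *cache.Cache parameter that shows up in RegisterNodeFromAuthCallback further down; the expiry values and keys here are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

// pendingNode is a stand-in for types.Node while it waits for interactive login.
type pendingNode struct {
	Hostname string
	NodeKey  string
}

func main() {
	registrationCache := cache.New(15*time.Minute, 30*time.Minute)

	// One side of this change keys the entry by machine key, the other by node key.
	registrationCache.Set("mkey:abc123", pendingNode{Hostname: "testnode", NodeKey: "nodekey:def456"}, 15*time.Minute)

	if v, ok := registrationCache.Get("mkey:abc123"); ok {
		fmt.Printf("%s is waiting for interactive login\n", v.(pendingNode).Hostname)
	}
}
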
@ -149,7 +134,11 @@ func (h *Headscale) handleRegister(
// (juan): For a while we had a bug where we were not storing the MachineKey for the nodes using the TS2021, // (juan): For a while we had a bug where we were not storing the MachineKey for the nodes using the TS2021,
// due to a misunderstanding of the protocol https://github.com/juanfont/headscale/issues/1054 // due to a misunderstanding of the protocol https://github.com/juanfont/headscale/issues/1054
	// So if we have an invalid MachineKey (but we were able to fetch the node with the NodeKeys), we update it.	// So if we have an invalid MachineKey (but we were able to fetch the node with the NodeKeys), we update it.
if err != nil || node.MachineKey.IsZero() { var storedMachineKey key.MachinePublic
err = storedMachineKey.UnmarshalText(
[]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
)
if err != nil || storedMachineKey.IsZero() {
if err := h.db.NodeSetMachineKey(node, machineKey); err != nil { if err := h.db.NodeSetMachineKey(node, machineKey); err != nil {
log.Error(). log.Error().
Caller(). Caller().
@ -167,12 +156,12 @@ func (h *Headscale) handleRegister(
	// - Trying to log out (sending an expiry in the past)	// - Trying to log out (sending an expiry in the past)
// - A valid, registered node, looking for /map // - A valid, registered node, looking for /map
// - Expired node wanting to reauthenticate // - Expired node wanting to reauthenticate
if node.NodeKey.String() == registerRequest.NodeKey.String() { if node.NodeKey == util.NodePublicKeyStripPrefix(registerRequest.NodeKey) {
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout) // The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648 // https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
if !registerRequest.Expiry.IsZero() && if !registerRequest.Expiry.IsZero() &&
registerRequest.Expiry.UTC().Before(now) { registerRequest.Expiry.UTC().Before(now) {
h.handleNodeLogOut(writer, *node, machineKey) h.handleNodeLogOut(writer, *node, machineKey, isNoise)
return return
} }
@ -180,20 +169,21 @@ func (h *Headscale) handleRegister(
	// If the node is not expired and it is registered, we have already accepted this node,	// If the node is not expired and it is registered, we have already accepted this node,
// let it proceed with a valid registration // let it proceed with a valid registration
if !node.IsExpired() { if !node.IsExpired() {
h.handleNodeWithValidRegistration(writer, *node, machineKey) h.handleNodeWithValidRegistration(writer, *node, machineKey, isNoise)
return return
} }
} }
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration // The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
if node.NodeKey.String() == registerRequest.OldNodeKey.String() && if node.NodeKey == util.NodePublicKeyStripPrefix(registerRequest.OldNodeKey) &&
!node.IsExpired() { !node.IsExpired() {
h.handleNodeKeyRefresh( h.handleNodeKeyRefresh(
writer, writer,
registerRequest, registerRequest,
*node, *node,
machineKey, machineKey,
isNoise,
) )
return return
@ -208,7 +198,7 @@ func (h *Headscale) handleRegister(
} }
// The node has expired or it is logged out // The node has expired or it is logged out
h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey) h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey, isNoise)
	// TODO(juan): RegisterRequest includes an Expiry time that we could optionally use	// TODO(juan): RegisterRequest includes an Expiry time that we could optionally use
node.Expiry = &time.Time{} node.Expiry = &time.Time{}
@ -217,9 +207,9 @@ func (h *Headscale) handleRegister(
// we need to make sure the NodeKey matches the one in the request // we need to make sure the NodeKey matches the one in the request
// TODO(juan): What happens when using fast user switching between two // TODO(juan): What happens when using fast user switching between two
// headscale-managed tailnets? // headscale-managed tailnets?
node.NodeKey = registerRequest.NodeKey node.NodeKey = util.NodePublicKeyStripPrefix(registerRequest.NodeKey)
h.registrationCache.Set( h.registrationCache.Set(
machineKey.String(), util.NodePublicKeyStripPrefix(registerRequest.NodeKey),
*node, *node,
registerCacheExpiration, registerCacheExpiration,
) )
@ -229,6 +219,7 @@ func (h *Headscale) handleRegister(
} }
// handleAuthKey contains the logic to manage auth key client registration // handleAuthKey contains the logic to manage auth key client registration
	// It is used by both the legacy and the new Noise protocols.
// When using Noise, the machineKey is Zero. // When using Noise, the machineKey is Zero.
// //
// TODO: check if any locks are needed around IP allocation. // TODO: check if any locks are needed around IP allocation.
@ -236,10 +227,12 @@ func (h *Headscale) handleAuthKey(
writer http.ResponseWriter, writer http.ResponseWriter,
registerRequest tailcfg.RegisterRequest, registerRequest tailcfg.RegisterRequest,
machineKey key.MachinePublic, machineKey key.MachinePublic,
isNoise bool,
) { ) {
log.Debug(). log.Debug().
Caller(). Caller().
Str("node", registerRequest.Hostinfo.Hostname). Str("node", registerRequest.Hostinfo.Hostname).
Bool("noise", isNoise).
Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname) Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
resp := tailcfg.RegisterResponse{} resp := tailcfg.RegisterResponse{}
@ -247,15 +240,17 @@ func (h *Headscale) handleAuthKey(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname). Str("node", registerRequest.Hostinfo.Hostname).
Err(err). Err(err).
Msg("Failed authentication via AuthKey") Msg("Failed authentication via AuthKey")
resp.MachineAuthorized = false resp.MachineAuthorized = false
respBody, err := json.Marshal(resp) respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname). Str("node", registerRequest.Hostinfo.Hostname).
Err(err). Err(err).
Msg("Cannot encode message") Msg("Cannot encode message")
@ -272,12 +267,14 @@ func (h *Headscale) handleAuthKey(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Failed to write response") Msg("Failed to write response")
} }
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname). Str("node", registerRequest.Hostinfo.Hostname).
Msg("Failed authentication via AuthKey") Msg("Failed authentication via AuthKey")
@ -293,10 +290,11 @@ func (h *Headscale) handleAuthKey(
log.Debug(). log.Debug().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname). Str("node", registerRequest.Hostinfo.Hostname).
Msg("Authentication key was valid, proceeding to acquire IP addresses") Msg("Authentication key was valid, proceeding to acquire IP addresses")
nodeKey := registerRequest.NodeKey nodeKey := util.NodePublicKeyStripPrefix(registerRequest.NodeKey)
	// retrieve node information if it exists	// retrieve node information if it exists
// The error is not important, because if it does not // The error is not important, because if it does not
@ -306,6 +304,7 @@ func (h *Headscale) handleAuthKey(
if node != nil { if node != nil {
log.Trace(). log.Trace().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Msg("node was already registered before, refreshing with new auth key") Msg("node was already registered before, refreshing with new auth key")
@ -315,6 +314,7 @@ func (h *Headscale) handleAuthKey(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Err(err). Err(err).
Msg("Failed to refresh node") Msg("Failed to refresh node")
@ -322,7 +322,7 @@ func (h *Headscale) handleAuthKey(
return return
} }
aclTags := pak.Proto().GetAclTags() aclTags := pak.Proto().AclTags
if len(aclTags) > 0 { if len(aclTags) > 0 {
// This conditional preserves the existing behaviour, although SaaS would reset the tags on auth-key login // This conditional preserves the existing behaviour, although SaaS would reset the tags on auth-key login
err = h.db.SetTags(node, aclTags) err = h.db.SetTags(node, aclTags)
@ -330,6 +330,7 @@ func (h *Headscale) handleAuthKey(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Strs("aclTags", aclTags). Strs("aclTags", aclTags).
Err(err). Err(err).
@ -341,10 +342,11 @@ func (h *Headscale) handleAuthKey(
} else { } else {
now := time.Now().UTC() now := time.Now().UTC()
givenName, err := h.db.GenerateGivenName(machineKey, registerRequest.Hostinfo.Hostname) givenName, err := h.db.GenerateGivenName(util.MachinePublicKeyStripPrefix(machineKey), registerRequest.Hostinfo.Hostname)
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Str("func", "RegistrationHandler"). Str("func", "RegistrationHandler").
Str("hostinfo.name", registerRequest.Hostinfo.Hostname). Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
Err(err). Err(err).
@ -357,13 +359,13 @@ func (h *Headscale) handleAuthKey(
Hostname: registerRequest.Hostinfo.Hostname, Hostname: registerRequest.Hostinfo.Hostname,
GivenName: givenName, GivenName: givenName,
UserID: pak.User.ID, UserID: pak.User.ID,
MachineKey: machineKey, MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
Expiry: &registerRequest.Expiry, Expiry: &registerRequest.Expiry,
NodeKey: nodeKey, NodeKey: nodeKey,
LastSeen: &now, LastSeen: &now,
AuthKeyID: uint(pak.ID), AuthKeyID: uint(pak.ID),
ForcedTags: pak.Proto().GetAclTags(), ForcedTags: pak.Proto().AclTags,
} }
node, err = h.db.RegisterNode( node, err = h.db.RegisterNode(
@ -372,6 +374,7 @@ func (h *Headscale) handleAuthKey(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("could not register node") Msg("could not register node")
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
@ -386,6 +389,7 @@ func (h *Headscale) handleAuthKey(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Failed to use pre-auth key") Msg("Failed to use pre-auth key")
nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name). nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
@ -401,10 +405,11 @@ func (h *Headscale) handleAuthKey(
// Otherwise it will need to exec `tailscale up` twice to fetch the *LoginName* // Otherwise it will need to exec `tailscale up` twice to fetch the *LoginName*
resp.Login = *pak.User.TailscaleLogin() resp.Login = *pak.User.TailscaleLogin()
respBody, err := json.Marshal(resp) respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname). Str("node", registerRequest.Hostinfo.Hostname).
Err(err). Err(err).
Msg("Cannot encode message") Msg("Cannot encode message")
@ -422,105 +427,52 @@ func (h *Headscale) handleAuthKey(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Failed to write response") Msg("Failed to write response")
} }
log.Info(). log.Info().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname). Str("node", registerRequest.Hostinfo.Hostname).
Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")). Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")).
Msg("Successfully authenticated via AuthKey") Msg("Successfully authenticated via AuthKey")
} }
// handleNewNode returns the authorisation URL to the client based on what type // handleNewNode exposes for both legacy and Noise the functionality to get a URL
// of registration headscale is configured with.	// for authorizing the node. This URL is then shown to the user by the local Tailscale client.
// This URL is then shown to the user by the local Tailscale client.
func (h *Headscale) handleNewNode( func (h *Headscale) handleNewNode(
writer http.ResponseWriter, writer http.ResponseWriter,
registerRequest tailcfg.RegisterRequest, registerRequest tailcfg.RegisterRequest,
machineKey key.MachinePublic, machineKey key.MachinePublic,
isNoise bool,
) { ) {
logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)
resp := tailcfg.RegisterResponse{} resp := tailcfg.RegisterResponse{}
// The node registration is new, redirect the client to the registration URL // The node registration is new, redirect the client to the registration URL
logTrace("The node seems to be new, sending auth url") log.Debug().
Caller().
Bool("noise", isNoise).
Str("node", registerRequest.Hostinfo.Hostname).
Msg("The node seems to be new, sending auth url")
if h.oauth2Config != nil { if h.oauth2Config != nil {
resp.AuthURL = fmt.Sprintf( resp.AuthURL = fmt.Sprintf(
"%s/oidc/register/%s", "%s/oidc/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"), strings.TrimSuffix(h.cfg.ServerURL, "/"),
machineKey.String(), registerRequest.NodeKey,
) )
} else { } else {
resp.AuthURL = fmt.Sprintf("%s/register/%s", resp.AuthURL = fmt.Sprintf("%s/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"), strings.TrimSuffix(h.cfg.ServerURL, "/"),
machineKey.String()) registerRequest.NodeKey)
} }
respBody, err := json.Marshal(resp) respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
if err != nil {
logErr(err, "Cannot encode message")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
return
}
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
writer.WriteHeader(http.StatusOK)
_, err = writer.Write(respBody)
if err != nil {
logErr(err, "Failed to write response")
}
logInfo(fmt.Sprintf("Successfully sent auth url: %s", resp.AuthURL))
}
func (h *Headscale) handleNodeLogOut(
writer http.ResponseWriter,
node types.Node,
machineKey key.MachinePublic,
) {
resp := tailcfg.RegisterResponse{}
log.Info().
Str("node", node.Hostname).
Msg("Client requested logout")
now := time.Now()
err := h.db.NodeSetExpiry(&node, now)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to expire node")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
return
}
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{
{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: &now,
},
},
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
resp.AuthURL = ""
resp.MachineAuthorized = false
resp.NodeKeyExpired = true
resp.User = *node.User.TailscaleUser()
respBody, err := json.Marshal(resp)
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Cannot encode message") Msg("Cannot encode message")
http.Error(writer, "Internal server error", http.StatusInternalServerError) http.Error(writer, "Internal server error", http.StatusInternalServerError)
@ -533,6 +485,68 @@ func (h *Headscale) handleNodeLogOut(
_, err = writer.Write(respBody) _, err = writer.Write(respBody)
if err != nil { if err != nil {
log.Error(). log.Error().
Bool("noise", isNoise).
Caller().
Err(err).
Msg("Failed to write response")
}
log.Info().
Caller().
Bool("noise", isNoise).
Str("AuthURL", resp.AuthURL).
Str("node", registerRequest.Hostinfo.Hostname).
Msg("Successfully sent auth url")
}
func (h *Headscale) handleNodeLogOut(
writer http.ResponseWriter,
node types.Node,
machineKey key.MachinePublic,
isNoise bool,
) {
resp := tailcfg.RegisterResponse{}
log.Info().
Bool("noise", isNoise).
Str("node", node.Hostname).
Msg("Client requested logout")
now := time.Now()
err := h.db.NodeSetExpiry(&node, now)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Failed to expire node")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
return
}
resp.AuthURL = ""
resp.MachineAuthorized = false
resp.NodeKeyExpired = true
resp.User = *node.User.TailscaleUser()
respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
if err != nil {
log.Error().
Caller().
Bool("noise", isNoise).
Err(err).
Msg("Cannot encode message")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
return
}
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
writer.WriteHeader(http.StatusOK)
_, err = writer.Write(respBody)
if err != nil {
log.Error().
Bool("noise", isNoise).
Caller(). Caller().
Err(err). Err(err).
Msg("Failed to write response") Msg("Failed to write response")
@ -554,6 +568,7 @@ func (h *Headscale) handleNodeLogOut(
log.Info(). log.Info().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Msg("Successfully logged out") Msg("Successfully logged out")
} }
@ -562,12 +577,14 @@ func (h *Headscale) handleNodeWithValidRegistration(
writer http.ResponseWriter, writer http.ResponseWriter,
node types.Node, node types.Node,
machineKey key.MachinePublic, machineKey key.MachinePublic,
isNoise bool,
) { ) {
resp := tailcfg.RegisterResponse{} resp := tailcfg.RegisterResponse{}
// The node registration is valid, respond with redirect to /map // The node registration is valid, respond with redirect to /map
log.Debug(). log.Debug().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Msg("Client is registered and we have the current NodeKey. All clear to /map") Msg("Client is registered and we have the current NodeKey. All clear to /map")
@ -576,10 +593,11 @@ func (h *Headscale) handleNodeWithValidRegistration(
resp.User = *node.User.TailscaleUser() resp.User = *node.User.TailscaleUser()
resp.Login = *node.User.TailscaleLogin() resp.Login = *node.User.TailscaleLogin()
respBody, err := json.Marshal(resp) respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Cannot encode message") Msg("Cannot encode message")
nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name). nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name).
@ -597,12 +615,14 @@ func (h *Headscale) handleNodeWithValidRegistration(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Failed to write response") Msg("Failed to write response")
} }
log.Info(). log.Info().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Msg("Node successfully authorized") Msg("Node successfully authorized")
} }
@ -612,11 +632,13 @@ func (h *Headscale) handleNodeKeyRefresh(
registerRequest tailcfg.RegisterRequest, registerRequest tailcfg.RegisterRequest,
node types.Node, node types.Node,
machineKey key.MachinePublic, machineKey key.MachinePublic,
isNoise bool,
) { ) {
resp := tailcfg.RegisterResponse{} resp := tailcfg.RegisterResponse{}
log.Info(). log.Info().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Msg("We have the OldNodeKey in the database. This is a key refresh") Msg("We have the OldNodeKey in the database. This is a key refresh")
@ -633,10 +655,11 @@ func (h *Headscale) handleNodeKeyRefresh(
resp.AuthURL = "" resp.AuthURL = ""
resp.User = *node.User.TailscaleUser() resp.User = *node.User.TailscaleUser()
respBody, err := json.Marshal(resp) respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Cannot encode message") Msg("Cannot encode message")
http.Error(writer, "Internal server error", http.StatusInternalServerError) http.Error(writer, "Internal server error", http.StatusInternalServerError)
@ -650,12 +673,14 @@ func (h *Headscale) handleNodeKeyRefresh(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Failed to write response") Msg("Failed to write response")
} }
log.Info(). log.Info().
Caller(). Caller().
Bool("noise", isNoise).
Str("node_key", registerRequest.NodeKey.ShortString()). Str("node_key", registerRequest.NodeKey.ShortString()).
Str("old_node_key", registerRequest.OldNodeKey.ShortString()). Str("old_node_key", registerRequest.OldNodeKey.ShortString()).
Str("node", node.Hostname). Str("node", node.Hostname).
@ -667,11 +692,12 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
registerRequest tailcfg.RegisterRequest, registerRequest tailcfg.RegisterRequest,
node types.Node, node types.Node,
machineKey key.MachinePublic, machineKey key.MachinePublic,
isNoise bool,
) { ) {
resp := tailcfg.RegisterResponse{} resp := tailcfg.RegisterResponse{}
if registerRequest.Auth.AuthKey != "" { if registerRequest.Auth.AuthKey != "" {
h.handleAuthKey(writer, registerRequest, machineKey) h.handleAuthKey(writer, registerRequest, machineKey, isNoise)
return return
} }
@ -679,6 +705,7 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
// The client has registered before, but has expired or logged out // The client has registered before, but has expired or logged out
log.Trace(). log.Trace().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Str("machine_key", machineKey.ShortString()). Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()). Str("node_key", registerRequest.NodeKey.ShortString()).
@ -688,17 +715,18 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
if h.oauth2Config != nil { if h.oauth2Config != nil {
resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s", resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"), strings.TrimSuffix(h.cfg.ServerURL, "/"),
machineKey.String()) registerRequest.NodeKey)
} else { } else {
resp.AuthURL = fmt.Sprintf("%s/register/%s", resp.AuthURL = fmt.Sprintf("%s/register/%s",
strings.TrimSuffix(h.cfg.ServerURL, "/"), strings.TrimSuffix(h.cfg.ServerURL, "/"),
machineKey.String()) registerRequest.NodeKey)
} }
respBody, err := json.Marshal(resp) respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Cannot encode message") Msg("Cannot encode message")
nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name). nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name).
@ -716,12 +744,14 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Err(err). Err(err).
Msg("Failed to write response") Msg("Failed to write response")
} }
log.Trace(). log.Trace().
Caller(). Caller().
Bool("noise", isNoise).
Str("machine_key", machineKey.ShortString()). Str("machine_key", machineKey.ShortString()).
Str("node_key", registerRequest.NodeKey.ShortString()). Str("node_key", registerRequest.NodeKey.ShortString()).
Str("node_key_old", registerRequest.OldNodeKey.ShortString()). Str("node_key_old", registerRequest.OldNodeKey.ShortString()).

61
hscontrol/auth_legacy.go Normal file
View file

@ -0,0 +1,61 @@
//go:build ts2019
package hscontrol
import (
"io"
"net/http"
"github.com/gorilla/mux"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
// RegistrationHandler handles the actual registration process of a machine
// Endpoint /machine/:mkey.
func (h *Headscale) RegistrationHandler(
writer http.ResponseWriter,
req *http.Request,
) {
vars := mux.Vars(req)
machineKeyStr, ok := vars["mkey"]
if !ok || machineKeyStr == "" {
log.Error().
Str("handler", "RegistrationHandler").
Msg("No machine ID in request")
http.Error(writer, "No machine ID in request", http.StatusBadRequest)
return
}
body, _ := io.ReadAll(req.Body)
var machineKey key.MachinePublic
err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(machineKeyStr)))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot parse machine key")
nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
http.Error(writer, "Cannot parse machine key", http.StatusBadRequest)
return
}
registerRequest := tailcfg.RegisterRequest{}
err = util.DecodeAndUnmarshalNaCl(body, &registerRequest, &machineKey, h.privateKey2019)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot decode message")
nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
http.Error(writer, "Cannot decode message", http.StatusBadRequest)
return
}
h.handleRegister(writer, req, registerRequest, machineKey, false)
}
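
Editor's note: the new hscontrol/auth_legacy.go is guarded by a //go:build ts2019 constraint, which is why the integration workflows pass -tags ts2019. A minimal stand-alone illustration of that mechanism follows, split across three hypothetical files; build with `go build -tags ts2019` to flip the constant.

// feature_legacy.go — compiled only when building with: go build -tags ts2019
//go:build ts2019

package main

const legacyProtocolEnabled = true

// feature_modern.go — the default build, no tag required
//go:build !ts2019

package main

const legacyProtocolEnabled = false

// main.go — shared code that works either way
package main

import "fmt"

func main() {
	fmt.Println("ts2019 legacy endpoint compiled in:", legacyProtocolEnabled)
}
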

View file

@ -39,19 +39,7 @@ func (ns *noiseServer) NoiseRegistrationHandler(
return return
} }
// Reject unsupported versions
if registerRequest.Version < MinimumCapVersion {
log.Info().
Caller().
Int("min_version", int(MinimumCapVersion)).
Int("client_version", int(registerRequest.Version)).
Msg("unsupported client connected")
http.Error(writer, "Internal error", http.StatusBadRequest)
return
}
ns.nodeKey = registerRequest.NodeKey ns.nodeKey = registerRequest.NodeKey
ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer()) ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer(), true)
} }
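
Editor's note: one side of this hunk rejects clients whose tailcfg capability version is below MinimumCapVersion before touching the registration logic. A sketch of that gate in isolation follows; the constant's value here is a placeholder, not headscale's.

package sketch

import (
	"net/http"

	"tailscale.com/tailcfg"
)

// MinimumCapVersion's real value lives in the headscale source; 58 is only a
// placeholder for this sketch.
const MinimumCapVersion tailcfg.CapabilityVersion = 58

// rejectOldClients reports whether the request was rejected for being below
// the minimum supported capability version.
func rejectOldClients(writer http.ResponseWriter, version tailcfg.CapabilityVersion) bool {
	if version < MinimumCapVersion {
		http.Error(writer, "Internal error", http.StatusBadRequest)
		return true
	}
	return false
}
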

View file

@ -35,6 +35,9 @@ func (s *Suite) TestGetUsedIps(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -80,6 +83,9 @@ func (s *Suite) TestGetMultiIp(c *check.C) {
node := types.Node{ node := types.Node{
ID: uint64(index), ID: uint64(index),
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -167,6 +173,9 @@ func (s *Suite) TestGetAvailableIpNodeWithoutIP(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,

View file

@ -2,16 +2,13 @@ package db
import ( import (
"context" "context"
"database/sql"
"errors" "errors"
"fmt" "fmt"
"net/netip" "net/netip"
"strings"
"sync" "sync"
"time" "time"
"github.com/glebarez/sqlite" "github.com/glebarez/sqlite"
"github.com/go-gormigrate/gormigrate/v2"
"github.com/juanfont/headscale/hscontrol/notifier" "github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/hscontrol/util"
@ -22,11 +19,15 @@ import (
) )
const ( const (
dbVersion = "1"
Postgres = "postgres" Postgres = "postgres"
Sqlite = "sqlite3" Sqlite = "sqlite3"
) )
var errDatabaseNotSupported = errors.New("database type not supported") var (
errValueNotFound = errors.New("not found")
errDatabaseNotSupported = errors.New("database type not supported")
)
// KV is a key-value store in a psql table. For future use... // KV is a key-value store in a psql table. For future use...
// TODO(kradalby): Is this used for anything? // TODO(kradalby): Is this used for anything?
@ -61,125 +62,80 @@ func NewHeadscaleDatabase(
return nil, err return nil, err
} }
migrations := gormigrate.New(dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{ db := HSDatabase{
// New migrations should be added as transactions at the end of this list. db: dbConn,
// The initial commit here is quite messy, completely out of order and notifier: notifier,
// has no versioning and is the tech debt of not having versioned migrations
// prior to this point. This first migration is all DB changes to bring a DB ipPrefixes: ipPrefixes,
// up to 0.23.0. baseDomain: baseDomain,
{
ID: "202312101416",
Migrate: func(tx *gorm.DB) error {
if dbType == Postgres {
tx.Exec(`create extension if not exists "uuid-ossp";`)
} }
_ = tx.Migrator().RenameTable("namespaces", "users") log.Debug().Msgf("database %#v", dbConn)
if dbType == Postgres {
dbConn.Exec(`create extension if not exists "uuid-ossp";`)
}
_ = dbConn.Migrator().RenameTable("namespaces", "users")
// the big rename from Machine to Node // the big rename from Machine to Node
_ = tx.Migrator().RenameTable("machines", "nodes") _ = dbConn.Migrator().RenameTable("machines", "nodes")
_ = tx.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id") _ = dbConn.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id")
err = tx.AutoMigrate(types.User{}) err = dbConn.AutoMigrate(types.User{})
if err != nil { if err != nil {
return err return nil, err
} }
_ = tx.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id") _ = dbConn.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id")
_ = tx.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id") _ = dbConn.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id")
_ = tx.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses") _ = dbConn.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses")
_ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname") _ = dbConn.Migrator().RenameColumn(&types.Node{}, "name", "hostname")
// GivenName is used as the primary source of DNS names, make sure // GivenName is used as the primary source of DNS names, make sure
// the field is populated and normalized if it was not when the // the field is populated and normalized if it was not when the
// node was registered. // node was registered.
_ = tx.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name") _ = dbConn.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name")
			// If the Node table has a column for registered,	// If the Node table has a column for registered,
			// find all occurrences of "false" and drop them. Then	// find all occurrences of "false" and drop them. Then
// remove the column. // remove the column.
if tx.Migrator().HasColumn(&types.Node{}, "registered") { if dbConn.Migrator().HasColumn(&types.Node{}, "registered") {
log.Info(). log.Info().
Msg(`Database has legacy "registered" column in node, removing...`) Msg(`Database has legacy "registered" column in node, removing...`)
nodes := types.Nodes{} nodes := types.Nodes{}
if err := tx.Not("registered").Find(&nodes).Error; err != nil { if err := dbConn.Not("registered").Find(&nodes).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db") log.Error().Err(err).Msg("Error accessing db")
} }
for _, node := range nodes { for _, node := range nodes {
log.Info(). log.Info().
Str("node", node.Hostname). Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()). Str("machine_key", node.MachineKey).
Msg("Deleting unregistered node") Msg("Deleting unregistered node")
if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil { if err := dbConn.Delete(&types.Node{}, node.ID).Error; err != nil {
log.Error(). log.Error().
Err(err). Err(err).
Str("node", node.Hostname). Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()). Str("machine_key", node.MachineKey).
Msg("Error deleting unregistered node") Msg("Error deleting unregistered node")
} }
} }
err := tx.Migrator().DropColumn(&types.Node{}, "registered") err := dbConn.Migrator().DropColumn(&types.Node{}, "registered")
if err != nil { if err != nil {
log.Error().Err(err).Msg("Error dropping registered column") log.Error().Err(err).Msg("Error dropping registered column")
} }
} }
err = tx.AutoMigrate(&types.Route{}) err = dbConn.AutoMigrate(&types.Route{})
if err != nil { if err != nil {
return err return nil, err
} }
err = tx.AutoMigrate(&types.Node{}) if dbConn.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
if err != nil {
return err
}
// Ensure all keys have correct prefixes
// https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35
type result struct {
ID uint64
MachineKey string
NodeKey string
DiscoKey string
}
var results []result
err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes").Find(&results).Error
if err != nil {
return err
}
for _, node := range results {
mKey := node.MachineKey
if !strings.HasPrefix(node.MachineKey, "mkey:") {
mKey = "mkey:" + node.MachineKey
}
nKey := node.NodeKey
if !strings.HasPrefix(node.NodeKey, "nodekey:") {
nKey = "nodekey:" + node.NodeKey
}
dKey := node.DiscoKey
if !strings.HasPrefix(node.DiscoKey, "discokey:") {
dKey = "discokey:" + node.DiscoKey
}
err := tx.Exec(
"UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id",
sql.Named("mKey", mKey),
sql.Named("nKey", nKey),
sql.Named("dKey", dKey),
sql.Named("id", node.ID),
).Error
if err != nil {
return err
}
}
if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
log.Info().Msgf("Database has legacy enabled_routes column in node, migrating...") log.Info().Msgf("Database has legacy enabled_routes column in node, migrating...")
type NodeAux struct { type NodeAux struct {
@ -188,7 +144,7 @@ func NewHeadscaleDatabase(
} }
nodesAux := []NodeAux{} nodesAux := []NodeAux{}
err := tx.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error err := dbConn.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error
if err != nil { if err != nil {
log.Fatal().Err(err).Msg("Error accessing db") log.Fatal().Err(err).Msg("Error accessing db")
} }
@ -203,7 +159,7 @@ func NewHeadscaleDatabase(
continue continue
} }
err = tx.Preload("Node"). err = dbConn.Preload("Node").
Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)). Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)).
First(&types.Route{}). First(&types.Route{}).
Error Error
@ -221,7 +177,7 @@ func NewHeadscaleDatabase(
Enabled: true, Enabled: true,
Prefix: types.IPPrefix(prefix), Prefix: types.IPPrefix(prefix),
} }
if err := tx.Create(&route).Error; err != nil { if err := dbConn.Create(&route).Error; err != nil {
log.Error().Err(err).Msg("Error creating route") log.Error().Err(err).Msg("Error creating route")
} else { } else {
log.Info(). log.Info().
@ -232,15 +188,20 @@ func NewHeadscaleDatabase(
} }
} }
err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes") err = dbConn.Migrator().DropColumn(&types.Node{}, "enabled_routes")
if err != nil { if err != nil {
log.Error().Err(err).Msg("Error dropping enabled_routes column") log.Error().Err(err).Msg("Error dropping enabled_routes column")
} }
} }
if tx.Migrator().HasColumn(&types.Node{}, "given_name") { err = dbConn.AutoMigrate(&types.Node{})
if err != nil {
return nil, err
}
if dbConn.Migrator().HasColumn(&types.Node{}, "given_name") {
nodes := types.Nodes{} nodes := types.Nodes{}
if err := tx.Find(&nodes).Error; err != nil { if err := dbConn.Find(&nodes).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db") log.Error().Err(err).Msg("Error accessing db")
} }
@ -257,9 +218,7 @@ func NewHeadscaleDatabase(
Msg("Failed to normalize node hostname in DB migration") Msg("Failed to normalize node hostname in DB migration")
} }
err = tx.Model(nodes[item]).Updates(types.Node{ err = db.RenameNode(nodes[item], normalizedHostname)
GivenName: normalizedHostname,
}).Error
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
@ -271,58 +230,30 @@ func NewHeadscaleDatabase(
} }
} }
err = tx.AutoMigrate(&KV{}) err = dbConn.AutoMigrate(&KV{})
if err != nil { if err != nil {
return err return nil, err
} }
err = tx.AutoMigrate(&types.PreAuthKey{}) err = dbConn.AutoMigrate(&types.PreAuthKey{})
if err != nil { if err != nil {
return err return nil, err
} }
err = tx.AutoMigrate(&types.PreAuthKeyACLTag{}) err = dbConn.AutoMigrate(&types.PreAuthKeyACLTag{})
if err != nil { if err != nil {
return err return nil, err
} }
_ = tx.Migrator().DropTable("shared_machines") _ = dbConn.Migrator().DropTable("shared_machines")
err = tx.AutoMigrate(&types.APIKey{}) err = dbConn.AutoMigrate(&types.APIKey{})
if err != nil { if err != nil {
return err return nil, err
} }
return nil // TODO(kradalby): is this needed?
}, err = db.setValue("db_version", dbVersion)
Rollback: func(tx *gorm.DB) error {
return nil
},
},
{
// drop key-value table, it is not used, and has not contained
// useful data for a long time or ever.
ID: "202312101430",
Migrate: func(tx *gorm.DB) error {
return tx.Migrator().DropTable("kvs")
},
Rollback: func(tx *gorm.DB) error {
return nil
},
},
})
if err = migrations.Migrate(); err != nil {
log.Fatal().Err(err).Msgf("Migration failed: %v", err)
}
db := HSDatabase{
db: dbConn,
notifier: notifier,
ipPrefixes: ipPrefixes,
baseDomain: baseDomain,
}
return &db, err return &db, err
} }
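
Editor's note: one side wraps the whole schema history in go-gormigrate with the two migration IDs seen above, while the other runs AutoMigrate calls inline and tracks a db_version key instead. Below is a trimmed sketch of the gormigrate shape, with the body of each step reduced to a single call and rollbacks left as no-ops.

package sketch

import (
	"github.com/go-gormigrate/gormigrate/v2"
	"gorm.io/gorm"
)

// migrate shows the versioned-migration pattern: each step has a stable ID
// plus Migrate/Rollback funcs, and gormigrate records which IDs have run.
func migrate(dbConn *gorm.DB) error {
	migrations := gormigrate.New(dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{
		{
			// Catch-all migration bringing old databases up to 0.23.0.
			ID: "202312101416",
			Migrate: func(tx *gorm.DB) error {
				_ = tx.Migrator().RenameTable("machines", "nodes")
				return nil
			},
			Rollback: func(tx *gorm.DB) error { return nil },
		},
		{
			// Drop the unused key-value table.
			ID: "202312101430",
			Migrate: func(tx *gorm.DB) error {
				return tx.Migrator().DropTable("kvs")
			},
			Rollback: func(tx *gorm.DB) error { return nil },
		},
	})

	return migrations.Migrate()
}
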
@ -373,6 +304,39 @@ func openDB(dbType, connectionAddr string, debug bool) (*gorm.DB, error) {
) )
} }
// getValue returns the value for the given key in KV.
func (hsdb *HSDatabase) getValue(key string) (string, error) {
var row KV
if result := hsdb.db.First(&row, "key = ?", key); errors.Is(
result.Error,
gorm.ErrRecordNotFound,
) {
return "", errValueNotFound
}
return row.Value, nil
}
// setValue sets value for the given key in KV.
func (hsdb *HSDatabase) setValue(key string, value string) error {
keyValue := KV{
Key: key,
Value: value,
}
if _, err := hsdb.getValue(key); err == nil {
hsdb.db.Model(&keyValue).Where("key = ?", key).Update("value", value)
return nil
}
if err := hsdb.db.Create(keyValue).Error; err != nil {
return fmt.Errorf("failed to create key value pair in the database: %w", err)
}
return nil
}
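
Editor's note: the getValue/setValue helpers added on one side amount to a tiny upsert over the KV table. A self-contained sketch of the same logic against an in-memory database follows, using the same glebarez/sqlite driver this file imports; the KV struct definition here is a stand-in for headscale's.

package main

import (
	"errors"
	"fmt"

	"github.com/glebarez/sqlite"
	"gorm.io/gorm"
)

type KV struct {
	Key   string
	Value string
}

// setValue inserts the pair, or updates the value if the key already exists.
func setValue(db *gorm.DB, key, value string) error {
	var existing KV
	if err := db.First(&existing, "key = ?", key).Error; err == nil {
		return db.Model(&KV{}).Where("key = ?", key).Update("value", value).Error
	} else if !errors.Is(err, gorm.ErrRecordNotFound) {
		return err
	}
	return db.Create(&KV{Key: key, Value: value}).Error
}

// getValue returns the stored value, or gorm.ErrRecordNotFound.
func getValue(db *gorm.DB, key string) (string, error) {
	var row KV
	if err := db.First(&row, "key = ?", key).Error; err != nil {
		return "", err
	}
	return row.Value, nil
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&KV{}); err != nil {
		panic(err)
	}
	if err := setValue(db, "db_version", "1"); err != nil {
		panic(err)
	}
	v, _ := getValue(db, "db_version")
	fmt.Println("db_version =", v)
}
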
func (hsdb *HSDatabase) PingDB(ctx context.Context) error { func (hsdb *HSDatabase) PingDB(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, time.Second) ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel() defer cancel()

View file

@ -55,12 +55,17 @@ func (hsdb *HSDatabase) listPeers(node *types.Node) (types.Nodes, error) {
Preload("User"). Preload("User").
Preload("Routes"). Preload("Routes").
Where("node_key <> ?", Where("node_key <> ?",
node.NodeKey.String()).Find(&nodes).Error; err != nil { node.NodeKey).Find(&nodes).Error; err != nil {
return types.Nodes{}, err return types.Nodes{}, err
} }
sort.Slice(nodes, func(i, j int) bool { return nodes[i].ID < nodes[j].ID }) sort.Slice(nodes, func(i, j int) bool { return nodes[i].ID < nodes[j].ID })
log.Trace().
Caller().
Str("node", node.Hostname).
Msgf("Found peers: %s", nodes.String())
return nodes, nil return nodes, nil
} }
@ -171,19 +176,13 @@ func (hsdb *HSDatabase) GetNodeByMachineKey(
hsdb.mu.RLock() hsdb.mu.RLock()
defer hsdb.mu.RUnlock() defer hsdb.mu.RUnlock()
return hsdb.getNodeByMachineKey(machineKey)
}
func (hsdb *HSDatabase) getNodeByMachineKey(
machineKey key.MachinePublic,
) (*types.Node, error) {
mach := types.Node{} mach := types.Node{}
if result := hsdb.db. if result := hsdb.db.
Preload("AuthKey"). Preload("AuthKey").
Preload("AuthKey.User"). Preload("AuthKey.User").
Preload("User"). Preload("User").
Preload("Routes"). Preload("Routes").
First(&mach, "machine_key = ?", machineKey.String()); result.Error != nil { First(&mach, "machine_key = ?", util.MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
return nil, result.Error return nil, result.Error
} }
@ -204,7 +203,7 @@ func (hsdb *HSDatabase) GetNodeByNodeKey(
Preload("User"). Preload("User").
Preload("Routes"). Preload("Routes").
First(&node, "node_key = ?", First(&node, "node_key = ?",
nodeKey.String()); result.Error != nil { util.NodePublicKeyStripPrefix(nodeKey)); result.Error != nil {
return nil, result.Error return nil, result.Error
} }
@ -225,9 +224,9 @@ func (hsdb *HSDatabase) GetNodeByAnyKey(
Preload("User"). Preload("User").
Preload("Routes"). Preload("Routes").
First(&node, "machine_key = ? OR node_key = ? OR node_key = ?", First(&node, "machine_key = ? OR node_key = ? OR node_key = ?",
machineKey.String(), util.MachinePublicKeyStripPrefix(machineKey),
nodeKey.String(), util.NodePublicKeyStripPrefix(nodeKey),
oldNodeKey.String()); result.Error != nil { util.NodePublicKeyStripPrefix(oldNodeKey)); result.Error != nil {
return nil, result.Error return nil, result.Error
} }
@ -253,10 +252,6 @@ func (hsdb *HSDatabase) SetTags(
hsdb.mu.Lock() hsdb.mu.Lock()
defer hsdb.mu.Unlock() defer hsdb.mu.Unlock()
if len(tags) == 0 {
return nil
}
newTags := []string{} newTags := []string{}
for _, tag := range tags { for _, tag := range tags {
if !util.StringOrPrefixListContains(newTags, tag) { if !util.StringOrPrefixListContains(newTags, tag) {
@ -270,14 +265,10 @@ func (hsdb *HSDatabase) SetTags(
return fmt.Errorf("failed to update tags for node in the database: %w", err) return fmt.Errorf("failed to update tags for node in the database: %w", err)
} }
stateUpdate := types.StateUpdate{ hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
Type: types.StatePeerChanged, Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node}, Changed: types.Nodes{node},
Message: "called from db.SetTags", }, node.MachineKey)
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
return nil return nil
} }
@ -310,14 +301,10 @@ func (hsdb *HSDatabase) RenameNode(node *types.Node, newName string) error {
return fmt.Errorf("failed to rename node in the database: %w", err) return fmt.Errorf("failed to rename node in the database: %w", err)
} }
stateUpdate := types.StateUpdate{ hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
Type: types.StatePeerChanged, Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node}, Changed: types.Nodes{node},
Message: "called from db.RenameNode", }, node.MachineKey)
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
return nil return nil
} }
@ -340,28 +327,10 @@ func (hsdb *HSDatabase) nodeSetExpiry(node *types.Node, expiry time.Time) error
) )
} }
node.Expiry = &expiry hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
Type: types.StatePeerChanged,
stateSelfUpdate := types.StateUpdate{ Changed: types.Nodes{node},
Type: types.StateSelfUpdate, }, node.MachineKey)
ChangeNodes: types.Nodes{node},
}
if stateSelfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
}
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{
{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: &expiry,
},
},
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
return nil return nil
} }
@ -385,13 +354,10 @@ func (hsdb *HSDatabase) deleteNode(node *types.Node) error {
return err return err
} }
stateUpdate := types.StateUpdate{ hsdb.notifier.NotifyAll(types.StateUpdate{
Type: types.StatePeerRemoved, Type: types.StatePeerRemoved,
Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)}, Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)},
} })
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
return nil return nil
} }
@ -410,7 +376,7 @@ func (hsdb *HSDatabase) UpdateLastSeen(node *types.Node) error {
func (hsdb *HSDatabase) RegisterNodeFromAuthCallback( func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
cache *cache.Cache, cache *cache.Cache,
mkey key.MachinePublic, nodeKeyStr string,
userName string, userName string,
nodeExpiry *time.Time, nodeExpiry *time.Time,
registrationMethod string, registrationMethod string,
@ -418,14 +384,20 @@ func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
hsdb.mu.Lock() hsdb.mu.Lock()
defer hsdb.mu.Unlock() defer hsdb.mu.Unlock()
nodeKey := key.NodePublic{}
err := nodeKey.UnmarshalText([]byte(nodeKeyStr))
if err != nil {
return nil, err
}
log.Debug(). log.Debug().
Str("machine_key", mkey.ShortString()). Str("nodeKey", nodeKey.ShortString()).
Str("userName", userName). Str("userName", userName).
Str("registrationMethod", registrationMethod). Str("registrationMethod", registrationMethod).
Str("expiresAt", fmt.Sprintf("%v", nodeExpiry)). Str("expiresAt", fmt.Sprintf("%v", nodeExpiry)).
Msg("Registering node from API/CLI or auth callback") Msg("Registering node from API/CLI or auth callback")
if nodeInterface, ok := cache.Get(mkey.String()); ok { if nodeInterface, ok := cache.Get(util.NodePublicKeyStripPrefix(nodeKey)); ok {
if registrationNode, ok := nodeInterface.(types.Node); ok { if registrationNode, ok := nodeInterface.(types.Node); ok {
user, err := hsdb.getUser(userName) user, err := hsdb.getUser(userName)
if err != nil { if err != nil {
@ -453,7 +425,7 @@ func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
) )
if err == nil { if err == nil {
cache.Delete(mkey.String()) cache.Delete(nodeKeyStr)
} }
return node, err return node, err
@ -476,8 +448,8 @@ func (hsdb *HSDatabase) RegisterNode(node types.Node) (*types.Node, error) {
func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) { func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) {
log.Debug(). log.Debug().
Str("node", node.Hostname). Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()). Str("machine_key", node.MachineKey).
Str("node_key", node.NodeKey.ShortString()). Str("node_key", node.NodeKey).
Str("user", node.User.Name). Str("user", node.User.Name).
Msg("Registering node") Msg("Registering node")
@ -492,8 +464,8 @@ func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) {
log.Trace(). log.Trace().
Caller(). Caller().
Str("node", node.Hostname). Str("node", node.Hostname).
Str("machine_key", node.MachineKey.ShortString()). Str("machine_key", node.MachineKey).
Str("node_key", node.NodeKey.ShortString()). Str("node_key", node.NodeKey).
Str("user", node.User.Name). Str("user", node.User.Name).
Msg("Node authorized again") Msg("Node authorized again")
@ -535,7 +507,7 @@ func (hsdb *HSDatabase) NodeSetNodeKey(node *types.Node, nodeKey key.NodePublic)
defer hsdb.mu.Unlock() defer hsdb.mu.Unlock()
if err := hsdb.db.Model(node).Updates(types.Node{ if err := hsdb.db.Model(node).Updates(types.Node{
NodeKey: nodeKey, NodeKey: util.NodePublicKeyStripPrefix(nodeKey),
}).Error; err != nil { }).Error; err != nil {
return err return err
} }
@ -552,7 +524,7 @@ func (hsdb *HSDatabase) NodeSetMachineKey(
defer hsdb.mu.Unlock() defer hsdb.mu.Unlock()
if err := hsdb.db.Model(node).Updates(types.Node{ if err := hsdb.db.Model(node).Updates(types.Node{
MachineKey: machineKey, MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
}).Error; err != nil { }).Error; err != nil {
return err return err
} }
@ -663,6 +635,20 @@ func (hsdb *HSDatabase) IsRoutesEnabled(node *types.Node, routeStr string) bool
return false return false
} }
func (hsdb *HSDatabase) ListOnlineNodes(
node *types.Node,
) (map[tailcfg.NodeID]bool, error) {
hsdb.mu.RLock()
defer hsdb.mu.RUnlock()
peers, err := hsdb.listPeers(node)
if err != nil {
return nil, err
}
return peers.OnlineNodeMap(), nil
}
// enableRoutes enables new routes based on a list of new routes. // enableRoutes enables new routes based on a list of new routes.
func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) error { func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) error {
newRoutes := make([]netip.Prefix, len(routeStrs)) newRoutes := make([]netip.Prefix, len(routeStrs))
@ -714,30 +700,10 @@ func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) erro
} }
} }
// Ensure the node has the latest routes when notifying the other hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
// nodes
nRoutes, err := hsdb.getNodeRoutes(node)
if err != nil {
return fmt.Errorf("failed to read back routes: %w", err)
}
node.Routes = nRoutes
log.Trace().
Caller().
Str("node", node.Hostname).
Strs("routes", routeStrs).
Msg("enabling routes")
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged, Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node}, Changed: types.Nodes{node},
Message: "called from db.enableRoutes", }, node.MachineKey)
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyWithIgnore(
stateUpdate, node.MachineKey.String())
}
return nil return nil
} }
@ -768,10 +734,7 @@ func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
return normalizedHostname, nil return normalizedHostname, nil
} }
func (hsdb *HSDatabase) GenerateGivenName( func (hsdb *HSDatabase) GenerateGivenName(machineKey string, suppliedName string) (string, error) {
mkey key.MachinePublic,
suppliedName string,
) (string, error) {
hsdb.mu.RLock() hsdb.mu.RLock()
defer hsdb.mu.RUnlock() defer hsdb.mu.RUnlock()
@ -786,14 +749,8 @@ func (hsdb *HSDatabase) GenerateGivenName(
return "", err return "", err
} }
var nodeFound *types.Node for _, node := range nodes {
for idx, node := range nodes { if node.MachineKey != machineKey && node.GivenName == givenName {
if node.GivenName == givenName {
nodeFound = nodes[idx]
}
}
if nodeFound != nil && nodeFound.MachineKey.String() != mkey.String() {
postfixedName, err := generateGivenName(suppliedName, true) postfixedName, err := generateGivenName(suppliedName, true)
if err != nil { if err != nil {
return "", err return "", err
@ -801,6 +758,7 @@ func (hsdb *HSDatabase) GenerateGivenName(
givenName = postfixedName givenName = postfixedName
} }
}
return givenName, nil return givenName, nil
} }
@ -866,30 +824,33 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
// checked everything. // checked everything.
started := time.Now() started := time.Now()
expiredNodes := make([]*types.Node, 0) users, err := hsdb.listUsers()
nodes, err := hsdb.listNodes()
if err != nil { if err != nil {
log.Error(). log.Error().Err(err).Msg("Error listing users")
Err(err).
Msg("Error listing nodes to find expired nodes")
return time.Unix(0, 0) return time.Unix(0, 0)
} }
for _, user := range users {
nodes, err := hsdb.listNodesByUser(user.Name)
if err != nil {
log.Error().
Err(err).
Str("user", user.Name).
Msg("Error listing nodes in user")
return time.Unix(0, 0)
}
expired := make([]tailcfg.NodeID, 0)
for index, node := range nodes { for index, node := range nodes {
if node.IsExpired() && if node.IsExpired() &&
// TODO(kradalby): Replace this, it is very spammy
// It will notify about all nodes that have expired.
// It should only notify about expired nodes since _last check_.
node.Expiry.After(lastCheck) { node.Expiry.After(lastCheck) {
expiredNodes = append(expiredNodes, &nodes[index]) expired = append(expired, tailcfg.NodeID(node.ID))
// Do not use setNodeExpiry as that has a notifier hook, which now := time.Now()
// can cause a deadlock, we are updating all changed nodes later err := hsdb.nodeSetExpiry(nodes[index], now)
// and there is no point in notifying twice. if err != nil {
if err := hsdb.db.Model(nodes[index]).Updates(types.Node{
Expiry: &started,
}).Error; err != nil {
log.Error(). log.Error().
Err(err). Err(err).
Str("node", node.Hostname). Str("node", node.Hostname).
@ -904,31 +865,11 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
} }
} }
expired := make([]*tailcfg.PeerChange, len(expiredNodes)) if len(expired) > 0 {
for idx, node := range expiredNodes { hsdb.notifier.NotifyAll(types.StateUpdate{
expired[idx] = &tailcfg.PeerChange{ Type: types.StatePeerRemoved,
NodeID: tailcfg.NodeID(node.ID), Removed: expired,
KeyExpiry: &started, })
}
}
// Inform the peers of a node with a lightweight update.
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: expired,
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
// Inform the node itself that it has expired.
for _, node := range expiredNodes {
stateSelfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if stateSelfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
} }
} }
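
A recurring theme in the node.go hunks above is the switch between string-typed machine/node keys and the typed key.MachinePublic / key.NodePublic from tailscale.com/types/key. As a rough standalone sketch, not code from either commit, this is how a key's textual form round-trips through String and UnmarshalText, the same calls the registration paths above rely on:

package main

import (
    "fmt"
    "log"

    "tailscale.com/types/key"
)

func main() {
    // Produce a machine key in its textual form, the representation the
    // string-based variant above stores in the registration cache.
    mkeyText := key.NewMachine().Public().String()

    // Parse it back into the typed form used by the key.MachinePublic
    // variant of RegisterNodeFromAuthCallback.
    var mkey key.MachinePublic
    if err := mkey.UnmarshalText([]byte(mkeyText)); err != nil {
        log.Fatalf("not a valid machine key: %v", err)
    }

    fmt.Println(mkey.ShortString())
}

The same pattern applies to node keys via key.NewNode() and key.NodePublic.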

View file

@ -12,7 +12,6 @@ import (
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/hscontrol/util"
"gopkg.in/check.v1" "gopkg.in/check.v1"
"tailscale.com/tailcfg"
"tailscale.com/types/key" "tailscale.com/types/key"
) )
@ -26,13 +25,11 @@ func (s *Suite) TestGetNode(c *check.C) {
_, err = db.GetNode("test", "testnode") _, err = db.GetNode("test", "testnode")
c.Assert(err, check.NotNil) c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := &types.Node{ node := &types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: "foo",
NodeKey: nodeKey.Public(), NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -54,13 +51,11 @@ func (s *Suite) TestGetNodeByID(c *check.C) {
_, err = db.GetNodeByID(0) _, err = db.GetNodeByID(0)
c.Assert(err, check.NotNil) c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: "foo",
NodeKey: nodeKey.Public(), NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -87,8 +82,9 @@ func (s *Suite) TestGetNodeByNodeKey(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: util.MachinePublicKeyStripPrefix(machineKey.Public()),
NodeKey: nodeKey.Public(), NodeKey: util.NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -117,8 +113,9 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: util.MachinePublicKeyStripPrefix(machineKey.Public()),
NodeKey: nodeKey.Public(), NodeKey: util.NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -133,14 +130,11 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {
func (s *Suite) TestHardDeleteNode(c *check.C) { func (s *Suite) TestHardDeleteNode(c *check.C) {
user, err := db.CreateUser("test") user, err := db.CreateUser("test")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: "foo",
NodeKey: nodeKey.Public(), NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode3", Hostname: "testnode3",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -166,13 +160,11 @@ func (s *Suite) TestListPeers(c *check.C) {
c.Assert(err, check.NotNil) c.Assert(err, check.NotNil)
for index := 0; index <= 10; index++ { for index := 0; index <= 10; index++ {
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{ node := types.Node{
ID: uint64(index), ID: uint64(index),
MachineKey: machineKey.Public(), MachineKey: "foo" + strconv.Itoa(index),
NodeKey: nodeKey.Public(), NodeKey: "bar" + strconv.Itoa(index),
DiscoKey: "faa" + strconv.Itoa(index),
Hostname: "testnode" + strconv.Itoa(index), Hostname: "testnode" + strconv.Itoa(index),
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -213,13 +205,11 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
c.Assert(err, check.NotNil) c.Assert(err, check.NotNil)
for index := 0; index <= 10; index++ { for index := 0; index <= 10; index++ {
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := types.Node{ node := types.Node{
ID: uint64(index), ID: uint64(index),
MachineKey: machineKey.Public(), MachineKey: "foo" + strconv.Itoa(index),
NodeKey: nodeKey.Public(), NodeKey: "bar" + strconv.Itoa(index),
DiscoKey: "faa" + strconv.Itoa(index),
IPAddresses: types.NodeAddresses{ IPAddresses: types.NodeAddresses{
netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))), netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))),
}, },
@ -298,13 +288,11 @@ func (s *Suite) TestExpireNode(c *check.C) {
_, err = db.GetNode("test", "testnode") _, err = db.GetNode("test", "testnode")
c.Assert(err, check.NotNil) c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := &types.Node{ node := &types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: "foo",
NodeKey: nodeKey.Public(), NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -357,15 +345,11 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
_, err = db.GetNode("user-1", "testnode") _, err = db.GetNode("user-1", "testnode")
c.Assert(err, check.NotNil) c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
machineKey2 := key.NewMachine()
node := &types.Node{ node := &types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: "node-key-1",
NodeKey: nodeKey.Public(), NodeKey: "node-key-1",
DiscoKey: "disco-key-1",
Hostname: "hostname-1", Hostname: "hostname-1",
GivenName: "hostname-1", GivenName: "hostname-1",
UserID: user1.ID, UserID: user1.ID,
@ -374,20 +358,25 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
} }
db.db.Save(node) db.db.Save(node)
givenName, err := db.GenerateGivenName(machineKey2.Public(), "hostname-2") givenName, err := db.GenerateGivenName("node-key-2", "hostname-2")
comment := check.Commentf("Same user, unique nodes, unique hostnames, no conflict") comment := check.Commentf("Same user, unique nodes, unique hostnames, no conflict")
c.Assert(err, check.IsNil, comment) c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Equals, "hostname-2", comment) c.Assert(givenName, check.Equals, "hostname-2", comment)
givenName, err = db.GenerateGivenName(machineKey.Public(), "hostname-1") givenName, err = db.GenerateGivenName("node-key-1", "hostname-1")
comment = check.Commentf("Same user, same node, same hostname, no conflict") comment = check.Commentf("Same user, same node, same hostname, no conflict")
c.Assert(err, check.IsNil, comment) c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Equals, "hostname-1", comment) c.Assert(givenName, check.Equals, "hostname-1", comment)
givenName, err = db.GenerateGivenName(machineKey2.Public(), "hostname-1") givenName, err = db.GenerateGivenName("node-key-2", "hostname-1")
comment = check.Commentf("Same user, unique nodes, same hostname, conflict") comment = check.Commentf("Same user, unique nodes, same hostname, conflict")
c.Assert(err, check.IsNil, comment) c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment) c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment)
givenName, err = db.GenerateGivenName("node-key-2", "hostname-1")
comment = check.Commentf("Unique users, unique nodes, same hostname, conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment)
} }
func (s *Suite) TestSetTags(c *check.C) { func (s *Suite) TestSetTags(c *check.C) {
@ -400,13 +389,11 @@ func (s *Suite) TestSetTags(c *check.C) {
_, err = db.GetNode("test", "testnode") _, err = db.GetNode("test", "testnode")
c.Assert(err, check.NotNil) c.Assert(err, check.NotNil)
nodeKey := key.NewNode()
machineKey := key.NewMachine()
node := &types.Node{ node := &types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: "foo",
NodeKey: nodeKey.Public(), NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -578,7 +565,6 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
nodeKey := key.NewNode() nodeKey := key.NewNode()
machineKey := key.NewMachine()
defaultRouteV4 := netip.MustParsePrefix("0.0.0.0/0") defaultRouteV4 := netip.MustParsePrefix("0.0.0.0/0")
defaultRouteV6 := netip.MustParsePrefix("::/0") defaultRouteV6 := netip.MustParsePrefix("::/0")
@ -588,13 +574,14 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: machineKey.Public(), MachineKey: "foo",
NodeKey: nodeKey.Public(), NodeKey: util.NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: "faa",
Hostname: "test", Hostname: "test",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID), AuthKeyID: uint(pak.ID),
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
RequestTags: []string{"tag:exit"}, RequestTags: []string{"tag:exit"},
RoutableIPs: []netip.Prefix{defaultRouteV4, defaultRouteV6, route1, route2}, RoutableIPs: []netip.Prefix{defaultRouteV4, defaultRouteV6, route1, route2},
}, },
@ -603,9 +590,8 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
db.db.Save(&node) db.db.Save(&node)
sendUpdate, err := db.SaveNodeRoutes(&node) err = db.SaveNodeRoutes(&node)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
node0ByID, err := db.GetNodeByID(0) node0ByID, err := db.GetNodeByID(0)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)

View file

@ -77,6 +77,9 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testest", Hostname: "testest",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -98,6 +101,9 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {
node := types.Node{ node := types.Node{
ID: 1, ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testest", Hostname: "testest",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -132,6 +138,9 @@ func (*Suite) TestEphemeralKey(c *check.C) {
now := time.Now().Add(-time.Second * 30) now := time.Now().Add(-time.Second * 30)
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testest", Hostname: "testest",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -198,5 +207,5 @@ func (*Suite) TestPreAuthKeyACLTags(c *check.C) {
listedPaks, err := db.ListPreAuthKeys("test8") listedPaks, err := db.ListPreAuthKeys("test8")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(listedPaks[0].Proto().GetAclTags(), check.DeepEquals, tags) c.Assert(listedPaks[0].Proto().AclTags, check.DeepEquals, tags)
} }

View file

@ -7,9 +7,7 @@ import (
"github.com/juanfont/headscale/hscontrol/policy" "github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"github.com/samber/lo"
"gorm.io/gorm" "gorm.io/gorm"
"tailscale.com/types/key"
) )
var ErrRouteIsNotAvailable = errors.New("route is not available") var ErrRouteIsNotAvailable = errors.New("route is not available")
@ -23,38 +21,7 @@ func (hsdb *HSDatabase) GetRoutes() (types.Routes, error) {
func (hsdb *HSDatabase) getRoutes() (types.Routes, error) { func (hsdb *HSDatabase) getRoutes() (types.Routes, error) {
var routes types.Routes var routes types.Routes
err := hsdb.db. err := hsdb.db.Preload("Node").Find(&routes).Error
Preload("Node").
Preload("Node.User").
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func (hsdb *HSDatabase) getAdvertisedAndEnabledRoutes() (types.Routes, error) {
var routes types.Routes
err := hsdb.db.
Preload("Node").
Preload("Node.User").
Where("advertised = ? AND enabled = ?", true, true).
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func (hsdb *HSDatabase) getRoutesByPrefix(pref netip.Prefix) (types.Routes, error) {
var routes types.Routes
err := hsdb.db.
Preload("Node").
Preload("Node.User").
Where("prefix = ?", types.IPPrefix(pref)).
Find(&routes).Error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -73,7 +40,6 @@ func (hsdb *HSDatabase) getNodeAdvertisedRoutes(node *types.Node) (types.Routes,
var routes types.Routes var routes types.Routes
err := hsdb.db. err := hsdb.db.
Preload("Node"). Preload("Node").
Preload("Node.User").
Where("node_id = ? AND advertised = true", node.ID). Where("node_id = ? AND advertised = true", node.ID).
Find(&routes).Error Find(&routes).Error
if err != nil { if err != nil {
@ -94,7 +60,6 @@ func (hsdb *HSDatabase) getNodeRoutes(node *types.Node) (types.Routes, error) {
var routes types.Routes var routes types.Routes
err := hsdb.db. err := hsdb.db.
Preload("Node"). Preload("Node").
Preload("Node.User").
Where("node_id = ?", node.ID). Where("node_id = ?", node.ID).
Find(&routes).Error Find(&routes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
@ -113,10 +78,7 @@ func (hsdb *HSDatabase) GetRoute(id uint64) (*types.Route, error) {
func (hsdb *HSDatabase) getRoute(id uint64) (*types.Route, error) { func (hsdb *HSDatabase) getRoute(id uint64) (*types.Route, error) {
var route types.Route var route types.Route
err := hsdb.db. err := hsdb.db.Preload("Node").First(&route, id).Error
Preload("Node").
Preload("Node.User").
First(&route, id).Error
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -160,26 +122,21 @@ func (hsdb *HSDatabase) DisableRoute(id uint64) error {
return err return err
} }
var routes types.Routes
node := route.Node
// Tailscale requires both IPv4 and IPv6 exit routes to // Tailscale requires both IPv4 and IPv6 exit routes to
// be enabled at the same time, as per // be enabled at the same time, as per
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002 // https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
if !route.IsExitRoute() { if !route.IsExitRoute() {
err = hsdb.failoverRouteWithNotify(route)
if err != nil {
return err
}
route.Enabled = false route.Enabled = false
route.IsPrimary = false route.IsPrimary = false
err = hsdb.db.Save(route).Error err = hsdb.db.Save(route).Error
if err != nil { if err != nil {
return err return err
} }
} else {
routes, err = hsdb.getNodeRoutes(&node) return hsdb.handlePrimarySubnetFailover()
}
routes, err := hsdb.getNodeRoutes(&route.Node)
if err != nil { if err != nil {
return err return err
} }
@ -194,27 +151,8 @@ func (hsdb *HSDatabase) DisableRoute(id uint64) error {
} }
} }
} }
}
if routes == nil { return hsdb.handlePrimarySubnetFailover()
routes, err = hsdb.getNodeRoutes(&node)
if err != nil {
return err
}
}
node.Routes = routes
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{&node},
Message: "called from db.DisableRoute",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
return nil
} }
func (hsdb *HSDatabase) DeleteRoute(id uint64) error { func (hsdb *HSDatabase) DeleteRoute(id uint64) error {
@ -226,23 +164,18 @@ func (hsdb *HSDatabase) DeleteRoute(id uint64) error {
return err return err
} }
var routes types.Routes
node := route.Node
// Tailscale requires both IPv4 and IPv6 exit routes to // Tailscale requires both IPv4 and IPv6 exit routes to
// be enabled at the same time, as per // be enabled at the same time, as per
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002 // https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
if !route.IsExitRoute() { if !route.IsExitRoute() {
err := hsdb.failoverRouteWithNotify(route)
if err != nil {
return nil
}
if err := hsdb.db.Unscoped().Delete(&route).Error; err != nil { if err := hsdb.db.Unscoped().Delete(&route).Error; err != nil {
return err return err
} }
} else {
routes, err := hsdb.getNodeRoutes(&node) return hsdb.handlePrimarySubnetFailover()
}
routes, err := hsdb.getNodeRoutes(&route.Node)
if err != nil { if err != nil {
return err return err
} }
@ -257,27 +190,8 @@ func (hsdb *HSDatabase) DeleteRoute(id uint64) error {
if err := hsdb.db.Unscoped().Delete(&routesToDelete).Error; err != nil { if err := hsdb.db.Unscoped().Delete(&routesToDelete).Error; err != nil {
return err return err
} }
}
if routes == nil { return hsdb.handlePrimarySubnetFailover()
routes, err = hsdb.getNodeRoutes(&node)
if err != nil {
return err
}
}
node.Routes = routes
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{&node},
Message: "called from db.DeleteRoute",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
return nil
} }
func (hsdb *HSDatabase) deleteNodeRoutes(node *types.Node) error { func (hsdb *HSDatabase) deleteNodeRoutes(node *types.Node) error {
@ -290,13 +204,9 @@ func (hsdb *HSDatabase) deleteNodeRoutes(node *types.Node) error {
if err := hsdb.db.Unscoped().Delete(&routes[i]).Error; err != nil { if err := hsdb.db.Unscoped().Delete(&routes[i]).Error; err != nil {
return err return err
} }
// TODO(kradalby): This is a bit too aggressive, we could probably
// figure out which routes needs to be failed over rather than all.
hsdb.failoverRouteWithNotify(&routes[i])
} }
return nil return hsdb.handlePrimarySubnetFailover()
} }
// isUniquePrefix returns if there is another node providing the same route already. // isUniquePrefix returns if there is another node providing the same route already.
@ -349,26 +259,22 @@ func (hsdb *HSDatabase) GetNodePrimaryRoutes(node *types.Node) (types.Routes, er
// SaveNodeRoutes takes a node and updates the database with // SaveNodeRoutes takes a node and updates the database with
// the new routes. // the new routes.
// It returns a bool whether an update should be sent as the func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) error {
// saved route impacts nodes.
func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) (bool, error) {
hsdb.mu.Lock() hsdb.mu.Lock()
defer hsdb.mu.Unlock() defer hsdb.mu.Unlock()
return hsdb.saveNodeRoutes(node) return hsdb.saveNodeRoutes(node)
} }
func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) { func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) error {
sendUpdate := false
currentRoutes := types.Routes{} currentRoutes := types.Routes{}
err := hsdb.db.Where("node_id = ?", node.ID).Find(&currentRoutes).Error err := hsdb.db.Where("node_id = ?", node.ID).Find(&currentRoutes).Error
if err != nil { if err != nil {
return sendUpdate, err return err
} }
advertisedRoutes := map[netip.Prefix]bool{} advertisedRoutes := map[netip.Prefix]bool{}
for _, prefix := range node.Hostinfo.RoutableIPs { for _, prefix := range node.HostInfo.RoutableIPs {
advertisedRoutes[prefix] = false advertisedRoutes[prefix] = false
} }
@ -384,14 +290,7 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
currentRoutes[pos].Advertised = true currentRoutes[pos].Advertised = true
err := hsdb.db.Save(&currentRoutes[pos]).Error err := hsdb.db.Save(&currentRoutes[pos]).Error
if err != nil { if err != nil {
return sendUpdate, err return err
}
// If a route that is newly "saved" is already
// enabled, set sendUpdate to true as it is now
// available.
if route.Enabled {
sendUpdate = true
} }
} }
advertisedRoutes[netip.Prefix(route.Prefix)] = true advertisedRoutes[netip.Prefix(route.Prefix)] = true
@ -400,7 +299,7 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
currentRoutes[pos].Enabled = false currentRoutes[pos].Enabled = false
err := hsdb.db.Save(&currentRoutes[pos]).Error err := hsdb.db.Save(&currentRoutes[pos]).Error
if err != nil { if err != nil {
return sendUpdate, err return err
} }
} }
} }
@ -415,223 +314,141 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
} }
err := hsdb.db.Create(&route).Error err := hsdb.db.Create(&route).Error
if err != nil { if err != nil {
return sendUpdate, err return err
} }
} }
} }
return sendUpdate, nil
}
// EnsureFailoverRouteIsAvailable takes a node and checks if the node's routes
// currently have a functioning host that exposes the network.
func (hsdb *HSDatabase) EnsureFailoverRouteIsAvailable(node *types.Node) error {
nodeRoutes, err := hsdb.getNodeRoutes(node)
if err != nil {
return nil return nil
} }
for _, nodeRoute := range nodeRoutes { func (hsdb *HSDatabase) HandlePrimarySubnetFailover() error {
routes, err := hsdb.getRoutesByPrefix(netip.Prefix(nodeRoute.Prefix)) hsdb.mu.Lock()
defer hsdb.mu.Unlock()
return hsdb.handlePrimarySubnetFailover()
}
func (hsdb *HSDatabase) handlePrimarySubnetFailover() error {
// first, get all the enabled routes
var routes types.Routes
err := hsdb.db.
Preload("Node").
Where("advertised = ? AND enabled = ?", true, true).
Find(&routes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error().Err(err).Msg("error getting routes")
}
changedNodes := make(types.Nodes, 0)
for pos, route := range routes {
if route.IsExitRoute() {
continue
}
node := &route.Node
if !route.IsPrimary {
_, err := hsdb.getPrimaryRoute(netip.Prefix(route.Prefix))
if hsdb.isUniquePrefix(route) || errors.Is(err, gorm.ErrRecordNotFound) {
log.Info().
Str("prefix", netip.Prefix(route.Prefix).String()).
Str("node", route.Node.GivenName).
Msg("Setting primary route")
routes[pos].IsPrimary = true
err := hsdb.db.Save(&routes[pos]).Error
if err != nil { if err != nil {
log.Error().Err(err).Msg("error marking route as primary")
return err return err
} }
for _, route := range routes { changedNodes = append(changedNodes, node)
continue
}
}
if route.IsPrimary { if route.IsPrimary {
// if we have a primary route, and the node is connected if route.Node.IsOnline() {
// nothing needs to be done.
if hsdb.notifier.IsConnected(route.Node.MachineKey) {
continue continue
} }
// if not, we need to failover the route // node offline, find a new primary
err := hsdb.failoverRouteWithNotify(&route) log.Info().
if err != nil { Str("node", route.Node.Hostname).
return err Str("prefix", netip.Prefix(route.Prefix).String()).
} Msgf("node offline, finding a new primary subnet")
}
}
}
return nil // find a new primary route
} var newPrimaryRoutes types.Routes
err := hsdb.db.
Preload("Node").
Where("prefix = ? AND node_id != ? AND advertised = ? AND enabled = ?",
route.Prefix,
route.NodeID,
true, true).
Find(&newPrimaryRoutes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error().Err(err).Msg("error finding new primary route")
func (hsdb *HSDatabase) FailoverNodeRoutesWithNotify(node *types.Node) error {
routes, err := hsdb.getNodeRoutes(node)
if err != nil {
return nil
}
var changedKeys []key.MachinePublic
for _, route := range routes {
changed, err := hsdb.failoverRoute(&route)
if err != nil {
return err return err
} }
changedKeys = append(changedKeys, changed...) var newPrimaryRoute *types.Route
} for pos, r := range newPrimaryRoutes {
if r.Node.IsOnline() {
newPrimaryRoute = &newPrimaryRoutes[pos]
changedKeys = lo.Uniq(changedKeys)
var nodes types.Nodes
for _, key := range changedKeys {
node, err := hsdb.GetNodeByMachineKey(key)
if err != nil {
return err
}
nodes = append(nodes, node)
}
if nodes != nil {
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: nodes,
Message: "called from db.FailoverNodeRoutesWithNotify",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
}
return nil
}
func (hsdb *HSDatabase) failoverRouteWithNotify(r *types.Route) error {
changedKeys, err := hsdb.failoverRoute(r)
if err != nil {
return err
}
if len(changedKeys) == 0 {
return nil
}
var nodes types.Nodes
log.Trace().
Str("hostname", r.Node.Hostname).
Msg("loading machines with new primary routes from db")
for _, key := range changedKeys {
node, err := hsdb.getNodeByMachineKey(key)
if err != nil {
return err
}
nodes = append(nodes, node)
}
log.Trace().
Str("hostname", r.Node.Hostname).
Msg("notifying peers about primary route change")
if nodes != nil {
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: nodes,
Message: "called from db.failoverRouteWithNotify",
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
}
}
log.Trace().
Str("hostname", r.Node.Hostname).
Msg("notified peers about primary route change")
return nil
}
// failoverRoute takes a route that is no longer available,
// this can be either from:
// - being disabled
// - being deleted
// - host going offline
//
// and tries to find a new route to take over its place.
// If the given route was not primary, it returns early.
func (hsdb *HSDatabase) failoverRoute(r *types.Route) ([]key.MachinePublic, error) {
if r == nil {
return nil, nil
}
// This route is not a primary route, and it isn't
// being served to nodes.
if !r.IsPrimary {
return nil, nil
}
// We do not have to failover exit nodes
if r.IsExitRoute() {
return nil, nil
}
routes, err := hsdb.getRoutesByPrefix(netip.Prefix(r.Prefix))
if err != nil {
return nil, err
}
var newPrimary *types.Route
// Find a new suitable route
for idx, route := range routes {
if r.ID == route.ID {
continue
}
if hsdb.notifier.IsConnected(route.Node.MachineKey) {
newPrimary = &routes[idx]
break break
} }
} }
// If a new route was not found/available, if newPrimaryRoute == nil {
// return with an error. log.Warn().
// We do not want to update the database as Str("node", route.Node.Hostname).
// the one currently marked as primary is the Str("prefix", netip.Prefix(route.Prefix).String()).
// best we got. Msgf("no alternative primary route found")
if newPrimary == nil {
return nil, nil continue
} }
log.Trace(). log.Info().
Str("hostname", newPrimary.Node.Hostname). Str("old_node", route.Node.Hostname).
Msg("found new primary, updating db") Str("prefix", netip.Prefix(route.Prefix).String()).
Str("new_node", newPrimaryRoute.Node.Hostname).
Msgf("found new primary route")
// Remove primary from the old route // disable the old primary route
r.IsPrimary = false routes[pos].IsPrimary = false
err = hsdb.db.Save(&r).Error err = hsdb.db.Save(&routes[pos]).Error
if err != nil { if err != nil {
log.Error().Err(err).Msg("error disabling new primary route") log.Error().Err(err).Msg("error disabling old primary route")
return nil, err return err
} }
log.Trace(). // enable the new primary route
Str("hostname", newPrimary.Node.Hostname). newPrimaryRoute.IsPrimary = true
Msg("removed primary from old route") err = hsdb.db.Save(&newPrimaryRoute).Error
// Set primary for the new primary
newPrimary.IsPrimary = true
err = hsdb.db.Save(&newPrimary).Error
if err != nil { if err != nil {
log.Error().Err(err).Msg("error enabling new primary route") log.Error().Err(err).Msg("error enabling new primary route")
return nil, err return err
} }
log.Trace(). changedNodes = append(changedNodes, node)
Str("hostname", newPrimary.Node.Hostname). }
Msg("set primary to new route") }
// Return a list of the machinekeys of the changed nodes. if len(changedNodes) > 0 {
return []key.MachinePublic{r.Node.MachineKey, newPrimary.Node.MachineKey}, nil hsdb.notifier.NotifyAll(types.StateUpdate{
Type: types.StatePeerChanged,
Changed: changedNodes,
})
}
return nil
} }
// EnableAutoApprovedRoutes enables any routes advertised by a node that match the ACL autoApprovers policy. // EnableAutoApprovedRoutes enables any routes advertised by a node that match the ACL autoApprovers policy.
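
The routes.go changes above revolve around primary subnet failover: when the node behind a primary route is no longer reachable, another online node that advertises and has enabled the same prefix should take over. Below is a minimal, hypothetical sketch of just that selection rule; the route type, node names, and injected online callback are stand-ins, not headscale's Route/Node models or its notifier/LastSeen checks, and exit-route handling is deliberately left out:

package main

import (
    "fmt"
    "net/netip"
)

// route is a simplified, hypothetical stand-in for headscale's types.Route.
type route struct {
    Prefix    netip.Prefix
    Node      string
    Enabled   bool
    IsPrimary bool
}

// failover returns the index of a replacement primary for routes[failing],
// or -1 when no other online node serves the same enabled prefix.
func failover(routes []route, failing int, online func(string) bool) int {
    f := routes[failing]
    if !f.IsPrimary {
        return -1 // only primary routes need a takeover
    }
    for i, r := range routes {
        if i == failing || r.Prefix != f.Prefix || !r.Enabled {
            continue
        }
        if online(r.Node) {
            return i
        }
    }
    return -1
}

func main() {
    pfx := netip.MustParsePrefix("10.0.0.0/24")
    routes := []route{
        {Prefix: pfx, Node: "node-a", Enabled: true, IsPrimary: true},
        {Prefix: pfx, Node: "node-b", Enabled: true},
    }
    // node-a just went offline; node-b is still connected.
    online := func(n string) bool { return n == "node-b" }
    fmt.Println(failover(routes, 0, online)) // 1: node-b becomes primary
}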

View file

@ -2,19 +2,12 @@ package db
import ( import (
"net/netip" "net/netip"
"os"
"testing"
"time" "time"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/hscontrol/util"
"github.com/stretchr/testify/assert"
"gopkg.in/check.v1" "gopkg.in/check.v1"
"gorm.io/gorm"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/key"
) )
func (s *Suite) TestGetRoutes(c *check.C) { func (s *Suite) TestGetRoutes(c *check.C) {
@ -36,17 +29,19 @@ func (s *Suite) TestGetRoutes(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_get_route_node", Hostname: "test_get_route_node",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID), AuthKeyID: uint(pak.ID),
Hostinfo: &hostInfo, HostInfo: types.HostInfo(hostInfo),
} }
db.db.Save(&node) db.db.Save(&node)
su, err := db.SaveNodeRoutes(&node) err = db.SaveNodeRoutes(&node)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(su, check.Equals, false)
advertisedRoutes, err := db.GetAdvertisedRoutes(&node) advertisedRoutes, err := db.GetAdvertisedRoutes(&node)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -85,17 +80,19 @@ func (s *Suite) TestGetEnableRoutes(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node", Hostname: "test_enable_route_node",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID), AuthKeyID: uint(pak.ID),
Hostinfo: &hostInfo, HostInfo: types.HostInfo(hostInfo),
} }
db.db.Save(&node) db.db.Save(&node)
sendUpdate, err := db.SaveNodeRoutes(&node) err = db.SaveNodeRoutes(&node)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
availableRoutes, err := db.GetAdvertisedRoutes(&node) availableRoutes, err := db.GetAdvertisedRoutes(&node)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -157,17 +154,19 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
} }
node1 := types.Node{ node1 := types.Node{
ID: 1, ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node", Hostname: "test_enable_route_node",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID), AuthKeyID: uint(pak.ID),
Hostinfo: &hostInfo1, HostInfo: types.HostInfo(hostInfo1),
} }
db.db.Save(&node1) db.db.Save(&node1)
sendUpdate, err := db.SaveNodeRoutes(&node1) err = db.SaveNodeRoutes(&node1)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
err = db.enableRoutes(&node1, route.String()) err = db.enableRoutes(&node1, route.String())
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -180,17 +179,19 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
} }
node2 := types.Node{ node2 := types.Node{
ID: 2, ID: 2,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node", Hostname: "test_enable_route_node",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID), AuthKeyID: uint(pak.ID),
Hostinfo: &hostInfo2, HostInfo: types.HostInfo(hostInfo2),
} }
db.db.Save(&node2) db.db.Save(&node2)
sendUpdate, err = db.SaveNodeRoutes(&node2) err = db.SaveNodeRoutes(&node2)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
err = db.enableRoutes(&node2, route2.String()) err = db.enableRoutes(&node2, route2.String())
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -212,6 +213,148 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
c.Assert(len(routes), check.Equals, 0) c.Assert(len(routes), check.Equals, 0)
} }
func (s *Suite) TestSubnetFailover(c *check.C) {
user, err := db.CreateUser("test")
c.Assert(err, check.IsNil)
pak, err := db.CreatePreAuthKey(user.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)
_, err = db.GetNode("test", "test_enable_route_node")
c.Assert(err, check.NotNil)
prefix, err := netip.ParsePrefix(
"10.0.0.0/24",
)
c.Assert(err, check.IsNil)
prefix2, err := netip.ParsePrefix(
"150.0.10.0/25",
)
c.Assert(err, check.IsNil)
hostInfo1 := tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix, prefix2},
}
now := time.Now()
node1 := types.Node{
ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo1),
LastSeen: &now,
}
db.db.Save(&node1)
err = db.SaveNodeRoutes(&node1)
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node1, prefix.String())
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node1, prefix2.String())
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
enabledRoutes1, err := db.GetEnabledRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 2)
route, err := db.getPrimaryRoute(prefix)
c.Assert(err, check.IsNil)
c.Assert(route.NodeID, check.Equals, node1.ID)
hostInfo2 := tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix2},
}
node2 := types.Node{
ID: 2,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: types.HostInfo(hostInfo2),
LastSeen: &now,
}
db.db.Save(&node2)
err = db.saveNodeRoutes(&node2)
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node2, prefix2.String())
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
enabledRoutes1, err = db.GetEnabledRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 2)
enabledRoutes2, err := db.GetEnabledRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes2), check.Equals, 1)
routes, err := db.GetNodePrimaryRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 2)
routes, err = db.GetNodePrimaryRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 0)
// lets make node1 lastseen 10 mins ago
before := now.Add(-10 * time.Minute)
node1.LastSeen = &before
err = db.db.Save(&node1).Error
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
routes, err = db.GetNodePrimaryRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 1)
routes, err = db.GetNodePrimaryRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 1)
node2.HostInfo = types.HostInfo(tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix, prefix2},
})
err = db.db.Save(&node2).Error
c.Assert(err, check.IsNil)
err = db.SaveNodeRoutes(&node2)
c.Assert(err, check.IsNil)
err = db.enableRoutes(&node2, prefix.String())
c.Assert(err, check.IsNil)
err = db.HandlePrimarySubnetFailover()
c.Assert(err, check.IsNil)
routes, err = db.GetNodePrimaryRoutes(&node1)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 0)
routes, err = db.GetNodePrimaryRoutes(&node2)
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 2)
}
func (s *Suite) TestDeleteRoutes(c *check.C) { func (s *Suite) TestDeleteRoutes(c *check.C) {
user, err := db.CreateUser("test") user, err := db.CreateUser("test")
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -239,18 +382,20 @@ func (s *Suite) TestDeleteRoutes(c *check.C) {
now := time.Now() now := time.Now()
node1 := types.Node{ node1 := types.Node{
ID: 1, ID: 1,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "test_enable_route_node", Hostname: "test_enable_route_node",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID), AuthKeyID: uint(pak.ID),
Hostinfo: &hostInfo1, HostInfo: types.HostInfo(hostInfo1),
LastSeen: &now, LastSeen: &now,
} }
db.db.Save(&node1) db.db.Save(&node1)
sendUpdate, err := db.SaveNodeRoutes(&node1) err = db.SaveNodeRoutes(&node1)
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(sendUpdate, check.Equals, false)
err = db.enableRoutes(&node1, prefix.String()) err = db.enableRoutes(&node1, prefix.String())
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
@ -268,362 +413,3 @@ func (s *Suite) TestDeleteRoutes(c *check.C) {
c.Assert(err, check.IsNil) c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 1) c.Assert(len(enabledRoutes1), check.Equals, 1)
} }
func TestFailoverRoute(t *testing.T) {
ipp := func(s string) types.IPPrefix { return types.IPPrefix(netip.MustParsePrefix(s)) }
// TODO(kradalby): Count/verify updates
var sink chan types.StateUpdate
go func() {
for range sink {
}
}()
machineKeys := []key.MachinePublic{
key.NewMachine().Public(),
key.NewMachine().Public(),
key.NewMachine().Public(),
key.NewMachine().Public(),
}
tests := []struct {
name string
failingRoute types.Route
routes types.Routes
want []key.MachinePublic
wantErr bool
}{
{
name: "no-route",
failingRoute: types.Route{},
routes: types.Routes{},
want: nil,
wantErr: false,
},
{
name: "no-prime",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: false,
},
routes: types.Routes{},
want: nil,
wantErr: false,
},
{
name: "exit-node",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("0.0.0.0/0"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
routes: types.Routes{},
want: nil,
wantErr: false,
},
{
name: "no-failover-single-route",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
},
want: nil,
wantErr: false,
},
{
name: "failover-primary",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: false,
},
},
want: []key.MachinePublic{
machineKeys[0],
machineKeys[1],
},
wantErr: false,
},
{
name: "failover-none-primary",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: false,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: false,
},
},
want: nil,
wantErr: false,
},
{
name: "failover-primary-multi-route",
failingRoute: types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: false,
},
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: true,
},
types.Route{
Model: gorm.Model{
ID: 3,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[2],
},
IsPrimary: false,
},
},
want: []key.MachinePublic{
machineKeys[1],
machineKeys[0],
},
wantErr: false,
},
{
name: "failover-primary-no-online",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
// Offline
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[3],
},
IsPrimary: false,
},
},
want: nil,
wantErr: false,
},
{
name: "failover-primary-one-not-online",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
},
// Offline
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[3],
},
IsPrimary: false,
},
types.Route{
Model: gorm.Model{
ID: 3,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: true,
},
},
want: []key.MachinePublic{
machineKeys[0],
machineKeys[1],
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "failover-db-test")
assert.NoError(t, err)
notif := notifier.NewNotifier()
db, err = NewHeadscaleDatabase(
"sqlite3",
tmpDir+"/headscale_test.db",
false,
notif,
[]netip.Prefix{
netip.MustParsePrefix("10.27.0.0/23"),
},
"",
)
assert.NoError(t, err)
// Pretend that all the nodes are connected to control
for idx, key := range machineKeys {
// Pretend one node is offline
if idx == 3 {
continue
}
notif.AddNode(key, sink)
}
for _, route := range tt.routes {
if err := db.db.Save(&route).Error; err != nil {
t.Fatalf("failed to create route: %s", err)
}
}
got, err := db.failoverRoute(&tt.failingRoute)
if (err != nil) != tt.wantErr {
t.Errorf("failoverRoute() error = %v, wantErr %v", err, tt.wantErr)
return
}
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
t.Errorf("failoverRoute() unexpected result (-want +got):\n%s", diff)
}
})
}
}

View file

@ -1,7 +1,6 @@
package db package db
import ( import (
"log"
"net/netip" "net/netip"
"os" "os"
"testing" "testing"
@ -28,22 +27,19 @@ func (s *Suite) SetUpTest(c *check.C) {
} }
func (s *Suite) TearDownTest(c *check.C) { func (s *Suite) TearDownTest(c *check.C) {
// os.RemoveAll(tmpDir) os.RemoveAll(tmpDir)
} }
func (s *Suite) ResetDB(c *check.C) { func (s *Suite) ResetDB(c *check.C) {
// if len(tmpDir) != 0 { if len(tmpDir) != 0 {
// os.RemoveAll(tmpDir) os.RemoveAll(tmpDir)
// } }
var err error var err error
tmpDir, err = os.MkdirTemp("", "headscale-db-test-*") tmpDir, err = os.MkdirTemp("", "autoygg-client-test")
if err != nil { if err != nil {
c.Fatal(err) c.Fatal(err)
} }
log.Printf("database path: %s", tmpDir+"/headscale_test.db")
db, err = NewHeadscaleDatabase( db, err = NewHeadscaleDatabase(
"sqlite3", "sqlite3",
tmpDir+"/headscale_test.db", tmpDir+"/headscale_test.db",

View file

@ -48,6 +48,9 @@ func (s *Suite) TestDestroyUserErrors(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: user.ID, UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
@ -100,6 +103,9 @@ func (s *Suite) TestSetMachineUser(c *check.C) {
node := types.Node{ node := types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnode", Hostname: "testnode",
UserID: oldUser.ID, UserID: oldUser.ID,
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,

View file

@ -13,7 +13,6 @@ import (
"time" "time"
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"tailscale.com/derp" "tailscale.com/derp"
"tailscale.com/net/stun" "tailscale.com/net/stun"
@ -40,7 +39,7 @@ func NewDERPServer(
cfg *types.DERPConfig, cfg *types.DERPConfig,
) (*DERPServer, error) { ) (*DERPServer, error) {
log.Trace().Caller().Msg("Creating new embedded DERP server") log.Trace().Caller().Msg("Creating new embedded DERP server")
server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains server := derp.NewServer(derpKey, log.Debug().Msgf) // nolint // zerolinter complains
return &DERPServer{ return &DERPServer{
serverURL: serverURL, serverURL: serverURL,

View file

@ -172,18 +172,12 @@ func (api headscaleV1APIServer) RegisterNode(
) (*v1.RegisterNodeResponse, error) { ) (*v1.RegisterNodeResponse, error) {
log.Trace(). log.Trace().
Str("user", request.GetUser()). Str("user", request.GetUser()).
Str("machine_key", request.GetKey()). Str("node_key", request.GetKey()).
Msg("Registering node") Msg("Registering node")
var mkey key.MachinePublic
err := mkey.UnmarshalText([]byte(request.GetKey()))
if err != nil {
return nil, err
}
node, err := api.h.db.RegisterNodeFromAuthCallback( node, err := api.h.db.RegisterNodeFromAuthCallback(
api.h.registrationCache, api.h.registrationCache,
mkey, request.GetKey(),
request.GetUser(), request.GetUser(),
nil, nil,
util.RegisterMethodCLI, util.RegisterMethodCLI,
@ -204,13 +198,7 @@ func (api headscaleV1APIServer) GetNode(
return nil, err return nil, err
} }
resp := node.Proto() return &v1.GetNodeResponse{Node: node.Proto()}, nil
// Populate the online field based on
// currently connected nodes.
resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)
return &v1.GetNodeResponse{Node: resp}, nil
} }
func (api headscaleV1APIServer) SetTags( func (api headscaleV1APIServer) SetTags(
@ -339,13 +327,7 @@ func (api headscaleV1APIServer) ListNodes(
response := make([]*v1.Node, len(nodes)) response := make([]*v1.Node, len(nodes))
for index, node := range nodes { for index, node := range nodes {
resp := node.Proto() response[index] = node.Proto()
// Populate the online field based on
// currently connected nodes.
resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)
response[index] = resp
} }
return &v1.ListNodesResponse{Nodes: response}, nil return &v1.ListNodesResponse{Nodes: response}, nil
@ -358,18 +340,13 @@ func (api headscaleV1APIServer) ListNodes(
response := make([]*v1.Node, len(nodes)) response := make([]*v1.Node, len(nodes))
for index, node := range nodes { for index, node := range nodes {
resp := node.Proto() m := node.Proto()
// Populate the online field based on
// currently connected nodes.
resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)
validTags, invalidTags := api.h.ACLPolicy.TagsOfNode( validTags, invalidTags := api.h.ACLPolicy.TagsOfNode(
&node, &node,
) )
resp.InvalidTags = invalidTags m.InvalidTags = invalidTags
resp.ValidTags = validTags m.ValidTags = validTags
response[index] = resp response[index] = m
} }
return &v1.ListNodesResponse{Nodes: response}, nil return &v1.ListNodesResponse{Nodes: response}, nil
@ -544,22 +521,13 @@ func (api headscaleV1APIServer) DebugCreateNode(
Hostname: "DebugTestNode", Hostname: "DebugTestNode",
} }
var mkey key.MachinePublic givenName, err := api.h.db.GenerateGivenName(request.GetKey(), request.GetName())
err = mkey.UnmarshalText([]byte(request.GetKey()))
if err != nil { if err != nil {
return nil, err return nil, err
} }
givenName, err := api.h.db.GenerateGivenName(mkey, request.GetName())
if err != nil {
return nil, err
}
nodeKey := key.NewNode()
newNode := types.Node{ newNode := types.Node{
MachineKey: mkey, MachineKey: request.GetKey(),
NodeKey: nodeKey.Public(),
Hostname: request.GetName(), Hostname: request.GetName(),
GivenName: givenName, GivenName: givenName,
User: *user, User: *user,
@ -567,15 +535,17 @@ func (api headscaleV1APIServer) DebugCreateNode(
Expiry: &time.Time{}, Expiry: &time.Time{},
LastSeen: &time.Time{}, LastSeen: &time.Time{},
Hostinfo: &hostinfo, HostInfo: types.HostInfo(hostinfo),
} }
log.Debug(). nodeKey := key.NodePublic{}
Str("machine_key", mkey.ShortString()). err = nodeKey.UnmarshalText([]byte(request.GetKey()))
Msg("adding debug machine via CLI, appending to registration cache") if err != nil {
log.Panic().Msg("can not add node for debug. invalid node key")
}
api.h.registrationCache.Set( api.h.registrationCache.Set(
mkey.String(), util.NodePublicKeyStripPrefix(nodeKey),
newNode, newNode,
registerCacheExpiration, registerCacheExpiration,
) )
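One side of this hunk parses the key string from the gRPC request into a typed key.MachinePublic before registering the node, while the other passes the raw string straight through. A minimal sketch of just that parsing step, using only tailscale.com/types/key (the request, cache and database wiring are headscale-specific and left out); the example key is the one used in the tests further down:

    package main

    import (
        "fmt"
        "log"

        "tailscale.com/types/key"
    )

    func main() {
        // A machine key in its textual form, as it arrives over the API.
        raw := "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507"

        // key.MachinePublic implements encoding.TextUnmarshaler, so the string
        // is validated while it is parsed; a malformed key fails here instead
        // of propagating further into the registration path.
        var mkey key.MachinePublic
        if err := mkey.UnmarshalText([]byte(raw)); err != nil {
            log.Fatalf("invalid machine key: %v", err)
        }

        // ShortString gives a truncated form that is safe to log.
        fmt.Println("parsed machine key:", mkey.ShortString())
    }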

View file

@ -0,0 +1,15 @@
//go:build ts2019
package hscontrol
import (
"net/http"
"github.com/gorilla/mux"
)
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).
Methods(http.MethodPost)
router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
}

View file

@ -0,0 +1,8 @@
//go:build !ts2019
package hscontrol
import "github.com/gorilla/mux"
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
}
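These two files are build-tag alternatives: compiling with -tags ts2019 includes the variant that registers the legacy /machine routes on the mux router, while a default build compiles the stub and addLegacyHandlers becomes a no-op, keeping the legacy protocol out of the binary entirely. The same mechanism in miniature, as a three-file sketch with hypothetical names (legacyEnabled stands in for the route registration):

    // legacy_on.go
    //go:build legacy

    package main

    // Compiled only when the build runs with -tags legacy.
    func legacyEnabled() bool { return true }

    // legacy_off.go
    //go:build !legacy

    package main

    // Compiled for every build without the tag.
    func legacyEnabled() bool { return false }

    // main.go
    package main

    import "fmt"

    func main() {
        fmt.Println("legacy endpoints compiled in:", legacyEnabled())
    }

Built plainly, the program prints false; built with go build -tags legacy it prints true, and nothing outside the two tagged files needs to know which variant was chosen.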

View file

@ -11,6 +11,7 @@ import (
"time" "time"
"github.com/gorilla/mux" "github.com/gorilla/mux"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/key" "tailscale.com/types/key"
@ -62,6 +63,26 @@ func (h *Headscale) KeyHandler(
// New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion // New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion
capVer, err := parseCabailityVersion(req) capVer, err := parseCabailityVersion(req)
if err != nil { if err != nil {
if errors.Is(err, ErrNoCapabilityVersion) {
log.Debug().
Str("handler", "/key").
Msg("New legacy client")
// Old clients don't send a 'v' parameter, so we send the legacy public key
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusOK)
_, err := writer.Write(
[]byte(util.MachinePublicKeyStripPrefix(h.privateKey2019.Public())),
)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
log.Error(). log.Error().
Caller(). Caller().
Err(err). Err(err).
@ -80,7 +101,7 @@ func (h *Headscale) KeyHandler(
log.Debug(). log.Debug().
Str("handler", "/key"). Str("handler", "/key").
Int("cap_ver", int(capVer)). Int("v", int(capVer)).
Msg("New noise client") Msg("New noise client")
if err != nil { if err != nil {
writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
@ -99,6 +120,7 @@ func (h *Headscale) KeyHandler(
// TS2021 (Tailscale v2 protocol) requires a different key // TS2021 (Tailscale v2 protocol) requires a different key
if capVer >= NoiseCapabilityVersion { if capVer >= NoiseCapabilityVersion {
resp := tailcfg.OverTLSPublicKeyResponse{ resp := tailcfg.OverTLSPublicKeyResponse{
LegacyPublicKey: h.privateKey2019.Public(),
PublicKey: h.noisePrivateKey.Public(), PublicKey: h.noisePrivateKey.Public(),
} }
writer.Header().Set("Content-Type", "application/json") writer.Header().Set("Content-Type", "application/json")
@ -184,16 +206,33 @@ func (h *Headscale) RegisterWebAPI(
req *http.Request, req *http.Request,
) { ) {
vars := mux.Vars(req) vars := mux.Vars(req)
machineKeyStr := vars["mkey"] nodeKeyStr, ok := vars["nkey"]
if !util.NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
// We need to make sure we don't open for XSS style injections, if the parameter that // is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render // is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// the template and log an error. // the template and log an error.
var machineKey key.MachinePublic var nodeKey key.NodePublic
err := machineKey.UnmarshalText( err := nodeKey.UnmarshalText(
[]byte(machineKeyStr), []byte(util.NodePublicKeyEnsurePrefix(nodeKeyStr)),
) )
if err != nil {
if !ok || nodeKeyStr == "" || err != nil {
log.Warn().Err(err).Msg("Failed to parse incoming nodekey") log.Warn().Err(err).Msg("Failed to parse incoming nodekey")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
@ -211,7 +250,7 @@ func (h *Headscale) RegisterWebAPI(
var content bytes.Buffer var content bytes.Buffer
if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{ if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{
Key: machineKey.String(), Key: nodeKeyStr,
}); err != nil { }); err != nil {
log.Error(). log.Error().
Str("func", "RegisterWebAPI"). Str("func", "RegisterWebAPI").

View file

@ -8,7 +8,6 @@ import (
"net/url" "net/url"
"os" "os"
"path" "path"
"slices"
"sort" "sort"
"strings" "strings"
"sync" "sync"
@ -21,11 +20,12 @@ import (
"github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/hscontrol/util"
"github.com/klauspost/compress/zstd" "github.com/klauspost/compress/zstd"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"golang.org/x/exp/maps" "github.com/samber/lo"
"tailscale.com/envknob" "tailscale.com/envknob"
"tailscale.com/smallzstd" "tailscale.com/smallzstd"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/dnstype" "tailscale.com/types/dnstype"
"tailscale.com/types/key"
) )
const ( const (
@ -46,9 +46,12 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_
// - Keep information about the previous mapresponse so we can send a diff // - Keep information about the previous mapresponse so we can send a diff
// - Store hashes // - Store hashes
// - Create a "minifier" that removes info not needed for the node // - Create a "minifier" that removes info not needed for the node
// - some sort of batching, wait for 5 or 60 seconds before sending
type Mapper struct { type Mapper struct {
privateKey2019 *key.MachinePrivate
isNoise bool
capVer tailcfg.CapabilityVersion
// Configuration // Configuration
// TODO(kradalby): figure out if this is the format we want this in // TODO(kradalby): figure out if this is the format we want this in
derpMap *tailcfg.DERPMap derpMap *tailcfg.DERPMap
@ -65,17 +68,14 @@ type Mapper struct {
// only one func is accessing it over time. // only one func is accessing it over time.
mu sync.Mutex mu sync.Mutex
peers map[uint64]*types.Node peers map[uint64]*types.Node
patches map[uint64][]patch
}
type patch struct {
timestamp time.Time
change *tailcfg.PeerChange
} }
func NewMapper( func NewMapper(
node *types.Node, node *types.Node,
peers types.Nodes, peers types.Nodes,
privateKey *key.MachinePrivate,
isNoise bool,
capVer tailcfg.CapabilityVersion,
derpMap *tailcfg.DERPMap, derpMap *tailcfg.DERPMap,
baseDomain string, baseDomain string,
dnsCfg *tailcfg.DNSConfig, dnsCfg *tailcfg.DNSConfig,
@ -84,12 +84,17 @@ func NewMapper(
) *Mapper { ) *Mapper {
log.Debug(). log.Debug().
Caller(). Caller().
Bool("noise", isNoise).
Str("node", node.Hostname). Str("node", node.Hostname).
Msg("creating new mapper") Msg("creating new mapper")
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength) uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
return &Mapper{ return &Mapper{
privateKey2019: privateKey,
isNoise: isNoise,
capVer: capVer,
derpMap: derpMap, derpMap: derpMap,
baseDomain: baseDomain, baseDomain: baseDomain,
dnsCfg: dnsCfg, dnsCfg: dnsCfg,
@ -102,7 +107,6 @@ func NewMapper(
// TODO: populate // TODO: populate
peers: peers.IDMap(), peers: peers.IDMap(),
patches: make(map[uint64][]patch),
} }
} }
@ -191,7 +195,7 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) { if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
attrs := url.Values{ attrs := url.Values{
"device_name": []string{node.Hostname}, "device_name": []string{node.Hostname},
"device_model": []string{node.Hostinfo.OS}, "device_model": []string{node.HostInfo.OS},
} }
if len(node.IPAddresses) > 0 { if len(node.IPAddresses) > 0 {
@ -208,11 +212,10 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
func (m *Mapper) fullMapResponse( func (m *Mapper) fullMapResponse(
node *types.Node, node *types.Node,
pol *policy.ACLPolicy, pol *policy.ACLPolicy,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) { ) (*tailcfg.MapResponse, error) {
peers := nodeMapToList(m.peers) peers := nodeMapToList(m.peers)
resp, err := m.baseWithConfigMapResponse(node, pol, capVer) resp, err := m.baseWithConfigMapResponse(node, pol)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -221,7 +224,7 @@ func (m *Mapper) fullMapResponse(
resp, resp,
pol, pol,
node, node,
capVer, m.capVer,
peers, peers,
peers, peers,
m.baseDomain, m.baseDomain,
@ -244,24 +247,15 @@ func (m *Mapper) FullMapResponse(
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() defer m.mu.Unlock()
peers := maps.Keys(m.peers) resp, err := m.fullMapResponse(node, pol)
peersWithPatches := maps.Keys(m.patches)
slices.Sort(peers)
slices.Sort(peersWithPatches)
if len(peersWithPatches) > 0 {
log.Debug().
Str("node", node.Hostname).
Uints64("peers", peers).
Uints64("pending_patches", peersWithPatches).
Msgf("node requested full map response, but has pending patches")
}
resp, err := m.fullMapResponse(node, pol, mapRequest.Version)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if m.isNoise {
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress) return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
} }
@ -273,11 +267,15 @@ func (m *Mapper) LiteMapResponse(
node *types.Node, node *types.Node,
pol *policy.ACLPolicy, pol *policy.ACLPolicy,
) ([]byte, error) { ) ([]byte, error) {
resp, err := m.baseWithConfigMapResponse(node, pol, mapRequest.Version) resp, err := m.baseWithConfigMapResponse(node, pol)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if m.isNoise {
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress) return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
} }
@ -294,12 +292,10 @@ func (m *Mapper) KeepAliveResponse(
func (m *Mapper) DERPMapResponse( func (m *Mapper) DERPMapResponse(
mapRequest tailcfg.MapRequest, mapRequest tailcfg.MapRequest,
node *types.Node, node *types.Node,
derpMap *tailcfg.DERPMap, derpMap tailcfg.DERPMap,
) ([]byte, error) { ) ([]byte, error) {
m.derpMap = derpMap
resp := m.baseMapResponse() resp := m.baseMapResponse()
resp.DERPMap = derpMap resp.DERPMap = &derpMap
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress) return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
} }
@ -309,29 +305,18 @@ func (m *Mapper) PeerChangedResponse(
node *types.Node, node *types.Node,
changed types.Nodes, changed types.Nodes,
pol *policy.ACLPolicy, pol *policy.ACLPolicy,
messages ...string,
) ([]byte, error) { ) ([]byte, error) {
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() defer m.mu.Unlock()
lastSeen := make(map[tailcfg.NodeID]bool)
// Update our internal map. // Update our internal map.
for _, node := range changed { for _, node := range changed {
if patches, ok := m.patches[node.ID]; ok {
// preserve online status in case the patch has an outdated one
online := node.IsOnline
for _, p := range patches {
// TODO(kradalby): Figure out if this needs to be sorted by timestamp
node.ApplyPeerChange(p.change)
}
// Ensure the patches are not applied again later
delete(m.patches, node.ID)
node.IsOnline = online
}
m.peers[node.ID] = node m.peers[node.ID] = node
// We have just seen the node, let the peers update their list.
lastSeen[tailcfg.NodeID(node.ID)] = true
} }
resp := m.baseMapResponse() resp := m.baseMapResponse()
@ -340,7 +325,7 @@ func (m *Mapper) PeerChangedResponse(
&resp, &resp,
pol, pol,
node, node,
mapRequest.Version, m.capVer,
nodeMapToList(m.peers), nodeMapToList(m.peers),
changed, changed,
m.baseDomain, m.baseDomain,
@ -351,55 +336,11 @@ func (m *Mapper) PeerChangedResponse(
return nil, err return nil, err
} }
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...) // resp.PeerSeenChange = lastSeen
}
// PeerChangedPatchResponse creates a patch MapResponse with
// incoming update from a state change.
func (m *Mapper) PeerChangedPatchResponse(
mapRequest tailcfg.MapRequest,
node *types.Node,
changed []*tailcfg.PeerChange,
pol *policy.ACLPolicy,
) ([]byte, error) {
m.mu.Lock()
defer m.mu.Unlock()
sendUpdate := false
// patch the internal map
for _, change := range changed {
if peer, ok := m.peers[uint64(change.NodeID)]; ok {
peer.ApplyPeerChange(change)
sendUpdate = true
} else {
log.Trace().Str("node", node.Hostname).Msgf("Node with ID %s is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname)
p := patch{
timestamp: time.Now(),
change: change,
}
if patches, ok := m.patches[uint64(change.NodeID)]; ok {
patches := append(patches, p)
m.patches[uint64(change.NodeID)] = patches
} else {
m.patches[uint64(change.NodeID)] = []patch{p}
}
}
}
if !sendUpdate {
return nil, nil
}
resp := m.baseMapResponse()
resp.PeersChangedPatch = changed
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress) return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
} }
// TODO(kradalby): We need some integration tests for this.
func (m *Mapper) PeerRemovedResponse( func (m *Mapper) PeerRemovedResponse(
mapRequest tailcfg.MapRequest, mapRequest tailcfg.MapRequest,
node *types.Node, node *types.Node,
@ -408,23 +349,13 @@ func (m *Mapper) PeerRemovedResponse(
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() defer m.mu.Unlock()
// Some nodes might have been removed already
// so we don't want to ask downstream to remove
// twice, which can cause a panic in tailscaled.
notYetRemoved := []tailcfg.NodeID{}
// remove from our internal map // remove from our internal map
for _, id := range removed { for _, id := range removed {
if _, ok := m.peers[uint64(id)]; ok {
notYetRemoved = append(notYetRemoved, id)
}
delete(m.peers, uint64(id)) delete(m.peers, uint64(id))
delete(m.patches, uint64(id))
} }
resp := m.baseMapResponse() resp := m.baseMapResponse()
resp.PeersRemoved = notYetRemoved resp.PeersRemoved = removed
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress) return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
} }
@ -434,10 +365,20 @@ func (m *Mapper) marshalMapResponse(
resp *tailcfg.MapResponse, resp *tailcfg.MapResponse,
node *types.Node, node *types.Node,
compression string, compression string,
messages ...string,
) ([]byte, error) { ) ([]byte, error) {
atomic.AddUint64(&m.seq, 1) atomic.AddUint64(&m.seq, 1)
var machineKey key.MachinePublic
err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot parse client key")
return nil, err
}
jsonBody, err := json.Marshal(resp) jsonBody, err := json.Marshal(resp)
if err != nil { if err != nil {
log.Error(). log.Error().
@ -448,27 +389,11 @@ func (m *Mapper) marshalMapResponse(
if debugDumpMapResponsePath != "" { if debugDumpMapResponsePath != "" {
data := map[string]interface{}{ data := map[string]interface{}{
"Messages": messages,
"MapRequest": mapRequest, "MapRequest": mapRequest,
"MapResponse": resp, "MapResponse": resp,
} }
responseType := "keepalive" body, err := json.Marshal(data)
switch {
case resp.Peers != nil && len(resp.Peers) > 0:
responseType = "full"
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
responseType = "lite"
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
responseType = "changed"
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
responseType = "patch"
case resp.PeersRemoved != nil && len(resp.PeersRemoved) > 0:
responseType = "removed"
}
body, err := json.MarshalIndent(data, "", " ")
if err != nil { if err != nil {
log.Error(). log.Error().
Caller(). Caller().
@ -487,7 +412,7 @@ func (m *Mapper) marshalMapResponse(
mapResponsePath := path.Join( mapResponsePath := path.Join(
mPath, mPath,
fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType), fmt.Sprintf("%d-%s-%d.json", now, m.uid, atomic.LoadUint64(&m.seq)),
) )
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath) log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
@ -500,9 +425,16 @@ func (m *Mapper) marshalMapResponse(
var respBody []byte var respBody []byte
if compression == util.ZstdCompression { if compression == util.ZstdCompression {
respBody = zstdEncode(jsonBody) respBody = zstdEncode(jsonBody)
if !m.isNoise { // if legacy protocol
respBody = m.privateKey2019.SealTo(machineKey, respBody)
}
} else {
if !m.isNoise { // if legacy protocol
respBody = m.privateKey2019.SealTo(machineKey, jsonBody)
} else { } else {
respBody = jsonBody respBody = jsonBody
} }
}
data := make([]byte, reservedResponseHeaderSize) data := make([]byte, reservedResponseHeaderSize)
binary.LittleEndian.PutUint32(data, uint32(len(respBody))) binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
@ -511,6 +443,32 @@ func (m *Mapper) marshalMapResponse(
return data, nil return data, nil
} }
// MarshalResponse takes a Tailscale response and marshals it to JSON.
// If isNoise is set, then the JSON body will be returned
// If !isNoise and privateKey2019 is set, the JSON body will be sealed in a Nacl box.
func MarshalResponse(
resp interface{},
isNoise bool,
privateKey2019 *key.MachinePrivate,
machineKey key.MachinePublic,
) ([]byte, error) {
jsonBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot marshal response")
return nil, err
}
if !isNoise && privateKey2019 != nil {
return privateKey2019.SealTo(machineKey, jsonBody), nil
}
return jsonBody, nil
}
func zstdEncode(in []byte) []byte { func zstdEncode(in []byte) []byte {
encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder) encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
if !ok { if !ok {
@ -544,7 +502,6 @@ func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
resp := tailcfg.MapResponse{ resp := tailcfg.MapResponse{
KeepAlive: false, KeepAlive: false,
ControlTime: &now, ControlTime: &now,
// TODO(kradalby): Implement PingRequest?
} }
return resp return resp
@ -557,11 +514,10 @@ func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
func (m *Mapper) baseWithConfigMapResponse( func (m *Mapper) baseWithConfigMapResponse(
node *types.Node, node *types.Node,
pol *policy.ACLPolicy, pol *policy.ACLPolicy,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) { ) (*tailcfg.MapResponse, error) {
resp := m.baseMapResponse() resp := m.baseMapResponse()
tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort) tailnode, err := tailNode(node, m.capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -594,6 +550,15 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
return ret return ret
} }
func filterExpiredAndNotReady(peers types.Nodes) types.Nodes {
return lo.Filter(peers, func(item *types.Node, index int) bool {
// Filter out nodes that are expired OR
// nodes that have no endpoints; this typically means they have
// registered, but are not configured.
return !item.IsExpired() || len(item.Endpoints) > 0
})
}
// appendPeerChanges mutates a tailcfg.MapResponse with all the // appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed. // necessary changes when peers have changed.
func appendPeerChanges( func appendPeerChanges(
@ -619,6 +584,9 @@ func appendPeerChanges(
return err return err
} }
// Filter out peers that have expired.
changed = filterExpiredAndNotReady(changed)
// If there are filter rules present, see if there are any nodes that cannot // If there are filter rules present, see if there are any nodes that cannot
// access eachother at all and remove them from the peers. // access eachother at all and remove them from the peers.
if len(rules) > 0 { if len(rules) > 0 {
@ -654,5 +622,8 @@ func appendPeerChanges(
resp.UserProfiles = profiles resp.UserProfiles = profiles
resp.SSHPolicy = sshPolicy resp.SSHPolicy = sshPolicy
// TODO(kradalby): This currently does not take last seen in keepalives into account
resp.OnlineChange = peers.OnlineNodeMap()
return nil return nil
} }
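Two details of marshalMapResponse are easy to miss in the diff: on the legacy (non-Noise) path the JSON body is sealed to the client's machine key with the server's 2019 private key, and every response, legacy or Noise, is prefixed with a little-endian uint32 length header before it goes onto the stream. A self-contained round-trip sketch of both steps, using only tailscale.com/types/key and the standard library, with freshly generated keys standing in for server and client; the real header size constant, reservedResponseHeaderSize, is not shown in this hunk, so the 4 bytes a uint32 needs are assumed here:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "io"
        "log"

        "tailscale.com/types/key"
    )

    // frame prepends a 4-byte little-endian length header, mirroring the
    // framing done at the end of marshalMapResponse.
    func frame(body []byte) []byte {
        out := make([]byte, 4, 4+len(body))
        binary.LittleEndian.PutUint32(out, uint32(len(body)))
        return append(out, body...)
    }

    // readFrame reads one length-prefixed message back off a stream.
    func readFrame(r io.Reader) ([]byte, error) {
        var hdr [4]byte
        if _, err := io.ReadFull(r, hdr[:]); err != nil {
            return nil, err
        }
        body := make([]byte, binary.LittleEndian.Uint32(hdr[:]))
        if _, err := io.ReadFull(r, body); err != nil {
            return nil, err
        }
        return body, nil
    }

    func main() {
        // Stand-ins for the server's 2019 key and the client's machine key.
        server := key.NewMachine()
        client := key.NewMachine()

        jsonBody := []byte(`{"KeepAlive":true}`)

        // Legacy path: seal the JSON to the client's public key, as
        // privateKey2019.SealTo(machineKey, jsonBody) does in the mapper.
        sealed := server.SealTo(client.Public(), jsonBody)

        // Frame it, then read it back the way a client would.
        payload, err := readFrame(bytes.NewReader(frame(sealed)))
        if err != nil {
            log.Fatal(err)
        }

        // The client opens the box with the server's public key.
        opened, ok := client.OpenFrom(server.Public(), payload)
        if !ok {
            log.Fatal("could not open sealed response")
        }
        fmt.Printf("round-tripped body: %s\n", opened)
    }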

View file

@ -167,15 +167,9 @@ func Test_fullMapResponse(t *testing.T) {
mini := &types.Node{ mini := &types.Node{
ID: 0, ID: 0,
MachineKey: mustMK( MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507", NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
), DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")}, IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
Hostname: "mini", Hostname: "mini",
GivenName: "mini", GivenName: "mini",
@ -186,7 +180,8 @@ func Test_fullMapResponse(t *testing.T) {
AuthKey: &types.PreAuthKey{}, AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen, LastSeen: &lastSeen,
Expiry: &expire, Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{}, HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{ Routes: []types.Route{
{ {
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")), Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
@ -231,12 +226,14 @@ func Test_fullMapResponse(t *testing.T) {
netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("0.0.0.0/0"),
netip.MustParsePrefix("192.168.0.0/24"), netip.MustParsePrefix("192.168.0.0/24"),
}, },
Endpoints: []string{},
DERP: "127.3.3.40:0", DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}), Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created, Created: created,
Tags: []string{}, Tags: []string{},
PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}, PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
LastSeen: &lastSeen, LastSeen: &lastSeen,
Online: new(bool),
MachineAuthorized: true, MachineAuthorized: true,
Capabilities: []tailcfg.NodeCapability{ Capabilities: []tailcfg.NodeCapability{
tailcfg.CapabilityFileSharing, tailcfg.CapabilityFileSharing,
@ -248,15 +245,9 @@ func Test_fullMapResponse(t *testing.T) {
peer1 := &types.Node{ peer1 := &types.Node{
ID: 1, ID: 1,
MachineKey: mustMK( MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507", NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
), DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")}, IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
Hostname: "peer1", Hostname: "peer1",
GivenName: "peer1", GivenName: "peer1",
@ -265,7 +256,8 @@ func Test_fullMapResponse(t *testing.T) {
ForcedTags: []string{}, ForcedTags: []string{},
LastSeen: &lastSeen, LastSeen: &lastSeen,
Expiry: &expire, Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{}, HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{}, Routes: []types.Route{},
CreatedAt: created, CreatedAt: created,
} }
@ -286,12 +278,14 @@ func Test_fullMapResponse(t *testing.T) {
), ),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")}, Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")}, AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
Endpoints: []string{},
DERP: "127.3.3.40:0", DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}), Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created, Created: created,
Tags: []string{}, Tags: []string{},
PrimaryRoutes: []netip.Prefix{}, PrimaryRoutes: []netip.Prefix{},
LastSeen: &lastSeen, LastSeen: &lastSeen,
Online: new(bool),
MachineAuthorized: true, MachineAuthorized: true,
Capabilities: []tailcfg.NodeCapability{ Capabilities: []tailcfg.NodeCapability{
tailcfg.CapabilityFileSharing, tailcfg.CapabilityFileSharing,
@ -303,15 +297,9 @@ func Test_fullMapResponse(t *testing.T) {
peer2 := &types.Node{ peer2 := &types.Node{
ID: 2, ID: 2,
MachineKey: mustMK( MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507", NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
), DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")}, IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
Hostname: "peer2", Hostname: "peer2",
GivenName: "peer2", GivenName: "peer2",
@ -320,7 +308,8 @@ func Test_fullMapResponse(t *testing.T) {
ForcedTags: []string{}, ForcedTags: []string{},
LastSeen: &lastSeen, LastSeen: &lastSeen,
Expiry: &expire, Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{}, HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{}, Routes: []types.Route{},
CreatedAt: created, CreatedAt: created,
} }
@ -398,6 +387,7 @@ func Test_fullMapResponse(t *testing.T) {
DNSConfig: &tailcfg.DNSConfig{}, DNSConfig: &tailcfg.DNSConfig{},
Domain: "", Domain: "",
CollectServices: "false", CollectServices: "false",
OnlineChange: map[tailcfg.NodeID]bool{tailPeer1.ID: false},
PacketFilter: []tailcfg.FilterRule{}, PacketFilter: []tailcfg.FilterRule{},
UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}}, UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}}, SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}},
@ -439,6 +429,10 @@ func Test_fullMapResponse(t *testing.T) {
DNSConfig: &tailcfg.DNSConfig{}, DNSConfig: &tailcfg.DNSConfig{},
Domain: "", Domain: "",
CollectServices: "false", CollectServices: "false",
OnlineChange: map[tailcfg.NodeID]bool{
tailPeer1.ID: false,
tailcfg.NodeID(peer2.ID): false,
},
PacketFilter: []tailcfg.FilterRule{ PacketFilter: []tailcfg.FilterRule{
{ {
SrcIPs: []string{"100.64.0.2/32"}, SrcIPs: []string{"100.64.0.2/32"},
@ -465,6 +459,9 @@ func Test_fullMapResponse(t *testing.T) {
mappy := NewMapper( mappy := NewMapper(
tt.node, tt.node,
tt.peers, tt.peers,
nil,
false,
0,
tt.derpMap, tt.derpMap,
tt.baseDomain, tt.baseDomain,
tt.dnsConfig, tt.dnsConfig,
@ -475,7 +472,6 @@ func Test_fullMapResponse(t *testing.T) {
got, err := mappy.fullMapResponse( got, err := mappy.fullMapResponse(
tt.node, tt.node,
tt.pol, tt.pol,
0,
) )
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {

View file

@ -52,6 +52,21 @@ func tailNode(
baseDomain string, baseDomain string,
randomClientPort bool, randomClientPort bool,
) (*tailcfg.Node, error) { ) (*tailcfg.Node, error) {
nodeKey, err := node.NodePublicKey()
if err != nil {
return nil, err
}
machineKey, err := node.MachinePublicKey()
if err != nil {
return nil, err
}
discoKey, err := node.DiscoPublicKey()
if err != nil {
return nil, err
}
addrs := node.IPAddresses.Prefixes() addrs := node.IPAddresses.Prefixes()
allowedIPs := append( allowedIPs := append(
@ -72,8 +87,8 @@ func tailNode(
} }
var derp string var derp string
if node.Hostinfo.NetInfo != nil { if node.HostInfo.NetInfo != nil {
derp = fmt.Sprintf("127.3.3.40:%d", node.Hostinfo.NetInfo.PreferredDERP) derp = fmt.Sprintf("127.3.3.40:%d", node.HostInfo.NetInfo.PreferredDERP)
} else { } else {
derp = "127.3.3.40:0" // Zero means disconnected or unknown. derp = "127.3.3.40:0" // Zero means disconnected or unknown.
} }
@ -87,9 +102,13 @@ func tailNode(
hostname, err := node.GetFQDN(dnsConfig, baseDomain) hostname, err := node.GetFQDN(dnsConfig, baseDomain)
if err != nil { if err != nil {
return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err) return nil, err
} }
hostInfo := node.GetHostInfo()
online := node.IsOnline()
tags, _ := pol.TagsOfNode(node) tags, _ := pol.TagsOfNode(node)
tags = lo.Uniq(append(tags, node.ForcedTags...)) tags = lo.Uniq(append(tags, node.ForcedTags...))
@ -99,30 +118,28 @@ func tailNode(
strconv.FormatUint(node.ID, util.Base10), strconv.FormatUint(node.ID, util.Base10),
), // in headscale, unlike tailcontrol server, IDs are permanent ), // in headscale, unlike tailcontrol server, IDs are permanent
Name: hostname, Name: hostname,
Cap: capVer,
User: tailcfg.UserID(node.UserID), User: tailcfg.UserID(node.UserID),
Key: node.NodeKey, Key: nodeKey,
KeyExpiry: keyExpiry, KeyExpiry: keyExpiry,
Machine: node.MachineKey, Machine: machineKey,
DiscoKey: node.DiscoKey, DiscoKey: discoKey,
Addresses: addrs, Addresses: addrs,
AllowedIPs: allowedIPs, AllowedIPs: allowedIPs,
Endpoints: node.Endpoints, Endpoints: node.Endpoints,
DERP: derp, DERP: derp,
Hostinfo: node.Hostinfo.View(), Hostinfo: hostInfo.View(),
Created: node.CreatedAt, Created: node.CreatedAt,
Online: node.IsOnline,
Tags: tags, Tags: tags,
PrimaryRoutes: primaryPrefixes, PrimaryRoutes: primaryPrefixes,
LastSeen: node.LastSeen,
Online: &online,
MachineAuthorized: !node.IsExpired(), MachineAuthorized: !node.IsExpired(),
Expired: node.IsExpired(),
} }
// - 74: 2023-09-18: Client understands NodeCapMap // - 74: 2023-09-18: Client understands NodeCapMap
@ -153,11 +170,5 @@ func tailNode(
tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrDisableUPnP) tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrDisableUPnP)
} }
if node.IsOnline == nil || !*node.IsOnline {
// LastSeen is only set when node is
// not connected to the control server.
tNode.LastSeen = node.LastSeen
}
return &tNode, nil return &tNode, nil
} }
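tailNode reports the node's preferred relay in the "magic IP" form shown above: the host part is always 127.3.3.40 and the port carries the preferred DERP region, with 0 meaning disconnected or unknown, as the comment in the hunk says. A tiny sketch of producing and reading that string with only the standard library:

    package main

    import (
        "fmt"
        "log"
        "net"
        "strconv"
    )

    // derpString formats a preferred DERP region the way tailNode does;
    // region 0 means "disconnected or unknown".
    func derpString(region int) string {
        return fmt.Sprintf("127.3.3.40:%d", region)
    }

    // derpRegion recovers the region ID from the fake host:port form.
    func derpRegion(s string) (int, error) {
        _, port, err := net.SplitHostPort(s)
        if err != nil {
            return 0, err
        }
        return strconv.Atoi(port)
    }

    func main() {
        s := derpString(9)
        region, err := derpRegion(s)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(s, "-> region", region) // 127.3.3.40:9 -> region 9
    }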

View file

@ -54,41 +54,20 @@ func TestTailNode(t *testing.T) {
}{ }{
{ {
name: "empty-node", name: "empty-node",
node: &types.Node{ node: &types.Node{},
Hostinfo: &tailcfg.Hostinfo{},
},
pol: &policy.ACLPolicy{}, pol: &policy.ACLPolicy{},
dnsConfig: &tailcfg.DNSConfig{}, dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "", baseDomain: "",
want: &tailcfg.Node{ want: nil,
StableID: "0", wantErr: true,
Addresses: []netip.Prefix{},
AllowedIPs: []netip.Prefix{},
DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Tags: []string{},
PrimaryRoutes: []netip.Prefix{},
MachineAuthorized: true,
Capabilities: []tailcfg.NodeCapability{
"https://tailscale.com/cap/file-sharing", "https://tailscale.com/cap/is-admin",
"https://tailscale.com/cap/ssh", "debug-disable-upnp",
},
},
wantErr: false,
}, },
{ {
name: "minimal-node", name: "minimal-node",
node: &types.Node{ node: &types.Node{
ID: 0, ID: 0,
MachineKey: mustMK( MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507", NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
), DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPAddresses: []netip.Addr{ IPAddresses: []netip.Addr{
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
@ -103,7 +82,8 @@ func TestTailNode(t *testing.T) {
AuthKey: &types.PreAuthKey{}, AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen, LastSeen: &lastSeen,
Expiry: &expire, Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{}, HostInfo: types.HostInfo{},
Endpoints: []string{},
Routes: []types.Route{ Routes: []types.Route{
{ {
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")), Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
@ -153,6 +133,7 @@ func TestTailNode(t *testing.T) {
netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("0.0.0.0/0"),
netip.MustParsePrefix("192.168.0.0/24"), netip.MustParsePrefix("192.168.0.0/24"),
}, },
Endpoints: []string{},
DERP: "127.3.3.40:0", DERP: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}), Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created, Created: created,
@ -164,6 +145,7 @@ func TestTailNode(t *testing.T) {
}, },
LastSeen: &lastSeen, LastSeen: &lastSeen,
Online: new(bool),
MachineAuthorized: true, MachineAuthorized: true,
Capabilities: []tailcfg.NodeCapability{ Capabilities: []tailcfg.NodeCapability{

View file

@ -1,14 +1,11 @@
package notifier package notifier
import ( import (
"fmt"
"strings"
"sync" "sync"
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util" "github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
"tailscale.com/types/key"
) )
type Notifier struct { type Notifier struct {
@ -20,9 +17,9 @@ func NewNotifier() *Notifier {
return &Notifier{} return &Notifier{}
} }
func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpdate) { func (n *Notifier) AddNode(machineKey string, c chan<- types.StateUpdate) {
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to add node") log.Trace().Caller().Str("key", machineKey).Msg("acquiring lock to add node")
defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to add node") defer log.Trace().Caller().Str("key", machineKey).Msg("releasing lock to add node")
n.l.Lock() n.l.Lock()
defer n.l.Unlock() defer n.l.Unlock()
@ -31,17 +28,17 @@ func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpd
n.nodes = make(map[string]chan<- types.StateUpdate) n.nodes = make(map[string]chan<- types.StateUpdate)
} }
n.nodes[machineKey.String()] = c n.nodes[machineKey] = c
log.Trace(). log.Trace().
Str("machine_key", machineKey.ShortString()). Str("machine_key", machineKey).
Int("open_chans", len(n.nodes)). Int("open_chans", len(n.nodes)).
Msg("Added new channel") Msg("Added new channel")
} }
func (n *Notifier) RemoveNode(machineKey key.MachinePublic) { func (n *Notifier) RemoveNode(machineKey string) {
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to remove node") log.Trace().Caller().Str("key", machineKey).Msg("acquiring lock to remove node")
defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to remove node") defer log.Trace().Caller().Str("key", machineKey).Msg("releasing lock to remove node")
n.l.Lock() n.l.Lock()
defer n.l.Unlock() defer n.l.Unlock()
@ -50,27 +47,14 @@ func (n *Notifier) RemoveNode(machineKey key.MachinePublic) {
return return
} }
delete(n.nodes, machineKey.String()) delete(n.nodes, machineKey)
log.Trace(). log.Trace().
Str("machine_key", machineKey.ShortString()). Str("machine_key", machineKey).
Int("open_chans", len(n.nodes)). Int("open_chans", len(n.nodes)).
Msg("Removed channel") Msg("Removed channel")
} }
// IsConnected reports if a node is connected to headscale and has a
// poll session open.
func (n *Notifier) IsConnected(machineKey key.MachinePublic) bool {
n.l.RLock()
defer n.l.RUnlock()
if _, ok := n.nodes[machineKey.String()]; ok {
return true
}
return false
}
func (n *Notifier) NotifyAll(update types.StateUpdate) { func (n *Notifier) NotifyAll(update types.StateUpdate) {
n.NotifyWithIgnore(update) n.NotifyWithIgnore(update)
} }
@ -94,31 +78,3 @@ func (n *Notifier) NotifyWithIgnore(update types.StateUpdate, ignore ...string)
c <- update c <- update
} }
} }
func (n *Notifier) NotifyByMachineKey(update types.StateUpdate, mKey key.MachinePublic) {
log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify")
defer log.Trace().
Caller().
Interface("type", update.Type).
Msg("releasing lock, finished notifing")
n.l.RLock()
defer n.l.RUnlock()
if c, ok := n.nodes[mKey.String()]; ok {
c <- update
}
}
func (n *Notifier) String() string {
n.l.RLock()
defer n.l.RUnlock()
str := []string{"Notifier, in map:\n"}
for k, v := range n.nodes {
str = append(str, fmt.Sprintf("\t%s: %v\n", k, v))
}
return strings.Join(str, "")
}
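Both versions of the notifier boil down to the same pattern: a lock-guarded map from a per-node key to a channel of state updates, with NotifyAll fanning one update out to every registered channel. A stripped-down, generic sketch of that pattern, not headscale's implementation (note that, as in the original, a send blocks if a receiver stops draining its channel):

    package main

    import (
        "fmt"
        "sync"
    )

    // notifier fans updates out to per-subscriber channels.
    type notifier[T any] struct {
        mu    sync.RWMutex
        nodes map[string]chan<- T
    }

    func newNotifier[T any]() *notifier[T] {
        return &notifier[T]{nodes: make(map[string]chan<- T)}
    }

    func (n *notifier[T]) add(key string, c chan<- T) {
        n.mu.Lock()
        defer n.mu.Unlock()
        n.nodes[key] = c
    }

    func (n *notifier[T]) remove(key string) {
        n.mu.Lock()
        defer n.mu.Unlock()
        delete(n.nodes, key)
    }

    func (n *notifier[T]) notifyAll(update T) {
        n.mu.RLock()
        defer n.mu.RUnlock()
        for _, c := range n.nodes {
            c <- update
        }
    }

    func main() {
        n := newNotifier[string]()

        c := make(chan string, 1)
        n.add("node-a", c)

        n.notifyAll("state changed")
        fmt.Println(<-c) // state changed

        n.remove("node-a")
    }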

View file

@ -124,28 +124,42 @@ func (h *Headscale) determineTokenExpiration(idTokenExpiration time.Time) time.T
// RegisterOIDC redirects to the OIDC provider for authentication // RegisterOIDC redirects to the OIDC provider for authentication
// Puts NodeKey in cache so the callback can retrieve it using the oidc state param // Puts NodeKey in cache so the callback can retrieve it using the oidc state param
// Listens in /oidc/register/:mKey. // Listens in /oidc/register/:nKey.
func (h *Headscale) RegisterOIDC( func (h *Headscale) RegisterOIDC(
writer http.ResponseWriter, writer http.ResponseWriter,
req *http.Request, req *http.Request,
) { ) {
vars := mux.Vars(req) vars := mux.Vars(req)
machineKeyStr, ok := vars["mkey"] nodeKeyStr, ok := vars["nkey"]
log.Debug(). log.Debug().
Caller(). Caller().
Str("machine_key", machineKeyStr). Str("node_key", nodeKeyStr).
Bool("ok", ok). Bool("ok", ok).
Msg("Received oidc register call") Msg("Received oidc register call")
if !util.NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
if err != nil {
util.LogErr(err, "Failed to write response")
}
return
}
// We need to make sure we don't open for XSS style injections, if the parameter that // is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render // is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// the template and log an error. // the template and log an error.
var machineKey key.MachinePublic var nodeKey key.NodePublic
err := machineKey.UnmarshalText( err := nodeKey.UnmarshalText(
[]byte(machineKeyStr), []byte(util.NodePublicKeyEnsurePrefix(nodeKeyStr)),
) )
if err != nil {
if !ok || nodeKeyStr == "" || err != nil {
log.Warn(). log.Warn().
Err(err). Err(err).
Msg("Failed to parse incoming nodekey in OIDC registration") Msg("Failed to parse incoming nodekey in OIDC registration")
@ -174,7 +188,7 @@ func (h *Headscale) RegisterOIDC(
// place the node key into the state cache, so it can be retrieved later // place the node key into the state cache, so it can be retrieved later
h.registrationCache.Set( h.registrationCache.Set(
stateStr, stateStr,
machineKey, util.NodePublicKeyStripPrefix(nodeKey),
registerCacheExpiration, registerCacheExpiration,
) )
@ -252,7 +266,7 @@ func (h *Headscale) OIDCCallback(
return return
} }
machineKey, nodeExists, err := h.validateNodeForOIDCCallback( nodeKey, nodeExists, err := h.validateNodeForOIDCCallback(
writer, writer,
state, state,
claims, claims,
@ -280,7 +294,7 @@ func (h *Headscale) OIDCCallback(
return return
} }
if err := h.registerNodeForOIDCCallback(writer, user, machineKey, idTokenExpiry); err != nil { if err := h.registerNodeForOIDCCallback(writer, user, nodeKey, idTokenExpiry); err != nil {
return return
} }
@ -525,10 +539,10 @@ func (h *Headscale) validateNodeForOIDCCallback(
state string, state string,
claims *IDTokenClaims, claims *IDTokenClaims,
expiry time.Time, expiry time.Time,
) (*key.MachinePublic, bool, error) { ) (*key.NodePublic, bool, error) {
// retrieve nodekey from state cache // retrieve nodekey from state cache
machineKeyIf, machineKeyFound := h.registrationCache.Get(state) nodeKeyIf, nodeKeyFound := h.registrationCache.Get(state)
if !machineKeyFound { if !nodeKeyFound {
log.Trace(). log.Trace().
Msg("requested node state key expired before authorisation completed") Msg("requested node state key expired before authorisation completed")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
@ -541,12 +555,11 @@ func (h *Headscale) validateNodeForOIDCCallback(
return nil, false, errOIDCNodeKeyMissing return nil, false, errOIDCNodeKeyMissing
} }
var machineKey key.MachinePublic var nodeKey key.NodePublic
machineKey, machineKeyOK := machineKeyIf.(key.MachinePublic) nodeKeyFromCache, nodeKeyOK := nodeKeyIf.(string)
if !machineKeyOK { if !nodeKeyOK {
log.Trace(). log.Trace().
Interface("got", machineKeyIf). Msg("requested node state key is not a string")
Msg("requested node state key is not a nodekey")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8") writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest) writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write([]byte("state is invalid")) _, err := writer.Write([]byte("state is invalid"))
@ -557,11 +570,29 @@ func (h *Headscale) validateNodeForOIDCCallback(
return nil, false, errOIDCInvalidNodeState return nil, false, errOIDCInvalidNodeState
} }
err := nodeKey.UnmarshalText(
[]byte(util.NodePublicKeyEnsurePrefix(nodeKeyFromCache)),
)
if err != nil {
log.Error().
Str("nodeKey", nodeKeyFromCache).
Bool("nodeKeyOK", nodeKeyOK).
Msg("could not parse node public key")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, werr := writer.Write([]byte("could not parse node public key"))
if werr != nil {
util.LogErr(err, "Failed to write response")
}
return nil, false, err
}
// retrieve node information if it exists // retrieve node information if it exists
// The error is not important, because if it does not // The error is not important, because if it does not
// exist, then this is a new node and we will move // exist, then this is a new node and we will move
// on to registration. // on to registration.
node, _ := h.db.GetNodeByMachineKey(machineKey) node, _ := h.db.GetNodeByNodeKey(nodeKey)
if node != nil { if node != nil {
log.Trace(). log.Trace().
@ -626,7 +657,7 @@ func (h *Headscale) validateNodeForOIDCCallback(
return nil, true, nil return nil, true, nil
} }
return &machineKey, false, nil return &nodeKey, false, nil
} }
func getUserName( func getUserName(
@ -709,13 +740,13 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback(
func (h *Headscale) registerNodeForOIDCCallback( func (h *Headscale) registerNodeForOIDCCallback(
writer http.ResponseWriter, writer http.ResponseWriter,
user *types.User, user *types.User,
machineKey *key.MachinePublic, nodeKey *key.NodePublic,
expiry time.Time, expiry time.Time,
) error { ) error {
if _, err := h.db.RegisterNodeFromAuthCallback( if _, err := h.db.RegisterNodeFromAuthCallback(
// TODO(kradalby): find a better way to use the cache across modules // TODO(kradalby): find a better way to use the cache across modules
h.registrationCache, h.registrationCache,
*machineKey, nodeKey.String(),
user.Name, user.Name,
&expiry, &expiry,
util.RegisterMethodOIDC, util.RegisterMethodOIDC,
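The OIDC flow uses the registration cache as a short-lived state store: RegisterOIDC saves the key under a random state string before redirecting to the identity provider, and OIDCCallback later resolves that state back to the key and registers the node. A compact sketch of that round trip with a plain map and explicit expiry; the real code uses headscale's registrationCache, whose exact type is not shown in this diff, and the key value below is just the one from the tests:

    package main

    import (
        "crypto/rand"
        "encoding/hex"
        "errors"
        "fmt"
        "log"
        "time"
    )

    type entry struct {
        value   string
        expires time.Time
    }

    // stateStore is a toy stand-in for the registration cache.
    type stateStore map[string]entry

    func (s stateStore) put(state, value string, ttl time.Duration) {
        s[state] = entry{value: value, expires: time.Now().Add(ttl)}
    }

    func (s stateStore) get(state string) (string, error) {
        e, ok := s[state]
        if !ok || time.Now().After(e.expires) {
            return "", errors.New("state expired before authorisation completed")
        }
        return e.value, nil
    }

    func main() {
        store := stateStore{}

        // Before redirecting to the provider, stash the node's key under a
        // random, single-use state string.
        buf := make([]byte, 16)
        if _, err := rand.Read(buf); err != nil {
            log.Fatal(err)
        }
        state := hex.EncodeToString(buf)
        store.put(state, "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe", 5*time.Minute)

        // In the callback, the state parameter is all that is needed to find
        // the key that started the flow.
        key, err := store.get(state)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("registering node with key", key)
    }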

View file

@ -596,13 +596,10 @@ func excludeCorrectlyTaggedNodes(
} }
// for each node if tag is in tags list, don't append it. // for each node if tag is in tags list, don't append it.
for _, node := range nodes { for _, node := range nodes {
hi := node.GetHostInfo()
found := false found := false
for _, t := range hi.RequestTags {
if node.Hostinfo == nil {
continue
}
for _, t := range node.Hostinfo.RequestTags {
if util.StringOrPrefixListContains(tags, t) { if util.StringOrPrefixListContains(tags, t) {
found = true found = true
@ -674,18 +671,14 @@ func expandOwnersFromTag(
pol *ACLPolicy, pol *ACLPolicy,
tag string, tag string,
) ([]string, error) { ) ([]string, error) {
noTagErr := fmt.Errorf( var owners []string
ows, ok := pol.TagOwners[tag]
if !ok {
return []string{}, fmt.Errorf(
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners", "%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
ErrInvalidTag, ErrInvalidTag,
tag, tag,
) )
if pol == nil {
return []string{}, noTagErr
}
var owners []string
ows, ok := pol.TagOwners[tag]
if !ok {
return []string{}, noTagErr
} }
for _, owner := range ows { for _, owner := range ows {
if isGroup(owner) { if isGroup(owner) {
@ -794,11 +787,8 @@ func (pol *ACLPolicy) expandIPsFromTag(
for _, user := range owners { for _, user := range owners {
nodes := filterNodesByUser(nodes, user) nodes := filterNodesByUser(nodes, user)
for _, node := range nodes { for _, node := range nodes {
if node.Hostinfo == nil { hi := node.GetHostInfo()
continue if util.StringOrPrefixListContains(hi.RequestTags, alias) {
}
if util.StringOrPrefixListContains(node.Hostinfo.RequestTags, alias) {
node.IPAddresses.AppendToIPSet(&build) node.IPAddresses.AppendToIPSet(&build)
} }
} }
@ -892,7 +882,7 @@ func (pol *ACLPolicy) TagsOfNode(
validTagMap := make(map[string]bool) validTagMap := make(map[string]bool)
invalidTagMap := make(map[string]bool) invalidTagMap := make(map[string]bool)
for _, tag := range node.Hostinfo.RequestTags { for _, tag := range node.HostInfo.RequestTags {
owners, err := expandOwnersFromTag(pol, tag) owners, err := expandOwnersFromTag(pol, tag)
if errors.Is(err, ErrInvalidTag) { if errors.Is(err, ErrInvalidTag) {
invalidTagMap[tag] = true invalidTagMap[tag] = true
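TagsOfNode in this hunk sorts a node's advertised RequestTags into valid and invalid buckets by checking whether each tag has an owner in the policy's TagOwners map. A reduced sketch of that classification with plain maps and slices, independent of headscale's policy types; the tag and user names are borrowed from the tests below:

    package main

    import "fmt"

    // classifyTags splits requested tags into those that have at least one
    // owner in tagOwners and those that do not.
    func classifyTags(tagOwners map[string][]string, requested []string) (valid, invalid []string) {
        for _, tag := range requested {
            if owners, ok := tagOwners[tag]; ok && len(owners) > 0 {
                valid = append(valid, tag)
            } else {
                invalid = append(invalid, tag)
            }
        }
        return valid, invalid
    }

    func main() {
        tagOwners := map[string][]string{
            "tag:hr-webserver": {"joe"},
        }
        requested := []string{"tag:hr-webserver", "tag:unknown"}

        valid, invalid := classifyTags(tagOwners, requested)
        fmt.Println("valid:", valid)     // [tag:hr-webserver]
        fmt.Println("invalid:", invalid) // [tag:unknown]
    }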

View file

@ -16,6 +16,10 @@ import (
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
) )
var ipComparer = cmp.Comparer(func(x, y netip.Addr) bool {
return x.Compare(y) == 0
})
func Test(t *testing.T) { func Test(t *testing.T) {
check.TestingT(t) check.TestingT(t)
} }
@ -397,7 +401,6 @@ acls:
User: types.User{ User: types.User{
Name: "testuser", Name: "testuser",
}, },
Hostinfo: &tailcfg.Hostinfo{},
}, },
}) })
@ -948,7 +951,7 @@ func Test_listNodesInUser(t *testing.T) {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
got := filterNodesByUser(test.args.nodes, test.args.user) got := filterNodesByUser(test.args.nodes, test.args.user)
if diff := cmp.Diff(test.want, got, util.Comparers...); diff != "" { if diff := cmp.Diff(test.want, got); diff != "" {
t.Errorf("listNodesInUser() = (-want +got):\n%s", diff) t.Errorf("listNodesInUser() = (-want +got):\n%s", diff)
} }
}) })
@ -1244,7 +1247,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:hr-webserver"}, RequestTags: []string{"tag:hr-webserver"},
@ -1255,7 +1258,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.2"), netip.MustParseAddr("100.64.0.2"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:hr-webserver"}, RequestTags: []string{"tag:hr-webserver"},
@ -1385,7 +1388,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.2"), netip.MustParseAddr("100.64.0.2"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:hr-webserver"}, RequestTags: []string{"tag:hr-webserver"},
@ -1423,7 +1426,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"}, RequestTags: []string{"tag:accountant-webserver"},
@ -1434,7 +1437,7 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.2"), netip.MustParseAddr("100.64.0.2"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"}, RequestTags: []string{"tag:accountant-webserver"},
@ -1445,14 +1448,12 @@ func Test_expandAlias(t *testing.T) {
netip.MustParseAddr("100.64.0.3"), netip.MustParseAddr("100.64.0.3"),
}, },
User: types.User{Name: "marc"}, User: types.User{Name: "marc"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
&types.Node{ &types.Node{
IPAddresses: types.NodeAddresses{ IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"), netip.MustParseAddr("100.64.0.4"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
}, },
@ -1502,7 +1503,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"}, RequestTags: []string{"tag:accountant-webserver"},
@ -1513,7 +1514,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"), netip.MustParseAddr("100.64.0.2"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"}, RequestTags: []string{"tag:accountant-webserver"},
@ -1524,7 +1525,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.4"), netip.MustParseAddr("100.64.0.4"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
user: "joe", user: "joe",
@ -1533,7 +1533,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
&types.Node{ &types.Node{
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
}, },
@ -1554,7 +1553,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"}, RequestTags: []string{"tag:accountant-webserver"},
@ -1565,7 +1564,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"), netip.MustParseAddr("100.64.0.2"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"}, RequestTags: []string{"tag:accountant-webserver"},
@ -1576,7 +1575,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.4"), netip.MustParseAddr("100.64.0.4"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
user: "joe", user: "joe",
@ -1585,7 +1583,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
&types.Node{ &types.Node{
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
}, },
@ -1601,7 +1598,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "foo", Hostname: "foo",
RequestTags: []string{"tag:accountant-webserver"}, RequestTags: []string{"tag:accountant-webserver"},
@ -1613,14 +1610,12 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
ForcedTags: []string{"tag:accountant-webserver"}, ForcedTags: []string{"tag:accountant-webserver"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
&types.Node{ &types.Node{
IPAddresses: types.NodeAddresses{ IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.4"), netip.MustParseAddr("100.64.0.4"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
user: "joe", user: "joe",
@ -1629,7 +1624,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
&types.Node{ &types.Node{
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
}, },
@ -1645,7 +1639,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "hr-web1", Hostname: "hr-web1",
RequestTags: []string{"tag:hr-webserver"}, RequestTags: []string{"tag:hr-webserver"},
@ -1656,7 +1650,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"), netip.MustParseAddr("100.64.0.2"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "hr-web2", Hostname: "hr-web2",
RequestTags: []string{"tag:hr-webserver"}, RequestTags: []string{"tag:hr-webserver"},
@ -1667,7 +1661,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.4"), netip.MustParseAddr("100.64.0.4"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
user: "joe", user: "joe",
@ -1678,7 +1671,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.1"), netip.MustParseAddr("100.64.0.1"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "hr-web1", Hostname: "hr-web1",
RequestTags: []string{"tag:hr-webserver"}, RequestTags: []string{"tag:hr-webserver"},
@ -1689,7 +1682,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.2"), netip.MustParseAddr("100.64.0.2"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
OS: "centos", OS: "centos",
Hostname: "hr-web2", Hostname: "hr-web2",
RequestTags: []string{"tag:hr-webserver"}, RequestTags: []string{"tag:hr-webserver"},
@ -1700,7 +1693,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
netip.MustParseAddr("100.64.0.4"), netip.MustParseAddr("100.64.0.4"),
}, },
User: types.User{Name: "joe"}, User: types.User{Name: "joe"},
Hostinfo: &tailcfg.Hostinfo{},
}, },
}, },
}, },
@ -1712,7 +1704,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
test.args.nodes, test.args.nodes,
test.args.user, test.args.user,
) )
if diff := cmp.Diff(test.want, got, util.Comparers...); diff != "" { if diff := cmp.Diff(test.want, got, ipComparer); diff != "" {
t.Errorf("excludeCorrectlyTaggedNodes() (-want +got):\n%s", diff) t.Errorf("excludeCorrectlyTaggedNodes() (-want +got):\n%s", diff)
} }
}) })
@ -1943,7 +1935,7 @@ func Test_getTags(t *testing.T) {
User: types.User{ User: types.User{
Name: "joe", Name: "joe",
}, },
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
RequestTags: []string{"tag:valid"}, RequestTags: []string{"tag:valid"},
}, },
}, },
@ -1963,7 +1955,7 @@ func Test_getTags(t *testing.T) {
User: types.User{ User: types.User{
Name: "joe", Name: "joe",
}, },
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
RequestTags: []string{"tag:valid", "tag:invalid"}, RequestTags: []string{"tag:valid", "tag:invalid"},
}, },
}, },
@ -1983,7 +1975,7 @@ func Test_getTags(t *testing.T) {
User: types.User{ User: types.User{
Name: "joe", Name: "joe",
}, },
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
RequestTags: []string{ RequestTags: []string{
"tag:invalid", "tag:invalid",
"tag:valid", "tag:valid",
@ -2007,7 +1999,7 @@ func Test_getTags(t *testing.T) {
User: types.User{ User: types.User{
Name: "joe", Name: "joe",
}, },
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
RequestTags: []string{"tag:invalid", "very-invalid"}, RequestTags: []string{"tag:invalid", "very-invalid"},
}, },
}, },
@ -2023,7 +2015,7 @@ func Test_getTags(t *testing.T) {
User: types.User{ User: types.User{
Name: "joe", Name: "joe",
}, },
Hostinfo: &tailcfg.Hostinfo{ HostInfo: types.HostInfo{
RequestTags: []string{"tag:invalid", "very-invalid"}, RequestTags: []string{"tag:invalid", "very-invalid"},
}, },
}, },
@ -2064,6 +2056,10 @@ func Test_getTags(t *testing.T) {
} }
func Test_getFilteredByACLPeers(t *testing.T) { func Test_getFilteredByACLPeers(t *testing.T) {
ipComparer := cmp.Comparer(func(x, y netip.Addr) bool {
return x.Compare(y) == 0
})
type args struct { type args struct {
nodes types.Nodes nodes types.Nodes
rules []tailcfg.FilterRule rules []tailcfg.FilterRule
@ -2727,7 +2723,7 @@ func Test_getFilteredByACLPeers(t *testing.T) {
tt.args.nodes, tt.args.nodes,
tt.args.rules, tt.args.rules,
) )
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" { if diff := cmp.Diff(tt.want, got, ipComparer); diff != "" {
t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff) t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff)
} }
}) })
@ -2990,6 +2986,9 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
node := &types.Node{ node := &types.Node{
ID: 0, ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnodes", Hostname: "testnodes",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 0, UserID: 0,
@ -2997,7 +2996,7 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
Name: "user1", Name: "user1",
}, },
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo, HostInfo: types.HostInfo(hostInfo),
} }
pol := &ACLPolicy{ pol := &ACLPolicy{
@ -3042,6 +3041,9 @@ func TestInvalidTagValidUser(t *testing.T) {
node := &types.Node{ node := &types.Node{
ID: 1, ID: 1,
MachineKey: "12345",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnodes", Hostname: "testnodes",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 1, UserID: 1,
@ -3049,7 +3051,7 @@ func TestInvalidTagValidUser(t *testing.T) {
Name: "user1", Name: "user1",
}, },
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo, HostInfo: types.HostInfo(hostInfo),
} }
pol := &ACLPolicy{ pol := &ACLPolicy{
@ -3093,6 +3095,9 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
node := &types.Node{ node := &types.Node{
ID: 1, ID: 1,
MachineKey: "12345",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testnodes", Hostname: "testnodes",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 1, UserID: 1,
@ -3100,7 +3105,7 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
Name: "user1", Name: "user1",
}, },
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo, HostInfo: types.HostInfo(hostInfo),
} }
pol := &ACLPolicy{ pol := &ACLPolicy{
@ -3154,6 +3159,9 @@ func TestValidTagInvalidUser(t *testing.T) {
node := &types.Node{ node := &types.Node{
ID: 1, ID: 1,
MachineKey: "12345",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "webserver", Hostname: "webserver",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
UserID: 1, UserID: 1,
@ -3161,7 +3169,7 @@ func TestValidTagInvalidUser(t *testing.T) {
Name: "user1", Name: "user1",
}, },
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo, HostInfo: types.HostInfo(hostInfo),
} }
hostInfo2 := tailcfg.Hostinfo{ hostInfo2 := tailcfg.Hostinfo{
@ -3171,6 +3179,9 @@ func TestValidTagInvalidUser(t *testing.T) {
nodes2 := &types.Node{ nodes2 := &types.Node{
ID: 2, ID: 2,
MachineKey: "56789",
NodeKey: "bar2",
DiscoKey: "faab",
Hostname: "user", Hostname: "user",
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")}, IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")},
UserID: 1, UserID: 1,
@ -3178,7 +3189,7 @@ func TestValidTagInvalidUser(t *testing.T) {
Name: "user1", Name: "user1",
}, },
RegisterMethod: util.RegisterMethodAuthKey, RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &hostInfo2, HostInfo: types.HostInfo(hostInfo2),
} }
pol := &ACLPolicy{ pol := &ACLPolicy{


@ -8,8 +8,8 @@ import (
"github.com/juanfont/headscale/hscontrol/mapper" "github.com/juanfont/headscale/hscontrol/mapper"
"github.com/juanfont/headscale/hscontrol/types" "github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log" "github.com/rs/zerolog/log"
xslices "golang.org/x/exp/slices"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
) )
@ -26,32 +26,35 @@ type UpdateNode func()
func logPollFunc( func logPollFunc(
mapRequest tailcfg.MapRequest, mapRequest tailcfg.MapRequest,
node *types.Node, node *types.Node,
isNoise bool,
) (func(string), func(error, string)) { ) (func(string), func(error, string)) {
return func(msg string) { return func(msg string) {
log.Info(). log.Info().
Caller(). Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly). Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers). Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream). Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey.ShortString()). Str("node_key", node.NodeKey).
Str("node", node.Hostname). Str("node", node.Hostname).
Msg(msg) Msg(msg)
}, },
func(err error, msg string) { func(err error, msg string) {
log.Error(). log.Error().
Caller(). Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly). Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers). Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream). Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey.ShortString()). Str("node_key", node.NodeKey).
Str("node", node.Hostname). Str("node", node.Hostname).
Err(err). Err(err).
Msg(msg) Msg(msg)
} }
} }
// handlePoll ensures the node gets the appropriate updates from either // handlePoll is the common code for the legacy and Noise protocols to
// polling or immediate responses. // manage the poll loop.
// //
//nolint:gocyclo //nolint:gocyclo
func (h *Headscale) handlePoll( func (h *Headscale) handlePoll(
@ -59,10 +62,12 @@ func (h *Headscale) handlePoll(
ctx context.Context, ctx context.Context,
node *types.Node, node *types.Node,
mapRequest tailcfg.MapRequest, mapRequest tailcfg.MapRequest,
isNoise bool,
capVer tailcfg.CapabilityVersion,
) { ) {
logInfo, logErr := logPollFunc(mapRequest, node) logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
// This is the mechanism where the node gives us information about its // This is the mechanism where the node gives us information about its
// current configuration. // current configuration.
// //
// If OmitPeers is true, Stream is false, and ReadOnly is false, // If OmitPeers is true, Stream is false, and ReadOnly is false,
@ -70,112 +75,46 @@ func (h *Headscale) handlePoll(
// breaking existing long-polling (Stream == true) connections. // breaking existing long-polling (Stream == true) connections.
// In this case, the server can omit the entire response; the client // In this case, the server can omit the entire response; the client
// only checks the HTTP response status code. // only checks the HTTP response status code.
// TODO(kradalby): remove ReadOnly when we only support capVer 68+
if mapRequest.OmitPeers && !mapRequest.Stream && !mapRequest.ReadOnly { if mapRequest.OmitPeers && !mapRequest.Stream && !mapRequest.ReadOnly {
log.Info(). log.Info().
Caller(). Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly). Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers). Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream). Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey.ShortString()). Str("node_key", node.NodeKey).
Str("node", node.Hostname). Str("node", node.Hostname).
Int("cap_ver", int(mapRequest.Version)). Strs("endpoints", node.Endpoints).
Msg("Received update") Msg("Received endpoint update")
change := node.PeerChangeFromMapRequest(mapRequest) now := time.Now().UTC()
node.LastSeen = &now
node.Hostname = mapRequest.Hostinfo.Hostname
node.HostInfo = types.HostInfo(*mapRequest.Hostinfo)
node.DiscoKey = util.DiscoPublicKeyStripPrefix(mapRequest.DiscoKey)
node.Endpoints = mapRequest.Endpoints
online := h.nodeNotifier.IsConnected(node.MachineKey) if err := h.db.NodeSave(node); err != nil {
change.Online = &online logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
node.ApplyPeerChange(&change) return
}
hostInfoChange := node.Hostinfo.Equal(mapRequest.Hostinfo) err := h.db.SaveNodeRoutes(node)
logTracePeerChange(node.Hostname, hostInfoChange, &change)
// Check if the Hostinfo of the node has changed.
// If it has changed, check if there has been a change to
// the routable IPs of the host and update them in
// the database. Then send a Changed update
// (containing the whole node object) to peers to inform about
// the route change.
// If the hostinfo has changed, but not the routes, just update
// hostinfo and let the function continue.
if !hostInfoChange {
oldRoutes := node.Hostinfo.RoutableIPs
newRoutes := mapRequest.Hostinfo.RoutableIPs
oldServicesCount := len(node.Hostinfo.Services)
newServicesCount := len(mapRequest.Hostinfo.Services)
node.Hostinfo = mapRequest.Hostinfo
sendUpdate := false
// Route changes come as part of Hostinfo, which means that
// when an update comes, the Node Route logic need to run.
// This will require a "change" in comparison to a "patch",
// which is more costly.
if !xslices.Equal(oldRoutes, newRoutes) {
var err error
sendUpdate, err = h.db.SaveNodeRoutes(node)
if err != nil { if err != nil {
logErr(err, "Error processing node routes") logErr(err, "Error processing node routes")
http.Error(writer, "", http.StatusInternalServerError) http.Error(writer, "", http.StatusInternalServerError)
return return
} }
}
// Services is mostly useful for discovery and not critical, h.nodeNotifier.NotifyWithIgnore(
// except for peerapi, which is how nodes talk to each other. types.StateUpdate{
// If peerapi was not part of the initial mapresponse, we
// need to make sure it's sent out later as it is needed for
// Taildrop.
// TODO(kradalby): Length comparison is a bit naive, replace.
if oldServicesCount != newServicesCount {
sendUpdate = true
}
if sendUpdate {
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
return
}
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged, Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node}, Changed: types.Nodes{node},
Message: "called from handlePoll -> update -> new hostinfo", },
} node.MachineKey)
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(
stateUpdate,
node.MachineKey.String())
}
return
}
}
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
return
}
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{&change},
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(
stateUpdate,
node.MachineKey.String())
}
writer.WriteHeader(http.StatusOK) writer.WriteHeader(http.StatusOK)
if f, ok := writer.(http.Flusher); ok { if f, ok := writer.(http.Flusher); ok {
@ -183,7 +122,7 @@ func (h *Headscale) handlePoll(
} }
return return
} else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly {
// ReadOnly is whether the client just wants to fetch the // ReadOnly is whether the client just wants to fetch the
// MapResponse, without updating their Endpoints. The // MapResponse, without updating their Endpoints. The
// Endpoints field will be ignored and LastSeen will not be // Endpoints field will be ignored and LastSeen will not be
@ -192,7 +131,7 @@ func (h *Headscale) handlePoll(
// The intended use is for clients to discover the DERP map at // The intended use is for clients to discover the DERP map at
// start-up before their first real endpoint update. // start-up before their first real endpoint update.
} else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly { } else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly {
h.handleLiteRequest(writer, node, mapRequest) h.handleLiteRequest(writer, node, mapRequest, isNoise, capVer)
return return
} else if mapRequest.OmitPeers && mapRequest.Stream { } else if mapRequest.OmitPeers && mapRequest.Stream {
@ -201,39 +140,12 @@ func (h *Headscale) handlePoll(
return return
} }
change := node.PeerChangeFromMapRequest(mapRequest) now := time.Now().UTC()
node.LastSeen = &now
// A stream is being set up, the node is Online node.Hostname = mapRequest.Hostinfo.Hostname
online := true node.HostInfo = types.HostInfo(*mapRequest.Hostinfo)
change.Online = &online node.DiscoKey = util.DiscoPublicKeyStripPrefix(mapRequest.DiscoKey)
node.Endpoints = mapRequest.Endpoints
node.ApplyPeerChange(&change)
// Only save HostInfo if changed, update routes if changed
// TODO(kradalby): Remove when capver is over 68
if !node.Hostinfo.Equal(mapRequest.Hostinfo) {
oldRoutes := node.Hostinfo.RoutableIPs
newRoutes := mapRequest.Hostinfo.RoutableIPs
node.Hostinfo = mapRequest.Hostinfo
if !xslices.Equal(oldRoutes, newRoutes) {
_, err := h.db.SaveNodeRoutes(node)
if err != nil {
logErr(err, "Error processing node routes")
http.Error(writer, "", http.StatusInternalServerError)
return
}
}
}
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
return
}
// When a node connects to control, list the peers it has at // When a node connects to control, list the peers it has at
// that given point, further updates are kept in memory in // that given point, further updates are kept in memory in
@ -247,14 +159,12 @@ func (h *Headscale) handlePoll(
return return
} }
for _, peer := range peers {
online := h.nodeNotifier.IsConnected(peer.MachineKey)
peer.IsOnline = &online
}
mapp := mapper.NewMapper( mapp := mapper.NewMapper(
node, node,
peers, peers,
h.privateKey2019,
isNoise,
capVer,
h.DERPMap, h.DERPMap,
h.cfg.BaseDomain, h.cfg.BaseDomain,
h.cfg.DNSConfig, h.cfg.DNSConfig,
@ -262,6 +172,11 @@ func (h *Headscale) handlePoll(
h.cfg.RandomizeClientPort, h.cfg.RandomizeClientPort,
) )
err = h.db.SaveNodeRoutes(node)
if err != nil {
logErr(err, "Error processing node routes")
}
// update ACLRules with peer informations (to update server tags if necessary) // update ACLRules with peer informations (to update server tags if necessary)
if h.ACLPolicy != nil { if h.ACLPolicy != nil {
// update routes with peer information // update routes with peer information
@ -271,6 +186,14 @@ func (h *Headscale) handlePoll(
} }
} }
// TODO(kradalby): Save specific stuff, not whole object.
if err := h.db.NodeSave(node); err != nil {
logErr(err, "Failed to persist/update node in the database")
http.Error(writer, "", http.StatusInternalServerError)
return
}
logInfo("Sending initial map") logInfo("Sending initial map")
mapResp, err := mapp.FullMapResponse(mapRequest, node, h.ACLPolicy) mapResp, err := mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
@ -295,26 +218,18 @@ func (h *Headscale) handlePoll(
return return
} }
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node},
Message: "called from handlePoll -> new node added",
}
if stateUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore( h.nodeNotifier.NotifyWithIgnore(
stateUpdate, types.StateUpdate{
node.MachineKey.String()) Type: types.StatePeerChanged,
} Changed: types.Nodes{node},
},
node.MachineKey)
// Set up the client stream // Set up the client stream
h.pollNetMapStreamWG.Add(1) h.pollNetMapStreamWG.Add(1)
defer h.pollNetMapStreamWG.Done() defer h.pollNetMapStreamWG.Done()
// Use a buffered channel in case a node is not fully ready updateChan := make(chan types.StateUpdate)
// to receive a message to make sure we don't block the entire
// notifier.
// 12 is arbitrarily chosen.
updateChan := make(chan types.StateUpdate, 12)
defer closeChanWithLog(updateChan, node.Hostname, "updateChan") defer closeChanWithLog(updateChan, node.Hostname, "updateChan")
// Register the node's update channel // Register the node's update channel
@ -328,10 +243,6 @@ func (h *Headscale) handlePoll(
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
if len(node.Routes) > 0 {
go h.db.EnsureFailoverRouteIsAvailable(node)
}
for { for {
logInfo("Waiting for update on stream channel") logInfo("Waiting for update on stream channel")
select { select {
@ -361,7 +272,14 @@ func (h *Headscale) handlePoll(
// One alternative is to split these different channels into // One alternative is to split these different channels into
// goroutines, but then you might have a problem without a lock // goroutines, but then you might have a problem without a lock
// if a keepalive is written at the same time as an update. // if a keepalive is written at the same time as an update.
go h.updateNodeOnlineStatus(true, node) go func() {
err = h.db.UpdateLastSeen(node)
if err != nil {
logErr(err, "Cannot update node LastSeen")
return
}
}()
case update := <-updateChan: case update := <-updateChan:
logInfo("Received update") logInfo("Received update")
@ -371,43 +289,18 @@ func (h *Headscale) handlePoll(
var err error var err error
switch update.Type { switch update.Type {
case types.StateFullUpdate:
logInfo("Sending Full MapResponse")
data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
case types.StatePeerChanged: case types.StatePeerChanged:
logInfo(fmt.Sprintf("Sending Changed MapResponse: %s", update.Message)) logInfo("Sending PeerChanged MapResponse")
data, err = mapp.PeerChangedResponse(mapRequest, node, update.Changed, h.ACLPolicy)
for _, node := range update.ChangeNodes {
// If a node is not reported to be online, it might be
// because the value is outdated, check with the notifier.
// However, if it is set to Online, and not in the notifier,
// this might be because it has announced itself, but not
// reached the stage to actually create the notifier channel.
if node.IsOnline != nil && !*node.IsOnline {
isOnline := h.nodeNotifier.IsConnected(node.MachineKey)
node.IsOnline = &isOnline
}
}
data, err = mapp.PeerChangedResponse(mapRequest, node, update.ChangeNodes, h.ACLPolicy, update.Message)
case types.StatePeerChangedPatch:
logInfo("Sending PeerChangedPatch MapResponse")
data, err = mapp.PeerChangedPatchResponse(mapRequest, node, update.ChangePatches, h.ACLPolicy)
case types.StatePeerRemoved: case types.StatePeerRemoved:
logInfo("Sending PeerRemoved MapResponse") logInfo("Sending PeerRemoved MapResponse")
data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed) data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed)
case types.StateSelfUpdate:
if len(update.ChangeNodes) == 1 {
logInfo("Sending SelfUpdate MapResponse")
node = update.ChangeNodes[0]
data, err = mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy)
} else {
logInfo("SelfUpdate contained too many nodes, this is likely a bug in the code, please report.")
}
case types.StateDERPUpdated: case types.StateDERPUpdated:
logInfo("Sending DERPUpdate MapResponse") logInfo("Sending DERPUpdate MapResponse")
data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap) data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap)
case types.StateFullUpdate:
logInfo("Sending Full MapResponse")
data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
} }
if err != nil { if err != nil {
@ -416,8 +309,6 @@ func (h *Headscale) handlePoll(
return return
} }
// Only send update if there is change
if data != nil {
_, err = writer.Write(data) _, err = writer.Write(data)
if err != nil { if err != nil {
logErr(err, "Could not write the map response") logErr(err, "Could not write the map response")
@ -436,25 +327,37 @@ func (h *Headscale) handlePoll(
return return
} }
// See comment in keepAliveTicker
go func() {
err = h.db.UpdateLastSeen(node)
if err != nil {
logErr(err, "Cannot update node LastSeen")
return
}
}()
log.Info(). log.Info().
Caller(). Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly). Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers). Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream). Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey.ShortString()). Str("node_key", node.NodeKey).
Str("machine_key", node.MachineKey.ShortString()).
Str("node", node.Hostname). Str("node", node.Hostname).
TimeDiff("timeSpent", time.Now(), now). TimeDiff("timeSpent", time.Now(), now).
Msg("update sent") Msg("update sent")
}
case <-ctx.Done(): case <-ctx.Done():
logInfo("The client has closed the connection") logInfo("The client has closed the connection")
go h.updateNodeOnlineStatus(false, node) go func() {
err = h.db.UpdateLastSeen(node)
if err != nil {
logErr(err, "Cannot update node LastSeen")
// Failover the node's routes if any. return
go h.db.FailoverNodeRoutesWithNotify(node) }
}()
// The connection has been closed, so we can stop polling. // The connection has been closed, so we can stop polling.
return return
@ -467,36 +370,6 @@ func (h *Headscale) handlePoll(
} }
} }
// updateNodeOnlineStatus records the last seen status of a node and notifies peers
// about change in their online/offline status.
// It takes a StateUpdateType of either StatePeerOnlineChanged or StatePeerOfflineChanged.
func (h *Headscale) updateNodeOnlineStatus(online bool, node *types.Node) {
now := time.Now()
node.LastSeen = &now
statusUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{
{
NodeID: tailcfg.NodeID(node.ID),
Online: &online,
LastSeen: &now,
},
},
}
if statusUpdate.Valid() {
h.nodeNotifier.NotifyWithIgnore(statusUpdate, node.MachineKey.String())
}
err := h.db.UpdateLastSeen(node)
if err != nil {
log.Error().Err(err).Msg("Cannot update node LastSeen")
return
}
}
func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate](channel C, node, name string) { func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate](channel C, node, name string) {
log.Trace(). log.Trace().
Str("handler", "PollNetMap"). Str("handler", "PollNetMap").
@ -511,12 +384,19 @@ func (h *Headscale) handleLiteRequest(
writer http.ResponseWriter, writer http.ResponseWriter,
node *types.Node, node *types.Node,
mapRequest tailcfg.MapRequest, mapRequest tailcfg.MapRequest,
isNoise bool,
capVer tailcfg.CapabilityVersion,
) { ) {
logInfo, logErr := logPollFunc(mapRequest, node) logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
mapp := mapper.NewMapper( mapp := mapper.NewMapper(
node, node,
// TODO(kradalby): It might not be acceptable to send
// an empty peer list here.
types.Nodes{}, types.Nodes{},
h.privateKey2019,
isNoise,
capVer,
h.DERPMap, h.DERPMap,
h.cfg.BaseDomain, h.cfg.BaseDomain,
h.cfg.DNSConfig, h.cfg.DNSConfig,
@ -541,38 +421,3 @@ func (h *Headscale) handleLiteRequest(
logErr(err, "Failed to write response") logErr(err, "Failed to write response")
} }
} }
func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.PeerChange) {
trace := log.Trace().Str("node_id", change.NodeID.String()).Str("hostname", hostname)
if change.Key != nil {
trace = trace.Str("node_key", change.Key.ShortString())
}
if change.DiscoKey != nil {
trace = trace.Str("disco_key", change.DiscoKey.ShortString())
}
if change.Online != nil {
trace = trace.Bool("online", *change.Online)
}
if change.Endpoints != nil {
eps := make([]string, len(change.Endpoints))
for idx, ep := range change.Endpoints {
eps[idx] = ep.String()
}
trace = trace.Strs("endpoints", eps)
}
if hostinfoChange {
trace = trace.Bool("hostinfo_changed", hostinfoChange)
}
if change.DERPRegion != 0 {
trace = trace.Int("derp_region", change.DERPRegion)
}
trace.Time("last_seen", *change.LastSeen).Msg("PeerChange received")
}
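As an aside for readers navigating the hunk above: both versions of handlePoll branch on the same three tailcfg.MapRequest flags, and the comments in the diff describe what each combination means. A minimal sketch of that dispatch follows; the function name and return values are placeholders, not part of either version of the file.

package sketch

import "tailscale.com/tailcfg"

// classifyMapRequest mirrors the flag combinations handlePoll distinguishes.
func classifyMapRequest(req tailcfg.MapRequest) string {
	switch {
	case req.OmitPeers && !req.Stream && !req.ReadOnly:
		// The node is only pushing an update (endpoints/Hostinfo);
		// the server may answer with a bare HTTP 200.
		return "update-only"
	case req.OmitPeers && !req.Stream && req.ReadOnly:
		// "Lite" request: fetch a MapResponse (e.g. the DERP map at
		// start-up) without registering endpoints.
		return "lite"
	case req.OmitPeers && req.Stream:
		// Documented as an invalid combination and rejected.
		return "invalid"
	default:
		// Long-poll stream: initial full map, then incremental updates.
		return "stream"
	}
}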

hscontrol/poll_legacy.go

@ -0,0 +1,108 @@
//go:build ts2019
package hscontrol
import (
"errors"
"io"
"net/http"
"github.com/gorilla/mux"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
// PollNetMapHandler takes care of /machine/:id/map
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST stuff like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At this moment the updates are sent in a quite horrendous way, but they kinda work.
func (h *Headscale) PollNetMapHandler(
writer http.ResponseWriter,
req *http.Request,
) {
vars := mux.Vars(req)
machineKeyStr, ok := vars["mkey"]
if !ok || machineKeyStr == "" {
log.Error().
Str("handler", "PollNetMap").
Msg("No machine key in request")
http.Error(writer, "No machine key in request", http.StatusBadRequest)
return
}
log.Trace().
Str("handler", "PollNetMap").
Str("id", machineKeyStr).
Msg("PollNetMapHandler called")
body, _ := io.ReadAll(req.Body)
var machineKey key.MachinePublic
err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(machineKeyStr)))
if err != nil {
log.Error().
Str("handler", "PollNetMap").
Err(err).
Msg("Cannot parse client key")
http.Error(writer, "Cannot parse client key", http.StatusBadRequest)
return
}
mapRequest := tailcfg.MapRequest{}
err = util.DecodeAndUnmarshalNaCl(body, &mapRequest, &machineKey, h.privateKey2019)
if err != nil {
log.Error().
Str("handler", "PollNetMap").
Err(err).
Msg("Cannot decode message")
http.Error(writer, "Cannot decode message", http.StatusBadRequest)
return
}
node, err := h.db.GetNodeByMachineKey(machineKey)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
log.Warn().
Str("handler", "PollNetMap").
Msgf("Ignoring request, cannot find node with key %s", machineKey.String())
http.Error(writer, "", http.StatusUnauthorized)
return
}
log.Error().
Str("handler", "PollNetMap").
Msgf("Failed to fetch node from the database with Machine key: %s", machineKey.String())
http.Error(writer, "", http.StatusInternalServerError)
return
}
log.Trace().
Str("handler", "PollNetMap").
Str("id", machineKeyStr).
Str("node", node.Hostname).
Msg("A node is sending a MapRequest via legacy protocol")
capVer, err := parseCabailityVersion(req)
if err != nil && !errors.Is(err, ErrNoCapabilityVersion) {
log.Error().
Caller().
Err(err).
Msg("failed to parse capVer")
http.Error(writer, "Internal error", http.StatusInternalServerError)
return
}
h.handlePoll(writer, req.Context(), node, mapRequest, false, capVer)
}


@ -12,10 +12,6 @@ import (
"tailscale.com/types/key" "tailscale.com/types/key"
) )
const (
MinimumCapVersion tailcfg.CapabilityVersion = 56
)
// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol // NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol
// //
// This is the busiest endpoint, as it keeps the HTTP long poll that updates // This is the busiest endpoint, as it keeps the HTTP long poll that updates
@ -51,18 +47,6 @@ func (ns *noiseServer) NoisePollNetMapHandler(
return return
} }
// Reject unsupported versions
if mapRequest.Version < MinimumCapVersion {
log.Info().
Caller().
Int("min_version", int(MinimumCapVersion)).
Int("client_version", int(mapRequest.Version)).
Msg("unsupported client connected")
http.Error(writer, "Internal error", http.StatusBadRequest)
return
}
ns.nodeKey = mapRequest.NodeKey ns.nodeKey = mapRequest.NodeKey
node, err := ns.headscale.db.GetNodeByAnyKey( node, err := ns.headscale.db.GetNodeByAnyKey(
@ -89,8 +73,20 @@ func (ns *noiseServer) NoisePollNetMapHandler(
log.Debug(). log.Debug().
Str("handler", "NoisePollNetMap"). Str("handler", "NoisePollNetMap").
Str("node", node.Hostname). Str("node", node.Hostname).
Int("cap_ver", int(mapRequest.Version)).
Msg("A node sending a MapRequest with Noise protocol") Msg("A node sending a MapRequest with Noise protocol")
ns.headscale.handlePoll(writer, req.Context(), node, mapRequest) capVer, err := parseCabailityVersion(req)
if err != nil && !errors.Is(err, ErrNoCapabilityVersion) {
log.Error().
Caller().
Err(err).
Msg("failed to parse capVer")
http.Error(writer, "Internal error", http.StatusInternalServerError)
return
}
// TODO(kradalby): since we are now passing capVer, we could arguably stop passing
// isNoise, and rather have a isNoise function that takes capVer
ns.headscale.handlePoll(writer, req.Context(), node, mapRequest, true, capVer)
} }


@ -40,6 +40,7 @@ func (s *Suite) ResetDB(c *check.C) {
c.Fatal(err) c.Fatal(err)
} }
cfg := types.Config{ cfg := types.Config{
PrivateKeyPath: tmpDir + "/private.key",
NoisePrivateKeyPath: tmpDir + "/noise_private.key", NoisePrivateKeyPath: tmpDir + "/noise_private.key",
DBtype: "sqlite3", DBtype: "sqlite3",
DBpath: tmpDir + "/headscale_test.db", DBpath: tmpDir + "/headscale_test.db",


@ -1,99 +0,0 @@
package hscontrol
import (
"context"
"fmt"
"net/http"
"os"
"github.com/tailscale/tailsql/server/tailsql"
"tailscale.com/tsnet"
"tailscale.com/tsweb"
"tailscale.com/types/logger"
)
func runTailSQLService(ctx context.Context, logf logger.Logf, stateDir, dbPath string) error {
opts := tailsql.Options{
Hostname: "tailsql-headscale",
StateDir: stateDir,
Sources: []tailsql.DBSpec{
{
Source: "headscale",
Label: "headscale - sqlite",
Driver: "sqlite",
URL: fmt.Sprintf("file:%s?mode=ro", dbPath),
Named: map[string]string{
"schema": `select * from sqlite_schema`,
},
},
},
}
tsNode := &tsnet.Server{
Dir: os.ExpandEnv(opts.StateDir),
Hostname: opts.Hostname,
Logf: logger.Discard,
}
// if *doDebugLog {
// tsNode.Logf = logf
// }
defer tsNode.Close()
logf("Starting tailscale (hostname=%q)", opts.Hostname)
lc, err := tsNode.LocalClient()
if err != nil {
return fmt.Errorf("connect local client: %w", err)
}
opts.LocalClient = lc // for authentication
// Make sure the Tailscale node starts up. It might not, if it is a new node
// and the user did not provide an auth key.
if st, err := tsNode.Up(ctx); err != nil {
return fmt.Errorf("starting tailscale: %w", err)
} else {
logf("tailscale started, node state %q", st.BackendState)
}
// Reaching here, we have a running Tailscale node, now we can set up the
// HTTP and/or HTTPS plumbing for TailSQL itself.
tsql, err := tailsql.NewServer(opts)
if err != nil {
return fmt.Errorf("creating tailsql server: %w", err)
}
lst, err := tsNode.Listen("tcp", ":80")
if err != nil {
return fmt.Errorf("listen port 80: %w", err)
}
if opts.ServeHTTPS {
// When serving TLS, add a redirect from HTTP on port 80 to HTTPS on 443.
certDomains := tsNode.CertDomains()
if len(certDomains) == 0 {
fmt.Errorf("no cert domains available for HTTPS")
}
base := "https://" + certDomains[0]
go http.Serve(lst, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
target := base + r.RequestURI
http.Redirect(w, r, target, http.StatusPermanentRedirect)
}))
// log.Printf("Redirecting HTTP to HTTPS at %q", base)
// For the real service, start a separate listener.
// Note: Replaces the port 80 listener.
var err error
lst, err = tsNode.ListenTLS("tcp", ":443")
if err != nil {
return fmt.Errorf("listen TLS: %w", err)
}
logf("enabled serving via HTTPS")
}
mux := tsql.NewMux()
tsweb.Debugger(mux)
go http.Serve(lst, mux)
logf("ailSQL started")
<-ctx.Done()
logf("TailSQL shutting down...")
return tsNode.Close()
}


@ -12,6 +12,33 @@ import (
var ErrCannotParsePrefix = errors.New("cannot parse prefix") var ErrCannotParsePrefix = errors.New("cannot parse prefix")
// This is a "wrapper" type around tailscales
// Hostinfo to allow us to add database "serialization"
// methods. This allows us to use a typed values throughout
// the code and not have to marshal/unmarshal and error
// check all over the code.
type HostInfo tailcfg.Hostinfo
func (hi *HostInfo) Scan(destination interface{}) error {
switch value := destination.(type) {
case []byte:
return json.Unmarshal(value, hi)
case string:
return json.Unmarshal([]byte(value), hi)
default:
return fmt.Errorf("%w: unexpected data type %T", ErrNodeAddressesInvalid, destination)
}
}
// Value return json value, implement driver.Valuer interface.
func (hi HostInfo) Value() (driver.Value, error) {
bytes, err := json.Marshal(hi)
return string(bytes), err
}
type IPPrefix netip.Prefix type IPPrefix netip.Prefix
func (i *IPPrefix) Scan(destination interface{}) error { func (i *IPPrefix) Scan(destination interface{}) error {
@ -84,37 +111,20 @@ type StateUpdateType int
const ( const (
StateFullUpdate StateUpdateType = iota StateFullUpdate StateUpdateType = iota
// StatePeerChanged is used for updates that need
// to be calculated with all peers and all policy rules.
// This would typically be things that include tags, routes
// and similar.
StatePeerChanged StatePeerChanged
StatePeerChangedPatch
StatePeerRemoved StatePeerRemoved
// StateSelfUpdate is used to indicate that the node
// has changed in control, and the client needs to be
// informed.
// The updated node is inside the ChangeNodes field
// which should have a length of one.
StateSelfUpdate
StateDERPUpdated StateDERPUpdated
) )
// StateUpdate is an internal message containing information about // StateUpdate is an internal message containing information about
// a state change that has happened to the network. // a state change that has happened to the network.
// If type is StateFullUpdate, all fields are ignored.
type StateUpdate struct { type StateUpdate struct {
// The type of update // The type of update
Type StateUpdateType Type StateUpdateType
// ChangeNodes must be set when Type is StatePeerAdded // Changed must be set when Type is StatePeerChanged and
// and StatePeerChanged and contains the full node // contain the Node IDs of nodes that have changed.
// object for added nodes. Changed Nodes
ChangeNodes Nodes
// ChangePatches must be set when Type is StatePeerChangedPatch
// and contains a populated PeerChange object.
ChangePatches []*tailcfg.PeerChange
// Removed must be set when Type is StatePeerRemoved and // Removed must be set when Type is StatePeerRemoved and
// contain a list of the nodes that have been removed from // contain a list of the nodes that have been removed from
@ -123,40 +133,5 @@ type StateUpdate struct {
// DERPMap must be set when Type is StateDERPUpdated and // DERPMap must be set when Type is StateDERPUpdated and
// contain the new DERP Map. // contain the new DERP Map.
DERPMap *tailcfg.DERPMap DERPMap tailcfg.DERPMap
// Additional message for tracking origin or what being
// updated, useful for ambiguous updates like StatePeerChanged.
Message string
}
// Valid reports if a StateUpdate is correctly filled and
// panics if the mandatory fields for a type are not
// filled.
// Reports true if valid.
func (su *StateUpdate) Valid() bool {
switch su.Type {
case StatePeerChanged:
if su.ChangeNodes == nil {
panic("Mandatory field ChangeNodes is not set on StatePeerChanged update")
}
case StatePeerChangedPatch:
if su.ChangePatches == nil {
panic("Mandatory field ChangePatches is not set on StatePeerChangedPatch update")
}
case StatePeerRemoved:
if su.Removed == nil {
panic("Mandatory field Removed is not set on StatePeerRemove update")
}
case StateSelfUpdate:
if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 {
panic("Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node")
}
case StateDERPUpdated:
if su.DERPMap == nil {
panic("Mandatory field DERPMap is not set on StateDERPUpdated update")
}
}
return true
} }
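For context on the HostInfo wrapper added on the right-hand side of this file: Scan and Value give the database layer a JSON round trip while the rest of the code keeps a typed value (the left-hand side achieves the same with the BeforeSave/AfterFind hooks in the node.go hunk further down). A minimal sketch of that round trip, assuming the right-hand version of the types package, with field values borrowed from the tests earlier in this diff:

package main

import (
	"fmt"

	"github.com/juanfont/headscale/hscontrol/types"
	"tailscale.com/tailcfg"
)

func main() {
	hi := types.HostInfo(tailcfg.Hostinfo{OS: "centos", Hostname: "hr-web1"})

	// Value marshals the wrapper to a JSON string for the database column.
	raw, err := hi.Value()
	if err != nil {
		panic(err)
	}

	// Scan unmarshals the stored value back into the typed wrapper on read.
	var decoded types.HostInfo
	if err := decoded.Scan(raw); err != nil {
		panic(err)
	}

	fmt.Println(decoded.Hostname) // hr-web1
}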


@ -41,6 +41,7 @@ type Config struct {
EphemeralNodeInactivityTimeout time.Duration EphemeralNodeInactivityTimeout time.Duration
NodeUpdateCheckInterval time.Duration NodeUpdateCheckInterval time.Duration
IPPrefixes []netip.Prefix IPPrefixes []netip.Prefix
PrivateKeyPath string
NoisePrivateKeyPath string NoisePrivateKeyPath string
BaseDomain string BaseDomain string
Log LogConfig Log LogConfig
@ -115,7 +116,6 @@ type DERPConfig struct {
ServerRegionID int ServerRegionID int
ServerRegionCode string ServerRegionCode string
ServerRegionName string ServerRegionName string
ServerPrivateKeyPath string
STUNAddr string STUNAddr string
URLs []url.URL URLs []url.URL
Paths []string Paths []string
@ -294,7 +294,6 @@ func GetDERPConfig() DERPConfig {
serverRegionCode := viper.GetString("derp.server.region_code") serverRegionCode := viper.GetString("derp.server.region_code")
serverRegionName := viper.GetString("derp.server.region_name") serverRegionName := viper.GetString("derp.server.region_name")
stunAddr := viper.GetString("derp.server.stun_listen_addr") stunAddr := viper.GetString("derp.server.stun_listen_addr")
privateKeyPath := util.AbsolutePathFromConfigPath(viper.GetString("derp.server.private_key_path"))
if serverEnabled && stunAddr == "" { if serverEnabled && stunAddr == "" {
log.Fatal(). log.Fatal().
@ -326,7 +325,6 @@ func GetDERPConfig() DERPConfig {
ServerRegionID: serverRegionID, ServerRegionID: serverRegionID,
ServerRegionCode: serverRegionCode, ServerRegionCode: serverRegionCode,
ServerRegionName: serverRegionName, ServerRegionName: serverRegionName,
ServerPrivateKeyPath: privateKeyPath,
STUNAddr: stunAddr, STUNAddr: stunAddr,
URLs: urls, URLs: urls,
Paths: paths, Paths: paths,
@ -592,6 +590,9 @@ func GetHeadscaleConfig() (*Config, error) {
DisableUpdateCheck: viper.GetBool("disable_check_updates"), DisableUpdateCheck: viper.GetBool("disable_check_updates"),
IPPrefixes: prefixes, IPPrefixes: prefixes,
PrivateKeyPath: util.AbsolutePathFromConfigPath(
viper.GetString("private_key_path"),
),
NoisePrivateKeyPath: util.AbsolutePathFromConfigPath( NoisePrivateKeyPath: util.AbsolutePathFromConfigPath(
viper.GetString("noise.private_key_path"), viper.GetString("noise.private_key_path"),
), ),


@ -2,7 +2,6 @@ package types
import ( import (
"database/sql/driver" "database/sql/driver"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"net/netip" "net/netip"
@ -12,60 +11,24 @@ import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1" v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/policy/matcher" "github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/rs/zerolog/log" "github.com/juanfont/headscale/hscontrol/util"
"go4.org/netipx" "go4.org/netipx"
"google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/timestamppb"
"gorm.io/gorm"
"tailscale.com/tailcfg" "tailscale.com/tailcfg"
"tailscale.com/types/key" "tailscale.com/types/key"
) )
var ( var (
ErrNodeAddressesInvalid = errors.New("failed to parse node addresses") ErrNodeAddressesInvalid = errors.New("failed to parse node addresses")
ErrHostnameTooLong = errors.New("hostname too long, cannot exceed 255 ASCII chars") ErrHostnameTooLong = errors.New("hostname too long")
ErrNodeHasNoGivenName = errors.New("node has no given name")
ErrNodeUserHasNoName = errors.New("node user has no name")
) )
// Node is a Headscale client. // Node is a Headscale client.
type Node struct { type Node struct {
ID uint64 `gorm:"primary_key"` ID uint64 `gorm:"primary_key"`
MachineKey string `gorm:"type:varchar(64);unique_index"`
// MachineKeyDatabaseField is the string representation of MachineKey NodeKey string
// it is _only_ used for reading and writing the key to the DiscoKey string
// database and should not be used.
// Use MachineKey instead.
MachineKeyDatabaseField string `gorm:"column:machine_key;unique_index"`
MachineKey key.MachinePublic `gorm:"-"`
// NodeKeyDatabaseField is the string representation of NodeKey
// it is _only_ used for reading and writing the key to the
// database and should not be used.
// Use NodeKey instead.
NodeKeyDatabaseField string `gorm:"column:node_key"`
NodeKey key.NodePublic `gorm:"-"`
// DiscoKeyDatabaseField is the string representation of DiscoKey
// it is _only_ used for reading and writing the key to the
// database and should not be used.
// Use DiscoKey instead.
DiscoKeyDatabaseField string `gorm:"column:disco_key"`
DiscoKey key.DiscoPublic `gorm:"-"`
// EndpointsDatabaseField is the string list representation of Endpoints
// it is _only_ used for reading and writing the key to the
// database and should not be used.
// Use Endpoints instead.
EndpointsDatabaseField StringList `gorm:"column:endpoints"`
Endpoints []netip.AddrPort `gorm:"-"`
// HostinfoDatabaseField is the string representation of Hostinfo
// it is _only_ used for reading and writing the value to the
// database and should not be used.
// Use Hostinfo instead.
HostinfoDatabaseField string `gorm:"column:host_info"`
Hostinfo *tailcfg.Hostinfo `gorm:"-"`
IPAddresses NodeAddresses IPAddresses NodeAddresses
// Hostname represents the name given by the Tailscale // Hostname represents the name given by the Tailscale
@ -93,19 +56,30 @@ type Node struct {
LastSeen *time.Time LastSeen *time.Time
Expiry *time.Time Expiry *time.Time
HostInfo HostInfo
Endpoints StringList
Routes []Route Routes []Route
CreatedAt time.Time CreatedAt time.Time
UpdatedAt time.Time UpdatedAt time.Time
DeletedAt *time.Time DeletedAt *time.Time
IsOnline *bool `gorm:"-"`
} }
type ( type (
Nodes []*Node Nodes []*Node
) )
func (nodes Nodes) OnlineNodeMap() map[tailcfg.NodeID]bool {
ret := make(map[tailcfg.NodeID]bool)
for _, node := range nodes {
ret[tailcfg.NodeID(node.ID)] = node.IsOnline()
}
return ret
}
type NodeAddresses []netip.Addr type NodeAddresses []netip.Addr
func (na NodeAddresses) Sort() { func (na NodeAddresses) Sort() {
@ -201,6 +175,21 @@ func (node Node) IsExpired() bool {
return time.Now().UTC().After(*node.Expiry) return time.Now().UTC().After(*node.Expiry)
} }
// IsOnline returns if the node is connected to Headscale.
// This is really a naive implementation, as we don't really see
// if there is a working connection between the client and the server.
func (node *Node) IsOnline() bool {
if node.LastSeen == nil {
return false
}
if node.IsExpired() {
return false
}
return node.LastSeen.After(time.Now().Add(-KeepAliveInterval))
}
// IsEphemeral returns if the node is registered as an Ephemeral node. // IsEphemeral returns if the node is registered as an Ephemeral node.
// https://tailscale.com/kb/1111/ephemeral-nodes/ // https://tailscale.com/kb/1111/ephemeral-nodes/
func (node *Node) IsEphemeral() bool { func (node *Node) IsEphemeral() bool {
@ -238,89 +227,19 @@ func (nodes Nodes) FilterByIP(ip netip.Addr) Nodes {
return found return found
} }
// BeforeSave is a hook that ensures that some values that
// cannot be directly marshalled into database values are stored
// correctly in the database.
// This currently means storing the keys as strings.
func (node *Node) BeforeSave(tx *gorm.DB) error {
node.MachineKeyDatabaseField = node.MachineKey.String()
node.NodeKeyDatabaseField = node.NodeKey.String()
node.DiscoKeyDatabaseField = node.DiscoKey.String()
var endpoints StringList
for _, addrPort := range node.Endpoints {
endpoints = append(endpoints, addrPort.String())
}
node.EndpointsDatabaseField = endpoints
hi, err := json.Marshal(node.Hostinfo)
if err != nil {
return fmt.Errorf("failed to marshal Hostinfo to store in db: %w", err)
}
node.HostinfoDatabaseField = string(hi)
return nil
}
// AfterFind is a hook that ensures that Node object fields that
// have a different type in the database are unwrapped and populated
// correctly.
// This currently unmarshals all the keys, stored as strings, into
// the proper types.
func (node *Node) AfterFind(tx *gorm.DB) error {
var machineKey key.MachinePublic
if err := machineKey.UnmarshalText([]byte(node.MachineKeyDatabaseField)); err != nil {
return fmt.Errorf("failed to unmarshal machine key from db: %w", err)
}
node.MachineKey = machineKey
var nodeKey key.NodePublic
if err := nodeKey.UnmarshalText([]byte(node.NodeKeyDatabaseField)); err != nil {
return fmt.Errorf("failed to unmarshal node key from db: %w", err)
}
node.NodeKey = nodeKey
var discoKey key.DiscoPublic
if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil {
return fmt.Errorf("failed to unmarshal disco key from db: %w", err)
}
node.DiscoKey = discoKey
endpoints := make([]netip.AddrPort, len(node.EndpointsDatabaseField))
for idx, ep := range node.EndpointsDatabaseField {
addrPort, err := netip.ParseAddrPort(ep)
if err != nil {
return fmt.Errorf("failed to parse endpoint from db: %w", err)
}
endpoints[idx] = addrPort
}
node.Endpoints = endpoints
var hi tailcfg.Hostinfo
if err := json.Unmarshal([]byte(node.HostinfoDatabaseField), &hi); err != nil {
log.Trace().Err(err).Msgf("Hostinfo content: %s", node.HostinfoDatabaseField)
return fmt.Errorf("failed to unmarshal Hostinfo from db: %w", err)
}
node.Hostinfo = &hi
return nil
}
func (node *Node) Proto() *v1.Node { func (node *Node) Proto() *v1.Node {
nodeProto := &v1.Node{ nodeProto := &v1.Node{
Id: node.ID, Id: node.ID,
MachineKey: node.MachineKey.String(), MachineKey: node.MachineKey,
NodeKey: node.NodeKey.String(), NodeKey: node.NodeKey,
DiscoKey: node.DiscoKey.String(), DiscoKey: node.DiscoKey,
IpAddresses: node.IPAddresses.StringSlice(), IpAddresses: node.IPAddresses.StringSlice(),
Name: node.Hostname, Name: node.Hostname,
GivenName: node.GivenName, GivenName: node.GivenName,
User: node.User.Proto(), User: node.User.Proto(),
ForcedTags: node.ForcedTags, ForcedTags: node.ForcedTags,
Online: node.IsOnline(),
// TODO(kradalby): Implement register method enum converter // TODO(kradalby): Implement register method enum converter
// RegisterMethod: , // RegisterMethod: ,
@ -343,17 +262,14 @@ func (node *Node) Proto() *v1.Node {
return nodeProto return nodeProto
} }
// GetHostInfo returns a Hostinfo struct for the node.
func (node *Node) GetHostInfo() tailcfg.Hostinfo {
return tailcfg.Hostinfo(node.HostInfo)
}
func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (string, error) { func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (string, error) {
var hostname string var hostname string
if dnsConfig != nil && dnsConfig.Proxied { // MagicDNS if dnsConfig != nil && dnsConfig.Proxied { // MagicDNS
if node.GivenName == "" {
return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeHasNoGivenName)
}
if node.User.Name == "" {
return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeUserHasNoName)
}
hostname = fmt.Sprintf( hostname = fmt.Sprintf(
"%s.%s.%s", "%s.%s.%s",
node.GivenName, node.GivenName,
@ -362,7 +278,7 @@ func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (stri
) )
if len(hostname) > MaxHostnameLength { if len(hostname) > MaxHostnameLength {
return "", fmt.Errorf( return "", fmt.Errorf(
"failed to create valid FQDN (%s): %w", "hostname %q is too long it cannot except 255 ASCII chars: %w",
hostname, hostname,
ErrHostnameTooLong, ErrHostnameTooLong,
) )
@ -374,98 +290,49 @@ func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (stri
return hostname, nil return hostname, nil
} }
// func (node *Node) String() string { func (node *Node) MachinePublicKey() (key.MachinePublic, error) {
// return node.Hostname var machineKey key.MachinePublic
// }
// PeerChangeFromMapRequest takes a MapRequest and compares it to the node if node.MachineKey != "" {
// to produce a PeerChange struct that can be used to update the node and err := machineKey.UnmarshalText(
// inform peers about smaller changes to the node. []byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
// When a field is added to this function, remember to also add it to: )
// - node.ApplyPeerChange if err != nil {
// - logTracePeerChange in poll.go return key.MachinePublic{}, fmt.Errorf("failed to parse machine public key: %w", err)
func (node *Node) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange {
ret := tailcfg.PeerChange{
NodeID: tailcfg.NodeID(node.ID),
}
if node.NodeKey.String() != req.NodeKey.String() {
ret.Key = &req.NodeKey
}
if node.DiscoKey.String() != req.DiscoKey.String() {
ret.DiscoKey = &req.DiscoKey
}
if node.Hostinfo != nil &&
node.Hostinfo.NetInfo != nil &&
req.Hostinfo != nil &&
req.Hostinfo.NetInfo != nil &&
node.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP {
ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
}
if req.Hostinfo != nil && req.Hostinfo.NetInfo != nil {
// If there is no stored Hostinfo or NetInfo, use
// the new PreferredDERP.
if node.Hostinfo == nil {
ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
} else if node.Hostinfo.NetInfo == nil {
ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
} else {
// If there is a PreferredDERP check if it has changed.
if node.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP {
ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
}
} }
} }
// TODO(kradalby): Find a good way to compare updates return machineKey, nil
ret.Endpoints = req.Endpoints
now := time.Now()
ret.LastSeen = &now
return ret
} }
// ApplyPeerChange takes a PeerChange struct and updates the node. func (node *Node) DiscoPublicKey() (key.DiscoPublic, error) {
func (node *Node) ApplyPeerChange(change *tailcfg.PeerChange) { var discoKey key.DiscoPublic
if change.Key != nil { if node.DiscoKey != "" {
node.NodeKey = *change.Key err := discoKey.UnmarshalText(
} []byte(util.DiscoPublicKeyEnsurePrefix(node.DiscoKey)),
)
if change.DiscoKey != nil { if err != nil {
node.DiscoKey = *change.DiscoKey return key.DiscoPublic{}, fmt.Errorf("failed to parse disco public key: %w", err)
}
if change.Online != nil {
node.IsOnline = change.Online
}
if change.Endpoints != nil {
node.Endpoints = change.Endpoints
}
// This might technically not be useful as we replace
// the whole hostinfo blob when it has changed.
if change.DERPRegion != 0 {
if node.Hostinfo == nil {
node.Hostinfo = &tailcfg.Hostinfo{
NetInfo: &tailcfg.NetInfo{
PreferredDERP: change.DERPRegion,
},
}
} else if node.Hostinfo.NetInfo == nil {
node.Hostinfo.NetInfo = &tailcfg.NetInfo{
PreferredDERP: change.DERPRegion,
} }
} else { } else {
node.Hostinfo.NetInfo.PreferredDERP = change.DERPRegion discoKey = key.DiscoPublic{}
}
} }
node.LastSeen = change.LastSeen return discoKey, nil
}
func (node *Node) NodePublicKey() (key.NodePublic, error) {
var nodeKey key.NodePublic
err := nodeKey.UnmarshalText([]byte(util.NodePublicKeyEnsurePrefix(node.NodeKey)))
if err != nil {
return key.NodePublic{}, fmt.Errorf("failed to parse node public key: %w", err)
}
return nodeKey, nil
}
func (node Node) String() string {
return node.Hostname
} }
func (nodes Nodes) String() string { func (nodes Nodes) String() string {

Some files were not shown because too many files have changed in this diff.