Compare commits
No commits in common. "6b635b3566b5fd8c05df1687563dfe1d508e1aad" and "d017339af4b13a6eae1e49499f2ba4ae4d991431" have entirely different histories.
6b635b3566...d017339af4
120 changed files with 3123 additions and 5411 deletions
2  .github/workflows/docs.yml  vendored
@@ -26,7 +26,7 @@ jobs:
           key: ${{ github.ref }}
           path: .cache
       - name: Setup dependencies
-        run: pip install -r docs/requirements.txt
+        run: pip install mkdocs-material pillow cairosvg mkdocs-minify-plugin
       - name: Build docs
         run: mkdocs build --strict
       - name: Upload artifact
6  .github/workflows/stale.yml  vendored
@@ -12,10 +12,10 @@ jobs:
     steps:
       - uses: actions/stale@v5
         with:
-          days-before-issue-stale: 90
-          days-before-issue-close: 7
+          days-before-issue-stale: 180
+          days-before-issue-close: 14
           stale-issue-label: "stale"
-          stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
+          stale-issue-message: "This issue is stale because it has been open for 180 days with no activity."
           close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
           days-before-pr-stale: -1
           days-before-pr-close: -1
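A note on the hunk above: both sides keep pull requests exempt from the stale bot, since `days-before-pr-stale: -1` and `days-before-pr-close: -1` use the actions/stale convention that a negative value disables the corresponding timer; the change itself only widens the issue windows from 90/7 days to 180/14 days.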
|
@ -35,11 +35,8 @@ jobs:
|
|||
config-example.yaml
|
||||
|
||||
- name: Run TestACLAllowStarDst
|
||||
uses: Wandalen/wretry.action@master
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
attempt_limit: 5
|
||||
command: |
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
|
@ -49,6 +46,7 @@ jobs:
|
|||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go run gotest.tools/gotestsum@latest -- ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
|
|
|
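The same two hunks repeat, with only the test name changing, in every generated per-test integration workflow that follows: the base side wraps the test in Wandalen/wretry.action (retrying up to `attempt_limit: 5` times, with the docker command under `command: |`), while the head side runs the same docker command directly under `run: |` and additionally passes `-tags ts2019` to the test run.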
@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestACLAllowUser80Dst
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestACLAllowUserDst
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestACLDenyAllPort80
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestACLDevice1CanAccessDevice2
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestACLHostsInNetMapTable
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestACLNamedHostsCanReach
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestACLNamedHostsCanReachBySubnet
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestApiKeyCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestAuthKeyLogoutAndRelogin
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestAuthWebFlowAuthenticationPingAll
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestAuthWebFlowLogoutAndRelogin
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestCreateTailscale
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestDERPServerScenario
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestEnablingRoutes
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestEphemeral
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestExpireNode
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \
@@ -1,67 +0,0 @@
-# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
-# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
-
-name: Integration Test v2 - TestHASubnetRouterFailover
-
-on: [pull_request]
-
-concurrency:
-  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  TestHASubnetRouterFailover:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - uses: DeterminateSystems/nix-installer-action@main
-      - uses: DeterminateSystems/magic-nix-cache-action@main
-      - uses: satackey/action-docker-layer-caching@main
-        continue-on-error: true
-
-      - name: Get changed files
-        id: changed-files
-        uses: tj-actions/changed-files@v34
-        with:
-          files: |
-            *.nix
-            go.*
-            **/*.go
-            integration_test/
-            config-example.yaml
-
-      - name: Run TestHASubnetRouterFailover
-        uses: Wandalen/wretry.action@master
-        if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
-            nix develop --command -- docker run \
-              --tty --rm \
-              --volume ~/.cache/hs-integration-go:/go \
-              --name headscale-test-suite \
-              --volume $PWD:$PWD -w $PWD/integration \
-              --volume /var/run/docker.sock:/var/run/docker.sock \
-              --volume $PWD/control_logs:/tmp/control \
-              golang:1 \
-              go run gotest.tools/gotestsum@latest -- ./... \
-                -failfast \
-                -timeout 120m \
-                -parallel 1 \
-                -run "^TestHASubnetRouterFailover$"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: logs
-          path: "control_logs/*.log"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: pprof
-          path: "control_logs/*.pprof.tar"
@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestHeadscale
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \
@@ -1,67 +0,0 @@
-# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
-# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
-
-name: Integration Test v2 - TestNodeAdvertiseTagNoACLCommand
-
-on: [pull_request]
-
-concurrency:
-  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  TestNodeAdvertiseTagNoACLCommand:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - uses: DeterminateSystems/nix-installer-action@main
-      - uses: DeterminateSystems/magic-nix-cache-action@main
-      - uses: satackey/action-docker-layer-caching@main
-        continue-on-error: true
-
-      - name: Get changed files
-        id: changed-files
-        uses: tj-actions/changed-files@v34
-        with:
-          files: |
-            *.nix
-            go.*
-            **/*.go
-            integration_test/
-            config-example.yaml
-
-      - name: Run TestNodeAdvertiseTagNoACLCommand
-        uses: Wandalen/wretry.action@master
-        if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
-            nix develop --command -- docker run \
-              --tty --rm \
-              --volume ~/.cache/hs-integration-go:/go \
-              --name headscale-test-suite \
-              --volume $PWD:$PWD -w $PWD/integration \
-              --volume /var/run/docker.sock:/var/run/docker.sock \
-              --volume $PWD/control_logs:/tmp/control \
-              golang:1 \
-              go run gotest.tools/gotestsum@latest -- ./... \
-                -failfast \
-                -timeout 120m \
-                -parallel 1 \
-                -run "^TestNodeAdvertiseTagNoACLCommand$"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: logs
-          path: "control_logs/*.log"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: pprof
-          path: "control_logs/*.pprof.tar"
@@ -1,67 +0,0 @@
-# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
-# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
-
-name: Integration Test v2 - TestNodeAdvertiseTagWithACLCommand
-
-on: [pull_request]
-
-concurrency:
-  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  TestNodeAdvertiseTagWithACLCommand:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - uses: DeterminateSystems/nix-installer-action@main
-      - uses: DeterminateSystems/magic-nix-cache-action@main
-      - uses: satackey/action-docker-layer-caching@main
-        continue-on-error: true
-
-      - name: Get changed files
-        id: changed-files
-        uses: tj-actions/changed-files@v34
-        with:
-          files: |
-            *.nix
-            go.*
-            **/*.go
-            integration_test/
-            config-example.yaml
-
-      - name: Run TestNodeAdvertiseTagWithACLCommand
-        uses: Wandalen/wretry.action@master
-        if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
-            nix develop --command -- docker run \
-              --tty --rm \
-              --volume ~/.cache/hs-integration-go:/go \
-              --name headscale-test-suite \
-              --volume $PWD:$PWD -w $PWD/integration \
-              --volume /var/run/docker.sock:/var/run/docker.sock \
-              --volume $PWD/control_logs:/tmp/control \
-              golang:1 \
-              go run gotest.tools/gotestsum@latest -- ./... \
-                -failfast \
-                -timeout 120m \
-                -parallel 1 \
-                -run "^TestNodeAdvertiseTagWithACLCommand$"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: logs
-          path: "control_logs/*.log"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: pprof
-          path: "control_logs/*.pprof.tar"
@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestNodeCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestNodeExpireCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestNodeMoveCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \
@@ -1,67 +0,0 @@
-# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
-# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
-
-name: Integration Test v2 - TestNodeOnlineLastSeenStatus
-
-on: [pull_request]
-
-concurrency:
-  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-jobs:
-  TestNodeOnlineLastSeenStatus:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 2
-
-      - uses: DeterminateSystems/nix-installer-action@main
-      - uses: DeterminateSystems/magic-nix-cache-action@main
-      - uses: satackey/action-docker-layer-caching@main
-        continue-on-error: true
-
-      - name: Get changed files
-        id: changed-files
-        uses: tj-actions/changed-files@v34
-        with:
-          files: |
-            *.nix
-            go.*
-            **/*.go
-            integration_test/
-            config-example.yaml
-
-      - name: Run TestNodeOnlineLastSeenStatus
-        uses: Wandalen/wretry.action@master
-        if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
-            nix develop --command -- docker run \
-              --tty --rm \
-              --volume ~/.cache/hs-integration-go:/go \
-              --name headscale-test-suite \
-              --volume $PWD:$PWD -w $PWD/integration \
-              --volume /var/run/docker.sock:/var/run/docker.sock \
-              --volume $PWD/control_logs:/tmp/control \
-              golang:1 \
-              go run gotest.tools/gotestsum@latest -- ./... \
-                -failfast \
-                -timeout 120m \
-                -parallel 1 \
-                -run "^TestNodeOnlineLastSeenStatus$"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: logs
-          path: "control_logs/*.log"
-
-      - uses: actions/upload-artifact@v3
-        if: always() && steps.changed-files.outputs.any_changed == 'true'
-        with:
-          name: pprof
-          path: "control_logs/*.pprof.tar"
@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestNodeRenameCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestNodeTagCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestOIDCAuthenticationPingAll
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestOIDCExpireNodesBasedOnTokenExpiry
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestPingAllByHostname
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestPingAllByIP
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestPreAuthKeyCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestPreAuthKeyCommandReusableEphemeral
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestPreAuthKeyCommandWithoutExpiry
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestResolveMagicDNS
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestSSHIsBlockedInACL
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestSSHMultipleUsersAllToAll
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestSSHNoSSHConfigured
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestSSHOneUserToAll
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestSSHUserOnlyIsolation
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestTaildrop
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestTailscaleNodesJoiningHeadcale
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \

@@ -35,11 +35,8 @@ jobs:
             config-example.yaml

       - name: Run TestUserCommand
-        uses: Wandalen/wretry.action@master
         if: steps.changed-files.outputs.any_changed == 'true'
-        with:
-          attempt_limit: 5
-          command: |
+        run: |
             nix develop --command -- docker run \
               --tty --rm \
               --volume ~/.cache/hs-integration-go:/go \
@@ -49,6 +46,7 @@ jobs:
               --volume $PWD/control_logs:/tmp/control \
               golang:1 \
               go run gotest.tools/gotestsum@latest -- ./... \
+                -tags ts2019 \
                -failfast \
                -timeout 120m \
                -parallel 1 \
1  .gitignore  vendored
@@ -1,6 +1,5 @@
 ignored/
-tailscale/
 .vscode/

 # Binaries for programs and plugins
 *.exe
12  CHANGELOG.md
@@ -23,18 +23,11 @@ after improving the test harness as part of adopting [#1460](https://github.com/

 ### BREAKING

-- Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
-- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
-- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
-  - The latest supported client is 1.36
-- Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)
-  - If no DERP is configured, the server will fail to start, this can be because it cannot load the DERPMap from file or url.
-- Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)
-  - Add a filepath entry to [`derp.server.private_key_path`](https://github.com/juanfont/headscale/blob/b35993981297e18393706b2c963d6db882bba6aa/config-example.yaml#L95)
+Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
+API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)

 ### Changes

-Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644)
 Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484)
 Allow use of the username OIDC claim [#1287](https://github.com/juanfont/headscale/pull/1287)
 SSH support [#1487](https://github.com/juanfont/headscale/pull/1487)
@@ -43,7 +36,6 @@ Use error group handling to ensure tests actually pass [#1535](https://github.co
 Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) taken from [#1480](https://github.com/juanfont/headscale/pull/1480)
 Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)
 Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)
-Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
 Add `oidc.groups_claim`, `oidc.email_claim`, and `oidc.username_claim` to allow setting those claim names [#1594](https://github.com/juanfont/headscale/pull/1594)

 ## 0.22.3 (2023-05-12)
@@ -9,7 +9,7 @@ RUN go mod download

 COPY . .

-RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN strip /go/bin/headscale
 RUN test -e /go/bin/headscale

@@ -9,7 +9,7 @@ RUN go mod download

 COPY . .

-RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN test -e /go/bin/headscale

 # Debug image
6  Makefile
@@ -10,6 +10,8 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
 else
 endif

+TAGS = -tags ts2019
+
 # GO_SOURCES = $(wildcard *.go)
 # PROTO_SOURCES = $(wildcard **/*.proto)
 GO_SOURCES = $(call rwildcard,,*.go)
@@ -22,7 +24,7 @@ build:
 dev: lint test build

 test:
-	gotestsum -- -short -coverprofile=coverage.out ./...
+	gotestsum -- $(TAGS) -short -coverprofile=coverage.out ./...

 test_integration:
 	docker run \
@@ -32,7 +34,7 @@ test_integration:
 		-v $$PWD:$$PWD -w $$PWD/integration \
 		-v /var/run/docker.sock:/var/run/docker.sock \
 		golang:1 \
-		go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8
+		go run gotest.tools/gotestsum@latest -- $(TAGS) -failfast ./... -timeout 120m -parallel 8

 lint:
 	golangci-lint run --fix --timeout 10m
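The `$(TAGS)` variable only changes what gets compiled because Go source files can opt in to a build tag. A minimal sketch of such a gated file, using hypothetical names (headscale's actual ts2019-gated files and symbols are not shown in this diff):

    //go:build ts2019
    // +build ts2019

    // This file participates in the build only when the ts2019 tag is set,
    // e.g. `go build -tags ts2019` or the Makefile's `gotestsum -- $(TAGS)`
    // invocations above. Without the tag, these symbols simply do not exist.
    package hscontrol

    // serveTS2019 is a hypothetical handler for the legacy (pre-Noise)
    // Tailscale 2019 control protocol.
    func serveTS2019() {}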
92  README.md
@@ -466,13 +466,6 @@ make build
             <sub style="font-size:14px"><b>unreality</b></sub>
         </a>
     </td>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/vsychov>
-            <img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/>
-            <br />
-            <sub style="font-size:14px"><b>MichaelKo</b></sub>
-        </a>
-    </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/kevin1sMe>
             <img src=https://avatars.githubusercontent.com/u/6886076?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kevinlin/>
@@ -480,8 +473,6 @@ make build
             <sub style="font-size:14px"><b>kevinlin</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/QZAiXH>
             <img src=https://avatars.githubusercontent.com/u/23068780?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Snack/>
@@ -489,6 +480,8 @@ make build
             <sub style="font-size:14px"><b>Snack</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/artemklevtsov>
             <img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/>
@@ -524,8 +517,6 @@ make build
             <sub style="font-size:14px"><b>LIU HANCHENG</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/motiejus>
             <img src=https://avatars.githubusercontent.com/u/107720?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Motiejus Jakštys/>
@@ -533,6 +524,8 @@ make build
             <sub style="font-size:14px"><b>Motiejus Jakštys</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/pvinis>
             <img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/>
@@ -554,6 +547,13 @@ make build
             <sub style="font-size:14px"><b>Steven Honson</b></sub>
         </a>
     </td>
+    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+        <a href=https://github.com/vsychov>
+            <img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/>
+            <br />
+            <sub style="font-size:14px"><b>MichaelKo</b></sub>
+        </a>
+    </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/ratsclub>
             <img src=https://avatars.githubusercontent.com/u/25647735?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Victor Freire/>
@@ -577,13 +577,6 @@ make build
             <sub style="font-size:14px"><b>thomas</b></sub>
         </a>
     </td>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/puzpuzpuz>
-            <img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
-            <br />
-            <sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
-        </a>
-    </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/linsomniac>
             <img src=https://avatars.githubusercontent.com/u/466380?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sean Reifschneider/>
@@ -605,6 +598,13 @@ make build
             <sub style="font-size:14px"><b>Albert Copeland</b></sub>
         </a>
     </td>
+    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
+        <a href=https://github.com/puzpuzpuz>
+            <img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
+            <br />
+            <sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
+        </a>
+    </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/theryecatcher>
             <img src=https://avatars.githubusercontent.com/u/16442416?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anoop Sundaresh/>
@@ -658,13 +658,6 @@ make build
     </td>
 </tr>
 <tr>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/winterheart>
-            <img src=https://avatars.githubusercontent.com/u/81112?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azamat H. Hackimov/>
-            <br />
-            <sub style="font-size:14px"><b>Azamat H. Hackimov</b></sub>
-        </a>
-    </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/stensonb>
             <img src=https://avatars.githubusercontent.com/u/933389?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Bryan Stenson/>
@@ -700,8 +693,6 @@ make build
             <sub style="font-size:14px"><b>Felix Kronlage-Dammers</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/felixonmars>
             <img src=https://avatars.githubusercontent.com/u/1006477?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Yan/>
@@ -709,6 +700,8 @@ make build
             <sub style="font-size:14px"><b>Felix Yan</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/gabe565>
             <img src=https://avatars.githubusercontent.com/u/7717888?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Gabe Cook/>
@@ -730,13 +723,6 @@ make build
             <sub style="font-size:14px"><b>hrtkpf</b></sub>
         </a>
     </td>
-    <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
-        <a href=https://github.com/jessebot>
-            <img src=https://avatars.githubusercontent.com/u/2389292?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JesseBot/>
-            <br />
-            <sub style="font-size:14px"><b>JesseBot</b></sub>
-        </a>
-    </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/jimt>
             <img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/>
@@ -744,8 +730,6 @@ make build
             <sub style="font-size:14px"><b>Jim Tittsler</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/jsiebens>
             <img src=https://avatars.githubusercontent.com/u/499769?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Johan Siebens/>
@@ -760,6 +744,8 @@ make build
             <sub style="font-size:14px"><b>John Axel Eriksson</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/ShadowJonathan>
             <img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/>
@@ -788,8 +774,6 @@ make build
             <sub style="font-size:14px"><b>Lucalux</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/foxtrot>
             <img src=https://avatars.githubusercontent.com/u/4153572?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Marc/>
@@ -804,6 +788,8 @@ make build
             <sub style="font-size:14px"><b>Mesar Hameed</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/mikejsavage>
             <img src=https://avatars.githubusercontent.com/u/579299?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael Savage/>
@@ -832,8 +818,6 @@ make build
             <sub style="font-size:14px"><b>Pontus N</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/nnsee>
             <img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/>
@@ -848,6 +832,8 @@ make build
             <sub style="font-size:14px"><b>rcursaru</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/renovate-bot>
             <img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/>
@@ -864,9 +850,9 @@ make build
     </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/muzy>
-            <img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian/>
+            <img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian Muszytowski/>
             <br />
-            <sub style="font-size:14px"><b>Sebastian</b></sub>
+            <sub style="font-size:14px"><b>Sebastian Muszytowski</b></sub>
         </a>
     </td>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -876,8 +862,6 @@ make build
             <sub style="font-size:14px"><b>Shaanan Cohney</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/6ixfalls>
             <img src=https://avatars.githubusercontent.com/u/23470032?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Six/>
@@ -892,6 +876,8 @@ make build
             <sub style="font-size:14px"><b>Stefan VanBuren</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/sophware>
             <img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/>
@@ -920,8 +906,6 @@ make build
             <sub style="font-size:14px"><b>The Gitter Badger</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/tianon>
             <img src=https://avatars.githubusercontent.com/u/161631?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tianon Gravi/>
@@ -936,6 +920,8 @@ make build
             <sub style="font-size:14px"><b>Till Hoffmann</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/woudsma>
             <img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/>
@@ -964,8 +950,6 @@ make build
             <sub style="font-size:14px"><b>Zachary Newell</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/zekker6>
             <img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/>
@@ -980,6 +964,8 @@ make build
             <sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/Bpazy>
             <img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/>
@@ -1008,8 +994,6 @@ make build
             <sub style="font-size:14px"><b>dnaq</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/nning>
             <img src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/>
@@ -1024,6 +1008,8 @@ make build
             <sub style="font-size:14px"><b>ignoramous</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/jimyag>
             <img src=https://avatars.githubusercontent.com/u/69233189?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=jimyag/>
@@ -1052,8 +1038,6 @@ make build
             <sub style="font-size:14px"><b>ma6174</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/manju-rn>
             <img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/>
@@ -1068,6 +1052,8 @@ make build
             <sub style="font-size:14px"><b>nicholas-yap</b></sub>
         </a>
     </td>
+</tr>
+<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/pernila>
             <img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/>
@@ -1096,8 +1082,6 @@ make build
             <sub style="font-size:14px"><b>zy</b></sub>
         </a>
     </td>
-</tr>
-<tr>
     <td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
         <a href=https://github.com/atorregrosa-smd>
             <img src=https://avatars.githubusercontent.com/u/78434679?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Àlex Torregrosa/>
47
cmd/build-docker-img/main.go
Normal file
47
cmd/build-docker-img/main.go
Normal file
|
@ -0,0 +1,47 @@
package main

import (
	"log"

	"github.com/juanfont/headscale/integration"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/ory/dockertest/v3"
)

func main() {
	log.Printf("creating docker pool")
	pool, err := dockertest.NewPool("")
	if err != nil {
		log.Fatalf("could not connect to docker: %s", err)
	}

	log.Printf("creating docker network")
	network, err := pool.CreateNetwork("docker-integration-net")
	if err != nil {
		log.Fatalf("failed to create or get network: %s", err)
	}

	for _, version := range integration.AllVersions {
		log.Printf("creating container image for Tailscale (%s)", version)

		tsClient, err := tsic.New(
			pool,
			version,
			network,
		)
		if err != nil {
			log.Fatalf("failed to create tailscale node: %s", err)
		}

		err = tsClient.Shutdown()
		if err != nil {
			log.Fatalf("failed to shut down container: %s", err)
		}
	}

	network.Close()
	err = pool.RemoveNetwork(network)
	if err != nil {
		log.Fatalf("failed to remove network: %s", err)
	}
}
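The new `cmd/build-docker-img` helper pre-builds one Tailscale container per entry in `integration.AllVersions` so integration runs start from warm images. One caveat in the file above: `log.Fatalf` exits the process immediately, so the docker network created earlier is never removed when a build fails. A minimal sketch of a defer-based variant, using the same `dockertest` calls as the file; the `run` wrapper is an assumption, not part of this compare:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ory/dockertest/v3"
)

func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}

func run() error {
	pool, err := dockertest.NewPool("")
	if err != nil {
		return fmt.Errorf("could not connect to docker: %w", err)
	}

	network, err := pool.CreateNetwork("docker-integration-net")
	if err != nil {
		return fmt.Errorf("failed to create or get network: %w", err)
	}
	// Deferred cleanup runs even when an image build below fails,
	// unlike log.Fatalf in the committed file.
	defer func() {
		if err := pool.RemoveNetwork(network); err != nil {
			log.Printf("failed to remove network: %s", err)
		}
	}()

	// Per-version image building would go here, as in the file above.
	return nil
}
```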
@ -56,11 +56,8 @@ jobs:
            config-example.yaml

      - name: Run {{.Name}}
        uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.any_changed == 'true'
        with:
          attempt_limit: 5
          command: |
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \

@ -70,6 +67,7 @@ jobs:
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
              -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \
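The `{{.Name}}` placeholder indicates the per-test workflow files earlier in this compare are rendered from a Go `text/template`; this hunk edits the template, and the generated files change in lockstep (wretry retry wrapper on one side, a plain `run:` plus `-tags ts2019` on the other). A hedged sketch of how such a generator might render the job name; the trimmed template string and the `Name` field shape are assumptions beyond the `{{.Name}}` shown above:

```go
package main

import (
	"os"
	"text/template"
)

// jobTemplate is a stand-in for the workflow template above;
// only the {{.Name}} interpolation is taken from the diff.
const jobTemplate = `- name: Run {{.Name}}
  if: steps.changed-files.outputs.any_changed == 'true'
`

func main() {
	tmpl := template.Must(template.New("job").Parse(jobTemplate))
	// One rendered workflow per integration test, e.g. TestACLAllowStarDst.
	if err := tmpl.Execute(os.Stdout, struct{ Name string }{Name: "TestACLAllowStarDst"}); err != nil {
		panic(err)
	}
}
```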
@ -67,7 +67,7 @@ var listAPIKeys = &cobra.Command{
		}

		if output != "" {
			SuccessOutput(response.GetApiKeys(), "", output)
			SuccessOutput(response.ApiKeys, "", output)

			return
		}

@ -75,11 +75,11 @@ var listAPIKeys = &cobra.Command{
		tableData := pterm.TableData{
			{"ID", "Prefix", "Expiration", "Created"},
		}
		for _, key := range response.GetApiKeys() {
		for _, key := range response.ApiKeys {
			expiration := "-"

			if key.GetExpiration() != nil {
				expiration = ColourTime(key.GetExpiration().AsTime())
				expiration = ColourTime(key.Expiration.AsTime())
			}

			tableData = append(tableData, []string{

@ -155,7 +155,7 @@ If you loose a key, create a new one and revoke (expire) the old one.`,
			return
		}

		SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
		SuccessOutput(response.ApiKey, response.ApiKey, output)
	},
}
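The CLI hunks throughout this compare consistently swap generated protobuf getters (`response.GetApiKeys()`) against direct field access (`response.ApiKeys`). The practical difference is nil-safety: a protoc-gen-go getter returns the zero value on a nil receiver, while field access panics. A self-contained illustration; the struct here only mimics the shape of the generated message, whose real type lives in `gen/go/headscale/v1`:

```go
package main

import "fmt"

// apiKeysResponse mimics a generated protobuf message.
type apiKeysResponse struct {
	ApiKeys []string
}

// GetApiKeys follows the protoc-gen-go getter convention:
// it tolerates a nil receiver instead of panicking.
func (r *apiKeysResponse) GetApiKeys() []string {
	if r == nil {
		return nil
	}
	return r.ApiKeys
}

func main() {
	var resp *apiKeysResponse // nil, e.g. after an RPC error

	fmt.Println(len(resp.GetApiKeys())) // 0: getter is nil-safe
	// fmt.Println(len(resp.ApiKeys))   // would panic: nil pointer dereference
}
```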
@ -4,10 +4,10 @@ import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
	"google.golang.org/grpc/status"
	"tailscale.com/types/key"
)

const (

@ -93,13 +93,11 @@ var createNodeCmd = &cobra.Command{

			return
		}

		var mkey key.MachinePublic
		err = mkey.UnmarshalText([]byte(machineKey))
		if err != nil {
		if !util.NodePublicKeyRegex.Match([]byte(machineKey)) {
			err = errPreAuthKeyMalformed
			ErrorOutput(
				err,
				fmt.Sprintf("Failed to parse machine key from flag: %s", err),
				fmt.Sprintf("Error: %s", err),
				output,
			)

@ -135,6 +133,6 @@ var createNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.GetNode(), "Node created", output)
		SuccessOutput(response.Node, "Node created", output)
	},
}
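One side of the `createNodeCmd` hunk validates the `--key` flag by unmarshalling it into a typed `key.MachinePublic`, the other by matching a regex and keeping the raw string. The typed parse rejects malformed input and garbage prefixes in one step. A small sketch of that path, using `tailscale.com/types/key` as in the diff; the all-zero sample key is fabricated for illustration:

```go
package main

import (
	"fmt"

	"tailscale.com/types/key"
)

func main() {
	// MachinePublic keys serialize as "mkey:" plus 64 hex characters;
	// this all-zero value is only for illustration.
	raw := "mkey:0000000000000000000000000000000000000000000000000000000000000000"

	var mkey key.MachinePublic
	if err := mkey.UnmarshalText([]byte(raw)); err != nil {
		// A malformed flag value is rejected here, no regex needed.
		fmt.Println("failed to parse machine key:", err)
		return
	}

	fmt.Println("parsed:", mkey.ShortString())
}
```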
@ -152,8 +152,8 @@ var registerNodeCmd = &cobra.Command{
		}

		SuccessOutput(
			response.GetNode(),
			fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
			response.Node,
			fmt.Sprintf("Node %s registered", response.Node.GivenName), output)
	},
}

@ -196,12 +196,12 @@ var listNodesCmd = &cobra.Command{
		}

		if output != "" {
			SuccessOutput(response.GetNodes(), "", output)
			SuccessOutput(response.Nodes, "", output)

			return
		}

		tableData, err := nodesToPtables(user, showTags, response.GetNodes())
		tableData, err := nodesToPtables(user, showTags, response.Nodes)
		if err != nil {
			ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)

@ -262,7 +262,7 @@ var expireNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.GetNode(), "Node expired", output)
		SuccessOutput(response.Node, "Node expired", output)
	},
}

@ -310,7 +310,7 @@ var renameNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.GetNode(), "Node renamed", output)
		SuccessOutput(response.Node, "Node renamed", output)
	},
}

@ -364,7 +364,7 @@ var deleteNodeCmd = &cobra.Command{
		prompt := &survey.Confirm{
			Message: fmt.Sprintf(
				"Do you want to remove the node %s?",
				getResponse.GetNode().GetName(),
				getResponse.GetNode().Name,
			),
		}
		err = survey.AskOne(prompt, &confirm)

@ -473,7 +473,7 @@ var moveNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
		SuccessOutput(moveResponse.Node, "Node moved to another user", output)
	},
}
@ -493,7 +493,7 @@ func nodesToPtables(
		"Ephemeral",
		"Last seen",
		"Expiration",
		"Connected",
		"Online",
		"Expired",
	}
	if showTags {

@ -507,21 +507,21 @@ func nodesToPtables(

	for _, node := range nodes {
		var ephemeral bool
		if node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() {
		if node.PreAuthKey != nil && node.PreAuthKey.Ephemeral {
			ephemeral = true
		}

		var lastSeen time.Time
		var lastSeenTime string
		if node.GetLastSeen() != nil {
			lastSeen = node.GetLastSeen().AsTime()
		if node.LastSeen != nil {
			lastSeen = node.LastSeen.AsTime()
			lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
		}

		var expiry time.Time
		var expiryTime string
		if node.GetExpiry() != nil {
			expiry = node.GetExpiry().AsTime()
		if node.Expiry != nil {
			expiry = node.Expiry.AsTime()
			expiryTime = expiry.Format("2006-01-02 15:04:05")
		} else {
			expiryTime = "N/A"

@ -529,7 +529,7 @@ func nodesToPtables(

		var machineKey key.MachinePublic
		err := machineKey.UnmarshalText(
			[]byte(node.GetMachineKey()),
			[]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
		)
		if err != nil {
			machineKey = key.MachinePublic{}

@ -537,14 +537,14 @@ func nodesToPtables(

		var nodeKey key.NodePublic
		err = nodeKey.UnmarshalText(
			[]byte(node.GetNodeKey()),
			[]byte(util.NodePublicKeyEnsurePrefix(node.NodeKey)),
		)
		if err != nil {
			return nil, err
		}

		var online string
		if node.GetOnline() {
		if node.Online {
			online = pterm.LightGreen("online")
		} else {
			online = pterm.LightRed("offline")

@ -558,36 +558,36 @@ func nodesToPtables(
		}

		var forcedTags string
		for _, tag := range node.GetForcedTags() {
		for _, tag := range node.ForcedTags {
			forcedTags += "," + tag
		}
		forcedTags = strings.TrimLeft(forcedTags, ",")
		var invalidTags string
		for _, tag := range node.GetInvalidTags() {
			if !contains(node.GetForcedTags(), tag) {
		for _, tag := range node.InvalidTags {
			if !contains(node.ForcedTags, tag) {
				invalidTags += "," + pterm.LightRed(tag)
			}
		}
		invalidTags = strings.TrimLeft(invalidTags, ",")
		var validTags string
		for _, tag := range node.GetValidTags() {
			if !contains(node.GetForcedTags(), tag) {
		for _, tag := range node.ValidTags {
			if !contains(node.ForcedTags, tag) {
				validTags += "," + pterm.LightGreen(tag)
			}
		}
		validTags = strings.TrimLeft(validTags, ",")

		var user string
		if currentUser == "" || (currentUser == node.GetUser().GetName()) {
			user = pterm.LightMagenta(node.GetUser().GetName())
		if currentUser == "" || (currentUser == node.User.Name) {
			user = pterm.LightMagenta(node.User.Name)
		} else {
			// Shared into this user
			user = pterm.LightYellow(node.GetUser().GetName())
			user = pterm.LightYellow(node.User.Name)
		}

		var IPV4Address string
		var IPV6Address string
		for _, addr := range node.GetIpAddresses() {
		for _, addr := range node.IpAddresses {
			if netip.MustParseAddr(addr).Is4() {
				IPV4Address = addr
			} else {

@ -596,8 +596,8 @@ func nodesToPtables(
		}

		nodeData := []string{
			strconv.FormatUint(node.GetId(), util.Base10),
			node.GetName(),
			strconv.FormatUint(node.Id, util.Base10),
			node.Name,
			node.GetGivenName(),
			machineKey.ShortString(),
			nodeKey.ShortString(),
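On the side of this compare that stores keys as bare strings, `nodesToPtables` re-adds the wire prefix via `util.MachinePublicKeyEnsurePrefix` / `util.NodePublicKeyEnsurePrefix` before unmarshalling. A sketch of what such an "ensure prefix" helper amounts to; only the helper names come from the diff, the body below is an assumption:

```go
package main

import (
	"fmt"
	"strings"
)

// ensurePrefix mirrors what util.MachinePublicKeyEnsurePrefix is used for
// above: older rows store bare hex, the parser wants "mkey:...".
func ensurePrefix(k, prefix string) string {
	if strings.HasPrefix(k, prefix) {
		return k
	}
	return prefix + k
}

func main() {
	bare := "0000000000000000000000000000000000000000000000000000000000000000"
	fmt.Println(ensurePrefix(bare, "mkey:"))         // adds the prefix
	fmt.Println(ensurePrefix("mkey:"+bare, "mkey:")) // already prefixed: unchanged
}
```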
@ -84,7 +84,7 @@ var listPreAuthKeys = &cobra.Command{
		}

		if output != "" {
			SuccessOutput(response.GetPreAuthKeys(), "", output)
			SuccessOutput(response.PreAuthKeys, "", output)

			return
		}

@ -101,10 +101,10 @@ var listPreAuthKeys = &cobra.Command{
				"Tags",
			},
		}
		for _, key := range response.GetPreAuthKeys() {
		for _, key := range response.PreAuthKeys {
			expiration := "-"
			if key.GetExpiration() != nil {
				expiration = ColourTime(key.GetExpiration().AsTime())
				expiration = ColourTime(key.Expiration.AsTime())
			}

			var reusable string

@ -116,7 +116,7 @@ var listPreAuthKeys = &cobra.Command{

			aclTags := ""

			for _, tag := range key.GetAclTags() {
			for _, tag := range key.AclTags {
				aclTags += "," + tag
			}

@ -214,7 +214,7 @@ var createPreAuthKeyCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
		SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
	},
}
@ -87,12 +87,12 @@ var listRoutesCmd = &cobra.Command{
			}

			if output != "" {
				SuccessOutput(response.GetRoutes(), "", output)
				SuccessOutput(response.Routes, "", output)

				return
			}

			routes = response.GetRoutes()
			routes = response.Routes
		} else {
			response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
				NodeId: machineID,

@ -108,12 +108,12 @@ var listRoutesCmd = &cobra.Command{
			}

			if output != "" {
				SuccessOutput(response.GetRoutes(), "", output)
				SuccessOutput(response.Routes, "", output)

				return
			}

			routes = response.GetRoutes()
			routes = response.Routes
		}

		tableData := routesToPtables(routes)

@ -271,25 +271,25 @@ func routesToPtables(routes []*v1.Route) pterm.TableData {

	for _, route := range routes {
		var isPrimaryStr string
		prefix, err := netip.ParsePrefix(route.GetPrefix())
		prefix, err := netip.ParsePrefix(route.Prefix)
		if err != nil {
			log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)
			log.Printf("Error parsing prefix %s: %s", route.Prefix, err)

			continue
		}
		if prefix == types.ExitRouteV4 || prefix == types.ExitRouteV6 {
			isPrimaryStr = "-"
		} else {
			isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
			isPrimaryStr = strconv.FormatBool(route.IsPrimary)
		}

		tableData = append(tableData,
			[]string{
				strconv.FormatUint(route.GetId(), Base10),
				route.GetNode().GetGivenName(),
				route.GetPrefix(),
				strconv.FormatBool(route.GetAdvertised()),
				strconv.FormatBool(route.GetEnabled()),
				strconv.FormatUint(route.Id, Base10),
				route.Node.GivenName,
				route.Prefix,
				strconv.FormatBool(route.Advertised),
				strconv.FormatBool(route.Enabled),
				isPrimaryStr,
			})
	}
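`routesToPtables` special-cases exit routes: a prefix equal to `types.ExitRouteV4` or `types.ExitRouteV6` never shows a primary flag. Assuming those constants are the usual catch-all prefixes 0.0.0.0/0 and ::/0 (their values are not spelled out in this compare), the check reduces to a comparable `netip.Prefix` equality:

```go
package main

import (
	"fmt"
	"net/netip"
)

var (
	exitRouteV4 = netip.MustParsePrefix("0.0.0.0/0") // assumed value of types.ExitRouteV4
	exitRouteV6 = netip.MustParsePrefix("::/0")      // assumed value of types.ExitRouteV6
)

func isExitRoute(s string) (bool, error) {
	prefix, err := netip.ParsePrefix(s)
	if err != nil {
		return false, err
	}
	// netip.Prefix is comparable, so == works, as in routesToPtables above.
	return prefix == exitRouteV4 || prefix == exitRouteV6, nil
}

func main() {
	for _, s := range []string{"0.0.0.0/0", "10.0.0.0/24", "::/0"} {
		ok, err := isExitRoute(s)
		fmt.Println(s, ok, err)
	}
}
```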
@ -67,7 +67,7 @@ var createUserCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.GetUser(), "User created", output)
		SuccessOutput(response.User, "User created", output)
	},
}

@ -169,7 +169,7 @@ var listUsersCmd = &cobra.Command{
		}

		if output != "" {
			SuccessOutput(response.GetUsers(), "", output)
			SuccessOutput(response.Users, "", output)

			return
		}

@ -236,6 +236,6 @@ var renameUserCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.GetUser(), "User renamed", output)
		SuccessOutput(response.User, "User renamed", output)
	},
}
@ -40,12 +40,19 @@ grpc_listen_addr: 127.0.0.1:50443
# are doing.
grpc_allow_insecure: false

# Private key used to encrypt the traffic between headscale
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/private.key

# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
  # The Noise private key is used to encrypt the
  # traffic between headscale and Tailscale clients when
  # using the new Noise-based protocol.
  # using the new Noise-based protocol. It must be different
  # from the legacy private key.
  private_key_path: /var/lib/headscale/noise_private.key

# List of IP prefixes to allocate tailaddresses from.

@ -88,12 +95,6 @@ derp:
  # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
  stun_listen_addr: "0.0.0.0:3478"

  # Private key used to encrypt the traffic between headscale DERP
  # and Tailscale clients.
  # The private key file will be autogenerated if it's missing.
  #
  private_key_path: /var/lib/headscale/derp_server_private.key

  # List of externally available DERP maps encoded in JSON
  urls:
    - https://controlplane.tailscale.com/derpmap/default
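The config side of this compare reintroduces the legacy `private_key_path` next to the Noise key, and the comment warns the two must differ; the `NewHeadscale` hunk later in this compare enforces exactly that at startup. A small sketch of the check using `tailscale.com/types/key` as in the diff, with freshly generated keys rather than the on-disk loading headscale does:

```go
package main

import (
	"errors"
	"fmt"

	"tailscale.com/types/key"
)

// validateKeys mirrors the privateKey.Equal(*noisePrivateKey) guard
// in NewHeadscale further down in this compare.
func validateKeys(legacy, noise key.MachinePrivate) error {
	if legacy.Equal(noise) {
		return errors.New("private key and noise private key are the same")
	}
	return nil
}

func main() {
	legacy := key.NewMachine()
	noise := key.NewMachine()

	fmt.Println(validateKeys(legacy, noise))  // <nil>: distinct keys
	fmt.Println(validateKeys(legacy, legacy)) // rejected
}
```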
@ -1,5 +0,0 @@
cairosvg~=2.7.1
mkdocs-material~=9.4.14
mkdocs-minify-plugin~=0.7.1
pillow~=10.1.0
@ -28,7 +28,7 @@ cd ./headscale
   touch ./config/db.sqlite
   ```

3. **(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.
3. **(Strongly Recommended)** Download a copy of the [example configuration][config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.

   Using wget:
12
flake.lock
@ -5,11 +5,11 @@
      "systems": "systems"
    },
    "locked": {
      "lastModified": 1701680307,
      "narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
      "lastModified": 1694529238,
      "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
      "owner": "numtide",
      "repo": "flake-utils",
      "rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
      "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
      "type": "github"
    },
    "original": {

@ -20,11 +20,11 @@
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1701998057,
        "narHash": "sha256-gAJGhcTO9cso7XDfAScXUlPcva427AUT2q02qrmXPdo=",
        "lastModified": 1699186365,
        "narHash": "sha256-Pxrw5U8mBsL3NlrJ6q1KK1crzvSUcdfwb9083sKDrcU=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "09dc04054ba2ff1f861357d0e7e76d021b273cd7",
        "rev": "a0b3b06b7a82c965ae0bb1d59f6e386fe755001d",
        "type": "github"
      },
      "original": {
16
flake.nix
@ -26,12 +26,14 @@
        version = headscaleVersion;
        src = pkgs.lib.cleanSource self;

        tags = ["ts2019"];

        # Only run unit tests when testing a build
        checkFlags = ["-short"];

        # When updating go.mod or go.sum, a new sha will need to be calculated,
        # update this if you have a mismatch after doing a change to thos files.
        vendorHash = "sha256-8x4RKaS8vnBYTPlvQTkDKWIAJOgPF99hvPiuRyTMrA8=";
        vendorSha256 = "sha256-Q6eySc8lXYhkWka7Y+qOM6viv7QhdjFZDX8PttaLfr4=";

        ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
      };

@ -47,7 +49,7 @@
          sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A=";
        };

        vendorHash = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
        vendorSha256 = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";

        nativeBuildInputs = [pkgs.installShellFiles];
      };

@ -69,7 +71,7 @@
          sha256 = "sha256-lnNdsDCpeSHtl2lC1IhUw11t3cnGF+37qSM7HDvKLls=";
        };

        vendorHash = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA=";
        vendorSha256 = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA=";

        nativeBuildInputs = [pkgs.installShellFiles];

@ -127,7 +129,15 @@
        buildInputs = devDeps;

        shellHook = ''
          export GOFLAGS=-tags="ts2019"
          export PATH="$PWD/result/bin:$PATH"

          mkdir -p ./ignored
          export HEADSCALE_PRIVATE_KEY_PATH="./ignored/private.key"
          export HEADSCALE_NOISE_PRIVATE_KEY_PATH="./ignored/noise_private.key"
          export HEADSCALE_DB_PATH="./ignored/db.sqlite"
          export HEADSCALE_TLS_LETSENCRYPT_CACHE_DIR="./ignored/cache"
          export HEADSCALE_UNIX_SOCKET="./ignored/headscale.sock"
        '';
      };
208
go.mod
@ -1,181 +1,137 @@
module github.com/juanfont/headscale

go 1.21.0
go 1.21

toolchain go1.21.4
toolchain go1.21.1

require (
	github.com/AlecAivazis/survey/v2 v2.3.7
	github.com/coreos/go-oidc/v3 v3.8.0
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
	github.com/deckarep/golang-set/v2 v2.4.0
	github.com/AlecAivazis/survey/v2 v2.3.6
	github.com/coreos/go-oidc/v3 v3.5.0
	github.com/davecgh/go-spew v1.1.1
	github.com/deckarep/golang-set/v2 v2.3.0
	github.com/efekarakus/termcolor v1.0.1
	github.com/glebarez/sqlite v1.10.0
	github.com/go-gormigrate/gormigrate/v2 v2.1.1
	github.com/glebarez/sqlite v1.7.0
	github.com/gofrs/uuid/v5 v5.0.0
	github.com/google/go-cmp v0.6.0
	github.com/gorilla/mux v1.8.1
	github.com/google/go-cmp v0.5.9
	github.com/gorilla/mux v1.8.0
	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1
	github.com/klauspost/compress v1.17.3
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.2
	github.com/klauspost/compress v1.16.7
	github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282
	github.com/ory/dockertest/v3 v3.10.0
	github.com/ory/dockertest/v3 v3.9.1
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/philip-bui/grpc-zerolog v1.0.1
	github.com/pkg/profile v1.7.0
	github.com/prometheus/client_golang v1.17.0
	github.com/prometheus/common v0.45.0
	github.com/pterm/pterm v0.12.71
	github.com/puzpuzpuz/xsync/v3 v3.0.2
	github.com/rs/zerolog v1.31.0
	github.com/prometheus/client_golang v1.15.1
	github.com/prometheus/common v0.42.0
	github.com/pterm/pterm v0.12.58
	github.com/puzpuzpuz/xsync/v2 v2.4.0
	github.com/rs/zerolog v1.29.0
	github.com/samber/lo v1.38.1
	github.com/spf13/cobra v1.8.0
	github.com/spf13/viper v1.17.0
	github.com/spf13/cobra v1.7.0
	github.com/spf13/viper v1.16.0
	github.com/stretchr/testify v1.8.4
	github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a
	github.com/tailscale/tailsql v0.0.0-20231216172832-51483e0c711b
	github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
	go4.org/netipx v0.0.0-20230824141953-6213f710f925
	golang.org/x/crypto v0.16.0
	golang.org/x/exp v0.0.0-20231127185646-65229373498e
	golang.org/x/net v0.19.0
	golang.org/x/oauth2 v0.15.0
	golang.org/x/sync v0.5.0
	google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4
	google.golang.org/grpc v1.59.0
	google.golang.org/protobuf v1.31.0
	go4.org/netipx v0.0.0-20230728180743-ad4cb58a6516
	golang.org/x/crypto v0.12.0
	golang.org/x/net v0.14.0
	golang.org/x/oauth2 v0.7.0
	golang.org/x/sync v0.2.0
	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
	google.golang.org/grpc v1.55.0
	google.golang.org/protobuf v1.30.0
	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
	gopkg.in/yaml.v3 v3.0.1
	gorm.io/driver/postgres v1.5.4
	gorm.io/gorm v1.25.5
	tailscale.com v1.56.1
	gorm.io/driver/postgres v1.4.8
	gorm.io/gorm v1.24.6
	tailscale.com v1.50.0
)

require (
	atomicgo.dev/cursor v0.2.0 // indirect
	atomicgo.dev/cursor v0.1.1 // indirect
	atomicgo.dev/keyboard v0.2.9 // indirect
	atomicgo.dev/schedule v0.1.0 // indirect
	filippo.io/edwards25519 v1.0.0 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
	github.com/Microsoft/go-winio v0.6.1 // indirect
	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
	github.com/akutz/memconn v0.1.0 // indirect
	github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
	github.com/aws/aws-sdk-go-v2 v1.21.0 // indirect
	github.com/aws/aws-sdk-go-v2/config v1.18.42 // indirect
	github.com/aws/aws-sdk-go-v2/credentials v1.13.40 // indirect
	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect
	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect
	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect
	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 // indirect
	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssm v1.38.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.14.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 // indirect
	github.com/aws/smithy-go v1.14.2 // indirect
	github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/containerd/console v1.0.3 // indirect
	github.com/containerd/continuity v0.4.3 // indirect
	github.com/coreos/go-iptables v0.7.0 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/dblohm7/wingoes v0.0.0-20231025182615-65d8b4b5428f // indirect
	github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
	github.com/docker/cli v24.0.7+incompatible // indirect
	github.com/docker/docker v24.0.7+incompatible // indirect
	github.com/containerd/continuity v0.3.0 // indirect
	github.com/coreos/go-iptables v0.6.0 // indirect
	github.com/dblohm7/wingoes v0.0.0-20230821191801-fc76608aecf0 // indirect
	github.com/docker/cli v23.0.5+incompatible // indirect
	github.com/docker/docker v24.0.4+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/felixge/fgprof v0.9.3 // indirect
	github.com/fsnotify/fsnotify v1.7.0 // indirect
	github.com/fxamacker/cbor/v2 v2.5.0 // indirect
	github.com/glebarez/go-sqlite v1.21.2 // indirect
	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/fxamacker/cbor/v2 v2.4.0 // indirect
	github.com/glebarez/go-sqlite v1.20.3 // indirect
	github.com/go-jose/go-jose/v3 v3.0.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/btree v1.1.2 // indirect
	github.com/google/go-github v17.0.0+incompatible // indirect
	github.com/google/go-querystring v1.1.0 // indirect
	github.com/google/nftables v0.1.1-0.20230115205135-9aa6fdf5a28c // indirect
	github.com/google/pprof v0.0.0-20231127191134-f3a68a39ae15 // indirect
	github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
	github.com/google/uuid v1.4.0 // indirect
	github.com/gookit/color v1.5.4 // indirect
	github.com/gorilla/csrf v1.7.1 // indirect
	github.com/gorilla/securecookie v1.1.1 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/gookit/color v1.5.3 // indirect
	github.com/hashicorp/go-version v1.6.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/hdevalence/ed25519consensus v0.1.0 // indirect
	github.com/illarion/gonotify v1.0.1 // indirect
	github.com/imdario/mergo v0.3.16 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/insomniacslk/dhcp v0.0.0-20230908212754-65c27093e38a // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
	github.com/jackc/pgx/v5 v5.5.0 // indirect
	github.com/jackc/puddle/v2 v2.2.1 // indirect
	github.com/jackc/pgx/v5 v5.3.0 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.5 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect
	github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 // indirect
	github.com/jsimonetti/rtnetlink v1.4.0 // indirect
	github.com/jsimonetti/rtnetlink v1.3.2 // indirect
	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
	github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect
	github.com/kr/pretty v0.3.1 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/lib/pq v1.10.7 // indirect
	github.com/lithammer/fuzzysearch v1.1.8 // indirect
	github.com/lithammer/fuzzysearch v1.1.5 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/mattn/go-runewidth v0.0.15 // indirect
	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
	github.com/mdlayher/genetlink v1.3.2 // indirect
	github.com/mattn/go-isatty v0.0.18 // indirect
	github.com/mattn/go-runewidth v0.0.14 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mdlayher/netlink v1.7.2 // indirect
	github.com/mdlayher/sdnotify v1.0.0 // indirect
	github.com/mdlayher/socket v0.5.0 // indirect
	github.com/mdlayher/socket v0.4.1 // indirect
	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
	github.com/miekg/dns v1.1.57 // indirect
	github.com/miekg/dns v1.1.55 // indirect
	github.com/mitchellh/go-ps v1.0.0 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moby/term v0.5.0 // indirect
	github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
	github.com/opencontainers/runc v1.1.10 // indirect
	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
	github.com/pierrec/lz4/v4 v4.1.18 // indirect
	github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
	github.com/opencontainers/runc v1.1.4 // indirect
	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/prometheus/client_model v0.5.0 // indirect
	github.com/prometheus/procfs v0.12.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_model v0.4.0 // indirect
	github.com/prometheus/procfs v0.9.0 // indirect
	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
	github.com/rivo/uniseg v0.4.4 // indirect
	github.com/rogpeppe/go-internal v1.11.0 // indirect
	github.com/safchain/ethtool v0.3.0 // indirect
	github.com/sagikazarmark/locafero v0.4.0 // indirect
	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
	github.com/sirupsen/logrus v1.9.3 // indirect
	github.com/sourcegraph/conc v0.3.0 // indirect
	github.com/spf13/afero v1.11.0 // indirect
	github.com/spf13/cast v1.6.0 // indirect
	github.com/rogpeppe/go-internal v1.10.0 // indirect
	github.com/sirupsen/logrus v1.9.0 // indirect
	github.com/spf13/afero v1.9.5 // indirect
	github.com/spf13/cast v1.5.1 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/subosito/gotenv v1.6.0 // indirect
	github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
	github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
	github.com/tailscale/golang-x-crypto v0.0.0-20230713185742-f0b76a10a08e // indirect
	github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
	github.com/subosito/gotenv v1.4.2 // indirect
	github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 // indirect
	github.com/tailscale/setec v0.0.0-20230926024544-07dde05889e7 // indirect
	github.com/tailscale/web-client-prebuilt v0.0.0-20231213172531-a4fa669015b2 // indirect
	github.com/tailscale/wireguard-go v0.0.0-20231121184858-cc193a0b3272 // indirect
	github.com/tcnksm/go-httpstat v0.2.0 // indirect
	github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect
	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
	github.com/vishvananda/netns v0.0.4 // indirect
	github.com/x448/float16 v0.8.4 // indirect

@ -183,27 +139,23 @@ require (
	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
	golang.org/x/mod v0.14.0 // indirect
	golang.org/x/sys v0.15.0 // indirect
	golang.org/x/term v0.15.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/time v0.5.0 // indirect
	golang.org/x/tools v0.16.0 // indirect
	golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
	golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 // indirect
	golang.org/x/mod v0.11.0 // indirect
	golang.org/x/sys v0.11.0 // indirect
	golang.org/x/term v0.11.0 // indirect
	golang.org/x/text v0.12.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	golang.org/x/tools v0.9.1 // indirect
	golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
	google.golang.org/appengine v1.6.8 // indirect
	google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gvisor.dev/gvisor v0.0.0-20230928000133-4fe30062272c // indirect
	inet.af/peercred v0.0.0-20210906144145-0893ea02156a // indirect
	modernc.org/libc v1.34.11 // indirect
	modernc.org/mathutil v1.6.0 // indirect
	modernc.org/memory v1.7.2 // indirect
	modernc.org/sqlite v1.28.0 // indirect
	nhooyr.io/websocket v1.8.10 // indirect
	gotest.tools/v3 v3.4.0 // indirect
	modernc.org/libc v1.22.2 // indirect
	modernc.org/mathutil v1.5.0 // indirect
	modernc.org/memory v1.5.0 // indirect
	modernc.org/sqlite v1.20.3 // indirect
	nhooyr.io/websocket v1.8.7 // indirect
)
102
hscontrol/app.go
@ -48,7 +48,6 @@ import (
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/reflection"
	"google.golang.org/grpc/status"
	"tailscale.com/envknob"
	"tailscale.com/tailcfg"
	"tailscale.com/types/dnstype"
	"tailscale.com/types/key"

@ -60,9 +59,6 @@ var (
	errUnsupportedLetsEncryptChallengeType = errors.New(
		"unknown value for Lets Encrypt challenge type",
	)
	errEmptyInitialDERPMap = errors.New(
		"initial DERPMap is empty, Headscale requries at least one entry",
	)
)

const (

@ -81,6 +77,7 @@ type Headscale struct {
	dbString        string
	dbType          string
	dbDebug         bool
	privateKey2019  *key.MachinePrivate
	noisePrivateKey *key.MachinePrivate

	DERPMap *tailcfg.DERPMap

@ -99,23 +96,26 @@ type Headscale struct {
	pollNetMapStreamWG sync.WaitGroup
}

var (
	profilingEnabled = envknob.Bool("HEADSCALE_PROFILING_ENABLED")
	tailsqlEnabled   = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED")
	tailsqlStateDir  = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR")
	tailsqlTSKey     = envknob.String("TS_AUTHKEY")
)

func NewHeadscale(cfg *types.Config) (*Headscale, error) {
	if profilingEnabled {
	if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile {
		runtime.SetBlockProfileRate(1)
	}

	privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read or create private key: %w", err)
	}

	// TS2021 requires to have a different key from the legacy protocol.
	noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
	}

	if privateKey.Equal(*noisePrivateKey) {
		return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
	}

	var dbString string
	switch cfg.DBtype {
	case db.Postgres:

@ -156,6 +156,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
		cfg:                cfg,
		dbType:             cfg.DBtype,
		dbString:           dbString,
		privateKey2019:     privateKey,
		noisePrivateKey:    noisePrivateKey,
		registrationCache:  registrationCache,
		pollNetMapStreamWG: sync.WaitGroup{},

@ -198,21 +199,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
	}

	if cfg.DERP.ServerEnabled {
		derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
		if err != nil {
			return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
		}

		if derpServerKey.Equal(*noisePrivateKey) {
			return nil, fmt.Errorf(
				"DERP server private key and noise private key are the same: %w",
				err,
			)
		}

		// TODO(kradalby): replace this key with a dedicated DERP key.
		embeddedDERPServer, err := derpServer.NewDERPServer(
			cfg.ServerURL,
			key.NodePrivate(*derpServerKey),
			key.NodePrivate(*privateKey),
			&cfg.DERP,
		)
		if err != nil {

@ -273,13 +263,20 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
				h.DERPMap.Regions[region.RegionID] = &region
			}

			stateUpdate := types.StateUpdate{
			h.nodeNotifier.NotifyAll(types.StateUpdate{
				Type:    types.StateDERPUpdated,
				DERPMap: h.DERPMap,
				DERPMap: *h.DERPMap,
			})
		}
			if stateUpdate.Valid() {
				h.nodeNotifier.NotifyAll(stateUpdate)
			}
	}
}

func (h *Headscale) failoverSubnetRoutes(milliSeconds int64) {
	ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
	for range ticker.C {
		err := h.db.HandlePrimarySubnetFailover()
		if err != nil {
			log.Error().Err(err).Msg("failed to handle primary subnet failover")
		}
	}
}
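One side of the `scheduledDERPMapUpdateWorker` hunk builds a `types.StateUpdate` and only fans it out when `stateUpdate.Valid()` holds, instead of calling `NotifyAll` unconditionally. A hedged sketch of that guard pattern; the struct below is reduced to what the hunk shows, and `Valid`'s real logic lives in headscale's types package:

```go
package main

import "fmt"

type derpMap struct{ regions int }

// stateUpdate is a stand-in for types.StateUpdate from the diff.
type stateUpdate struct {
	Type    string
	DERPMap *derpMap
}

// Valid is assumed to reject updates whose payload is missing, so
// subscribers never see a DERP update without a map attached.
func (u stateUpdate) Valid() bool {
	return u.Type != "derp" || u.DERPMap != nil
}

func notifyAll(u stateUpdate) { fmt.Println("notified:", u.Type) }

func main() {
	update := stateUpdate{Type: "derp", DERPMap: &derpMap{regions: 1}}
	if update.Valid() { // gate before fan-out, as in the worker above
		notifyAll(update)
	}
}
```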
@ -452,9 +449,10 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {

	router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
	router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
	router.HandleFunc("/register/{mkey}", h.RegisterWebAPI).Methods(http.MethodGet)
	router.HandleFunc("/register/{nkey}", h.RegisterWebAPI).Methods(http.MethodGet)
	h.addLegacyHandlers(router)

	router.HandleFunc("/oidc/register/{mkey}", h.RegisterOIDC).Methods(http.MethodGet)
	router.HandleFunc("/oidc/register/{nkey}", h.RegisterOIDC).Methods(http.MethodGet)
	router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
	router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
	router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).

@ -512,15 +510,13 @@ func (h *Headscale) Serve() error {
		go h.scheduledDERPMapUpdateWorker(derpMapCancelChannel)
	}

	if len(h.DERPMap.Regions) == 0 {
		return errEmptyInitialDERPMap
	}

	// TODO(kradalby): These should have cancel channels and be cleaned
	// up on shutdown.
	go h.expireEphemeralNodes(updateInterval)
	go h.expireExpiredMachines(updateInterval)

	go h.failoverSubnetRoutes(updateInterval)

	if zl.GlobalLevel() == zl.TraceLevel {
		zerolog.RespLog = true
	} else {

@ -576,10 +572,7 @@ func (h *Headscale) Serve() error {
	}

	// Start the local gRPC server without TLS and without authentication
	grpcSocket := grpc.NewServer(
		// Uncomment to debug grpc communication.
		// zerolog.UnaryInterceptor(),
	)
	grpcSocket := grpc.NewServer(zerolog.UnaryInterceptor())

	v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h))
	reflection.Register(grpcSocket)

@ -619,8 +612,7 @@ func (h *Headscale) Serve() error {
		grpc.UnaryInterceptor(
			grpcMiddleware.ChainUnaryServer(
				h.grpcAuthenticationInterceptor,
				// Uncomment to debug grpc communication.
				// zerolog.NewUnaryServerInterceptor(),
				zerolog.NewUnaryServerInterceptor(),
			),
		),
	}

@ -706,18 +698,6 @@ func (h *Headscale) Serve() error {
	log.Info().
		Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr)

	var tailsqlContext context.Context
	if tailsqlEnabled {
		if h.cfg.DBtype != db.Sqlite {
			log.Fatal().Str("type", h.cfg.DBtype).Msgf("tailsql only support %q", db.Sqlite)
		}
		if tailsqlTSKey == "" {
			log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set")
		}
		tailsqlContext = context.Background()
		go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.DBpath)
	}

	// Handle common process-killing signals so we can gracefully shut down:
	h.shutdownChan = make(chan struct{})
	sigc := make(chan os.Signal, 1)

@ -783,10 +763,6 @@ func (h *Headscale) Serve() error {
		grpcListener.Close()
	}

	if tailsqlContext != nil {
		tailsqlContext.Done()
	}

	// Close network listeners
	promHTTPListener.Close()
	httpListener.Close()

@ -924,8 +900,7 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
		err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
		if err != nil {
			return nil, fmt.Errorf(
				"failed to save private key to disk at path %q: %w",
				path,
				"failed to save private key to disk: %w",
				err,
			)
		}

@ -936,9 +911,16 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
	}

	trimmedPrivateKey := strings.TrimSpace(string(privateKey))
	privateKeyEnsurePrefix := util.PrivateKeyEnsurePrefix(trimmedPrivateKey)

	var machineKey key.MachinePrivate
	if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil {
	if err = machineKey.UnmarshalText([]byte(privateKeyEnsurePrefix)); err != nil {
		log.Info().
			Str("path", path).
			Msg("This might be due to a legacy (headscale pre-0.12) private key. " +
				"If the key is in WireGuard format, delete the key and restart headscale. " +
				"A new key will automatically be generated. All Tailscale clients will have to be restarted")

		return nil, fmt.Errorf("failed to parse private key: %w", err)
	}
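The two sides of `readOrCreatePrivateKey` differ in parsing tolerance: one trims whitespace and unmarshals directly, the other first normalizes the string with `util.PrivateKeyEnsurePrefix` so older key files stored without the text prefix still load. A sketch of that normalize-then-parse order; the `privkey:` prefix value is an assumption based on Tailscale's key text format, and `normalize` stands in for the util helper:

```go
package main

import (
	"fmt"
	"strings"

	"tailscale.com/types/key"
)

// normalize mirrors what util.PrivateKeyEnsurePrefix is used for above.
func normalize(raw string) string {
	trimmed := strings.TrimSpace(raw)
	if !strings.HasPrefix(trimmed, "privkey:") {
		return "privkey:" + trimmed
	}
	return trimmed
}

func main() {
	// A freshly generated key, round-tripped through its text form with
	// the prefix stripped, as an old key file might store it.
	text, _ := key.NewMachine().MarshalText()
	bare := strings.TrimPrefix(string(text), "privkey:")

	var machineKey key.MachinePrivate
	if err := machineKey.UnmarshalText([]byte(normalize(bare + "\n"))); err != nil {
		fmt.Println("failed to parse private key:", err)
		return
	}
	fmt.Println("parsed private key")
}
```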
@ -1,13 +1,13 @@
package hscontrol

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/juanfont/headscale/hscontrol/mapper"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"

@ -16,62 +16,22 @@ import (
	"tailscale.com/types/key"
)

func logAuthFunc(
	registerRequest tailcfg.RegisterRequest,
	machineKey key.MachinePublic,
) (func(string), func(string), func(error, string)) {
	return func(msg string) {
			log.Info().
				Caller().
				Str("machine_key", machineKey.ShortString()).
				Str("node_key", registerRequest.NodeKey.ShortString()).
				Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
				Str("node", registerRequest.Hostinfo.Hostname).
				Str("followup", registerRequest.Followup).
				Time("expiry", registerRequest.Expiry).
				Msg(msg)
		},
		func(msg string) {
			log.Trace().
				Caller().
				Str("machine_key", machineKey.ShortString()).
				Str("node_key", registerRequest.NodeKey.ShortString()).
				Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
				Str("node", registerRequest.Hostinfo.Hostname).
				Str("followup", registerRequest.Followup).
				Time("expiry", registerRequest.Expiry).
				Msg(msg)
		},
		func(err error, msg string) {
			log.Error().
				Caller().
				Str("machine_key", machineKey.ShortString()).
				Str("node_key", registerRequest.NodeKey.ShortString()).
				Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
				Str("node", registerRequest.Hostinfo.Hostname).
				Str("followup", registerRequest.Followup).
				Time("expiry", registerRequest.Expiry).
				Err(err).
				Msg(msg)
		}
}
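`logAuthFunc` bundles three zerolog closures so the rest of `handleRegister` can log at info, trace, and error level without repeating the node-identifying fields on every call site; the other side of the hunk inlines each `log.Info()...` chain instead. A stdlib-`log` stand-in for the same closure-triple shape (zerolog and the real field set are replaced by a string prefix here):

```go
package main

import "log"

// authLoggers shows the shape logAuthFunc returns above: three closures
// sharing the same pre-bound context fields.
func authLoggers(machineKey, nodeKey string) (func(string), func(string), func(error, string)) {
	prefix := "machine_key=" + machineKey + " node_key=" + nodeKey + " "
	return func(msg string) { log.Print("INFO " + prefix + msg) },
		func(msg string) { log.Print("TRACE " + prefix + msg) },
		func(err error, msg string) { log.Printf("ERROR %s%s: %v", prefix, msg, err) }
}

func main() {
	logInfo, logTrace, _ := authLoggers("mkey:…", "nodekey:…")
	logTrace("handleRegister called, looking up machine in DB")
	logInfo("Node not found in database, creating new")
}
```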
// handleRegister is the logic for registering a client.
// handleRegister is the common logic for registering a client in the legacy and Noise protocols
//
// When using Noise, the machineKey is Zero.
func (h *Headscale) handleRegister(
	writer http.ResponseWriter,
	req *http.Request,
	registerRequest tailcfg.RegisterRequest,
	machineKey key.MachinePublic,
	isNoise bool,
) {
	logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)
	now := time.Now().UTC()
	logTrace("handleRegister called, looking up machine in DB")
	node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey)
	logTrace("handleRegister database lookup has returned")
	if errors.Is(err, gorm.ErrRecordNotFound) {
		// If the node has AuthKey set, handle registration via PreAuthKeys
		if registerRequest.Auth.AuthKey != "" {
			h.handleAuthKey(writer, registerRequest, machineKey)
			h.handleAuthKey(writer, registerRequest, machineKey, isNoise)

			return
		}

@ -85,29 +45,49 @@ func (h *Headscale) handleRegister(
		// is that the client will hammer headscale with requests until it gets a
		// successful RegisterResponse.
		if registerRequest.Followup != "" {
			logTrace("register request is a followup")
			if _, ok := h.registrationCache.Get(machineKey.String()); ok {
				logTrace("Node is waiting for interactive login")
			if _, ok := h.registrationCache.Get(util.NodePublicKeyStripPrefix(registerRequest.NodeKey)); ok {
				log.Debug().
					Caller().
					Str("node", registerRequest.Hostinfo.Hostname).
					Str("machine_key", machineKey.ShortString()).
					Str("node_key", registerRequest.NodeKey.ShortString()).
					Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
					Str("follow_up", registerRequest.Followup).
					Bool("noise", isNoise).
					Msg("Node is waiting for interactive login")

				select {
				case <-req.Context().Done():
					return
				case <-time.After(registrationHoldoff):
					h.handleNewNode(writer, registerRequest, machineKey)
					h.handleNewNode(writer, registerRequest, machineKey, isNoise)

					return
				}
			}
		}
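The followup branch above parks a client that is mid-interactive-login: it waits `registrationHoldoff` before answering again, but bails out immediately if the client hangs up, by selecting on the request context. The same pattern in isolation (duration shortened, `handleNewNode` replaced by a print):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const registrationHoldoff = 100 * time.Millisecond // real value lives elsewhere in the package

func waitForLogin(ctx context.Context) {
	select {
	case <-ctx.Done():
		// Client disconnected; give up without answering.
		return
	case <-time.After(registrationHoldoff):
		// Holdoff elapsed; re-send the auth URL, as handleNewNode does.
		fmt.Println("resending auth URL")
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { time.Sleep(10 * time.Millisecond); cancel() }()
	waitForLogin(ctx) // returns early: context cancelled before holdoff
	waitForLogin(context.Background())
}
```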
logInfo("Node not found in database, creating new")
|
||||
log.Info().
|
||||
Caller().
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("node_key", registerRequest.NodeKey.ShortString()).
|
||||
Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
|
||||
Str("follow_up", registerRequest.Followup).
|
||||
Bool("noise", isNoise).
|
||||
Msg("New node not yet in the database")
|
||||
|
||||
givenName, err := h.db.GenerateGivenName(
|
||||
machineKey,
|
||||
machineKey.String(),
|
||||
registerRequest.Hostinfo.Hostname,
|
||||
)
|
||||
if err != nil {
|
||||
logErr(err, "Failed to generate given name for node")
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to generate given name for node")
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -117,26 +97,31 @@ func (h *Headscale) handleRegister(
|
|||
// We create the node and then keep it around until a callback
|
||||
// happens
|
||||
newNode := types.Node{
|
||||
MachineKey: machineKey,
|
||||
MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
|
||||
Hostname: registerRequest.Hostinfo.Hostname,
|
||||
GivenName: givenName,
|
||||
NodeKey: registerRequest.NodeKey,
|
||||
NodeKey: util.NodePublicKeyStripPrefix(registerRequest.NodeKey),
|
||||
LastSeen: &now,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
|
||||
if !registerRequest.Expiry.IsZero() {
|
||||
logTrace("Non-zero expiry time requested")
|
||||
log.Trace().
|
||||
Caller().
|
||||
Bool("noise", isNoise).
|
||||
Str("node", registerRequest.Hostinfo.Hostname).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Msg("Non-zero expiry time requested")
|
||||
newNode.Expiry = ®isterRequest.Expiry
|
||||
}
|
||||
|
||||
h.registrationCache.Set(
|
||||
machineKey.String(),
|
||||
newNode.NodeKey,
|
||||
newNode,
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
h.handleNewNode(writer, registerRequest, machineKey)
|
||||
h.handleNewNode(writer, registerRequest, machineKey, isNoise)
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -149,7 +134,11 @@ func (h *Headscale) handleRegister(
|
|||
// (juan): For a while we had a bug where we were not storing the MachineKey for the nodes using the TS2021,
|
||||
// due to a misunderstanding of the protocol https://github.com/juanfont/headscale/issues/1054
|
||||
// So if we have a not valid MachineKey (but we were able to fetch the node with the NodeKeys), we update it.
|
||||
if err != nil || node.MachineKey.IsZero() {
|
||||
var storedMachineKey key.MachinePublic
|
||||
err = storedMachineKey.UnmarshalText(
|
||||
[]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
|
||||
)
|
||||
if err != nil || storedMachineKey.IsZero() {
|
||||
if err := h.db.NodeSetMachineKey(node, machineKey); err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
|
@ -167,12 +156,12 @@ func (h *Headscale) handleRegister(
|
|||
// - Trying to log out (sending a expiry in the past)
|
||||
// - A valid, registered node, looking for /map
|
||||
// - Expired node wanting to reauthenticate
|
||||
if node.NodeKey.String() == registerRequest.NodeKey.String() {
|
||||
if node.NodeKey == util.NodePublicKeyStripPrefix(registerRequest.NodeKey) {
|
||||
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
|
||||
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
|
||||
if !registerRequest.Expiry.IsZero() &&
|
||||
registerRequest.Expiry.UTC().Before(now) {
|
||||
h.handleNodeLogOut(writer, *node, machineKey)
|
||||
h.handleNodeLogOut(writer, *node, machineKey, isNoise)
|
||||
|
||||
return
|
||||
}
|
||||
|
@ -180,20 +169,21 @@ func (h *Headscale) handleRegister(
|
|||
// If node is not expired, and it is register, we have a already accepted this node,
|
||||
// let it proceed with a valid registration
|
||||
if !node.IsExpired() {
|
||||
h.handleNodeWithValidRegistration(writer, *node, machineKey)
|
||||
h.handleNodeWithValidRegistration(writer, *node, machineKey, isNoise)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
|
||||
if node.NodeKey.String() == registerRequest.OldNodeKey.String() &&
|
||||
if node.NodeKey == util.NodePublicKeyStripPrefix(registerRequest.OldNodeKey) &&
|
||||
!node.IsExpired() {
|
||||
h.handleNodeKeyRefresh(
|
||||
writer,
|
||||
registerRequest,
|
||||
*node,
|
||||
machineKey,
|
||||
isNoise,
|
||||
)
|
||||
|
||||
return
|
||||
|
@ -208,7 +198,7 @@ func (h *Headscale) handleRegister(
|
|||
}
|
||||
|
||||
// The node has expired or it is logged out
|
||||
h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey)
|
||||
h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey, isNoise)
|
||||
|
||||
// TODO(juan): RegisterRequest includes an Expiry time, that we could optionally use
|
||||
node.Expiry = &time.Time{}
|
||||
|
@ -217,9 +207,9 @@ func (h *Headscale) handleRegister(
|
|||
// we need to make sure the NodeKey matches the one in the request
|
||||
// TODO(juan): What happens when using fast user switching between two
|
||||
// headscale-managed tailnets?
|
||||
node.NodeKey = registerRequest.NodeKey
|
||||
node.NodeKey = util.NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
h.registrationCache.Set(
|
||||
machineKey.String(),
|
||||
util.NodePublicKeyStripPrefix(registerRequest.NodeKey),
|
||||
*node,
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
@ -229,6 +219,7 @@ func (h *Headscale) handleRegister(
|
|||
}
|
||||
|
||||
// handleAuthKey contains the logic to manage auth key client registration
|
||||
// It is used both by the legacy and the new Noise protocol.
|
||||
// When using Noise, the machineKey is Zero.
|
||||
//
|
||||
// TODO: check if any locks are needed around IP allocation.
|
||||
|
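From here on, every `json.Marshal(resp)` in `handleAuthKey` is paired against `mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)` on the other side of the compare: over the Noise transport the response can go out as plain JSON, while the legacy protocol seals it to the client's machine key with the 2019 private key. A hedged sketch of that branch; the sealing call below is representative, not the mapper package's actual code:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// marshalResponse sketches the split the diff introduces: JSON for the
// Noise transport, an encrypted-to-the-client blob for the legacy one.
func marshalResponse(resp any, isNoise bool, seal func([]byte) []byte) ([]byte, error) {
	body, err := json.Marshal(resp)
	if err != nil {
		return nil, err
	}
	if isNoise {
		// TS2021: the Noise channel is already encrypted end to end.
		return body, nil
	}
	// Legacy: seal the body for this client's machine key.
	return seal(body), nil
}

func main() {
	fakeSeal := func(b []byte) []byte { return append([]byte("sealed:"), b...) }

	noise, _ := marshalResponse(map[string]bool{"ok": true}, true, fakeSeal)
	legacy, _ := marshalResponse(map[string]bool{"ok": true}, false, fakeSeal)
	fmt.Println(string(noise))
	fmt.Println(string(legacy))
}
```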
@@ -236,10 +227,12 @@ func (h *Headscale) handleAuthKey(
    writer http.ResponseWriter,
    registerRequest tailcfg.RegisterRequest,
    machineKey key.MachinePublic,
    isNoise bool,
) {
    log.Debug().
        Caller().
        Str("node", registerRequest.Hostinfo.Hostname).
        Bool("noise", isNoise).
        Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
    resp := tailcfg.RegisterResponse{}

@@ -247,15 +240,17 @@ func (h *Headscale) handleAuthKey(
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Str("node", registerRequest.Hostinfo.Hostname).
            Err(err).
            Msg("Failed authentication via AuthKey")
        resp.MachineAuthorized = false

        respBody, err := json.Marshal(resp)
        respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
        if err != nil {
            log.Error().
                Caller().
                Bool("noise", isNoise).
                Str("node", registerRequest.Hostinfo.Hostname).
                Err(err).
                Msg("Cannot encode message")

@@ -272,12 +267,14 @@ func (h *Headscale) handleAuthKey(
        if err != nil {
            log.Error().
                Caller().
                Bool("noise", isNoise).
                Err(err).
                Msg("Failed to write response")
        }

        log.Error().
            Caller().
            Bool("noise", isNoise).
            Str("node", registerRequest.Hostinfo.Hostname).
            Msg("Failed authentication via AuthKey")

@@ -293,10 +290,11 @@ func (h *Headscale) handleAuthKey(

    log.Debug().
        Caller().
        Bool("noise", isNoise).
        Str("node", registerRequest.Hostinfo.Hostname).
        Msg("Authentication key was valid, proceeding to acquire IP addresses")

    nodeKey := registerRequest.NodeKey
    nodeKey := util.NodePublicKeyStripPrefix(registerRequest.NodeKey)

    // retrieve node information if it exist
    // The error is not important, because if it does not

@@ -306,6 +304,7 @@ func (h *Headscale) handleAuthKey(
    if node != nil {
        log.Trace().
            Caller().
            Bool("noise", isNoise).
            Str("node", node.Hostname).
            Msg("node was already registered before, refreshing with new auth key")

@@ -315,6 +314,7 @@ func (h *Headscale) handleAuthKey(
        if err != nil {
            log.Error().
                Caller().
                Bool("noise", isNoise).
                Str("node", node.Hostname).
                Err(err).
                Msg("Failed to refresh node")

@@ -322,7 +322,7 @@ func (h *Headscale) handleAuthKey(
            return
        }

        aclTags := pak.Proto().GetAclTags()
        aclTags := pak.Proto().AclTags
        if len(aclTags) > 0 {
            // This conditional preserves the existing behaviour, although SaaS would reset the tags on auth-key login
            err = h.db.SetTags(node, aclTags)

@@ -330,6 +330,7 @@ func (h *Headscale) handleAuthKey(
            if err != nil {
                log.Error().
                    Caller().
                    Bool("noise", isNoise).
                    Str("node", node.Hostname).
                    Strs("aclTags", aclTags).
                    Err(err).

@@ -341,10 +342,11 @@ func (h *Headscale) handleAuthKey(
    } else {
        now := time.Now().UTC()

        givenName, err := h.db.GenerateGivenName(machineKey, registerRequest.Hostinfo.Hostname)
        givenName, err := h.db.GenerateGivenName(util.MachinePublicKeyStripPrefix(machineKey), registerRequest.Hostinfo.Hostname)
        if err != nil {
            log.Error().
                Caller().
                Bool("noise", isNoise).
                Str("func", "RegistrationHandler").
                Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
                Err(err).

@@ -357,13 +359,13 @@ func (h *Headscale) handleAuthKey(
            Hostname:       registerRequest.Hostinfo.Hostname,
            GivenName:      givenName,
            UserID:         pak.User.ID,
            MachineKey:     machineKey,
            MachineKey:     util.MachinePublicKeyStripPrefix(machineKey),
            RegisterMethod: util.RegisterMethodAuthKey,
            Expiry:         &registerRequest.Expiry,
            NodeKey:        nodeKey,
            LastSeen:       &now,
            AuthKeyID:      uint(pak.ID),
            ForcedTags:     pak.Proto().GetAclTags(),
            ForcedTags:     pak.Proto().AclTags,
        }

        node, err = h.db.RegisterNode(

@@ -372,6 +374,7 @@ func (h *Headscale) handleAuthKey(
        if err != nil {
            log.Error().
                Caller().
                Bool("noise", isNoise).
                Err(err).
                Msg("could not register node")
            nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).

@@ -386,6 +389,7 @@ func (h *Headscale) handleAuthKey(
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Failed to use pre-auth key")
        nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).

@@ -401,10 +405,11 @@ func (h *Headscale) handleAuthKey(
    // Otherwise it will need to exec `tailscale up` twice to fetch the *LoginName*
    resp.Login = *pak.User.TailscaleLogin()

    respBody, err := json.Marshal(resp)
    respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Str("node", registerRequest.Hostinfo.Hostname).
            Err(err).
            Msg("Cannot encode message")

@@ -422,105 +427,52 @@ func (h *Headscale) handleAuthKey(
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Failed to write response")
    }

    log.Info().
        Bool("noise", isNoise).
        Str("node", registerRequest.Hostinfo.Hostname).
        Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")).
        Msg("Successfully authenticated via AuthKey")
}
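Both sides of this function serialize a tailcfg.RegisterResponse; the side that takes isNoise funnels everything through mapper.MarshalResponse so the Noise transport gets plain JSON while the legacy ts2019 transport gets a NaCl box sealed to the client's machine key. Below is a minimal sketch of what such a helper can look like, assuming the tailscale.com/types/key SealTo API; the real mapper package in the repository may differ in detail.

package mapper

import (
	"encoding/json"

	"tailscale.com/types/key"
)

// MarshalResponse sketches the dispatch seen in the calls above: Noise
// clients read plain JSON off an already-encrypted tunnel, while legacy
// (ts2019) clients expect the JSON sealed in a NaCl box from the
// server's 2019 private key to their machine key.
func MarshalResponse(
	resp interface{},
	isNoise bool,
	privateKey2019 *key.MachinePrivate,
	machineKey key.MachinePublic,
) ([]byte, error) {
	jsonBody, err := json.Marshal(resp)
	if err != nil {
		return nil, err
	}

	if isNoise {
		// The Noise channel is encrypted end to end; no sealing needed.
		return jsonBody, nil
	}

	// Legacy transport: seal the payload to the client's machine key.
	return privateKey2019.SealTo(machineKey, jsonBody), nil
}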
// handleNewNode returns the authorisation URL to the client based on what type
// of registration headscale is configured with.
// This url is then showed to the user by the local Tailscale client.
// handleNewNode exposes for both legacy and Noise the functionality to get a URL
// for authorizing the node. This url is then showed to the user by the local Tailscale client.
func (h *Headscale) handleNewNode(
    writer http.ResponseWriter,
    registerRequest tailcfg.RegisterRequest,
    machineKey key.MachinePublic,
    isNoise bool,
) {
    logInfo, logTrace, logErr := logAuthFunc(registerRequest, machineKey)

    resp := tailcfg.RegisterResponse{}

    // The node registration is new, redirect the client to the registration URL
    logTrace("The node seems to be new, sending auth url")
    log.Debug().
        Caller().
        Bool("noise", isNoise).
        Str("node", registerRequest.Hostinfo.Hostname).
        Msg("The node seems to be new, sending auth url")

    if h.oauth2Config != nil {
        resp.AuthURL = fmt.Sprintf(
            "%s/oidc/register/%s",
            strings.TrimSuffix(h.cfg.ServerURL, "/"),
            machineKey.String(),
            registerRequest.NodeKey,
        )
    } else {
        resp.AuthURL = fmt.Sprintf("%s/register/%s",
            strings.TrimSuffix(h.cfg.ServerURL, "/"),
            machineKey.String())
            registerRequest.NodeKey)
    }

    respBody, err := json.Marshal(resp)
    if err != nil {
        logErr(err, "Cannot encode message")
        http.Error(writer, "Internal server error", http.StatusInternalServerError)

        return
    }

    writer.Header().Set("Content-Type", "application/json; charset=utf-8")
    writer.WriteHeader(http.StatusOK)
    _, err = writer.Write(respBody)
    if err != nil {
        logErr(err, "Failed to write response")
    }

    logInfo(fmt.Sprintf("Successfully sent auth url: %s", resp.AuthURL))
}
func (h *Headscale) handleNodeLogOut(
    writer http.ResponseWriter,
    node types.Node,
    machineKey key.MachinePublic,
) {
    resp := tailcfg.RegisterResponse{}

    log.Info().
        Str("node", node.Hostname).
        Msg("Client requested logout")

    now := time.Now()
    err := h.db.NodeSetExpiry(&node, now)
    if err != nil {
        log.Error().
            Caller().
            Err(err).
            Msg("Failed to expire node")
        http.Error(writer, "Internal server error", http.StatusInternalServerError)

        return
    }

    stateUpdate := types.StateUpdate{
        Type: types.StatePeerChangedPatch,
        ChangePatches: []*tailcfg.PeerChange{
            {
                NodeID:    tailcfg.NodeID(node.ID),
                KeyExpiry: &now,
            },
        },
    }
    if stateUpdate.Valid() {
        h.nodeNotifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
    }

    resp.AuthURL = ""
    resp.MachineAuthorized = false
    resp.NodeKeyExpired = true
    resp.User = *node.User.TailscaleUser()
    respBody, err := json.Marshal(resp)
    respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Cannot encode message")
        http.Error(writer, "Internal server error", http.StatusInternalServerError)

@@ -533,6 +485,68 @@ func (h *Headscale) handleNodeLogOut(
    _, err = writer.Write(respBody)
    if err != nil {
        log.Error().
            Bool("noise", isNoise).
            Caller().
            Err(err).
            Msg("Failed to write response")
    }

    log.Info().
        Caller().
        Bool("noise", isNoise).
        Str("AuthURL", resp.AuthURL).
        Str("node", registerRequest.Hostinfo.Hostname).
        Msg("Successfully sent auth url")
}
func (h *Headscale) handleNodeLogOut(
    writer http.ResponseWriter,
    node types.Node,
    machineKey key.MachinePublic,
    isNoise bool,
) {
    resp := tailcfg.RegisterResponse{}

    log.Info().
        Bool("noise", isNoise).
        Str("node", node.Hostname).
        Msg("Client requested logout")

    now := time.Now()
    err := h.db.NodeSetExpiry(&node, now)
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Failed to expire node")
        http.Error(writer, "Internal server error", http.StatusInternalServerError)

        return
    }

    resp.AuthURL = ""
    resp.MachineAuthorized = false
    resp.NodeKeyExpired = true
    resp.User = *node.User.TailscaleUser()
    respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Cannot encode message")
        http.Error(writer, "Internal server error", http.StatusInternalServerError)

        return
    }

    writer.Header().Set("Content-Type", "application/json; charset=utf-8")
    writer.WriteHeader(http.StatusOK)
    _, err = writer.Write(respBody)
    if err != nil {
        log.Error().
            Bool("noise", isNoise).
            Caller().
            Err(err).
            Msg("Failed to write response")

@@ -554,6 +568,7 @@ func (h *Headscale) handleNodeLogOut(

    log.Info().
        Caller().
        Bool("noise", isNoise).
        Str("node", node.Hostname).
        Msg("Successfully logged out")
}
@@ -562,12 +577,14 @@ func (h *Headscale) handleNodeWithValidRegistration(
    writer http.ResponseWriter,
    node types.Node,
    machineKey key.MachinePublic,
    isNoise bool,
) {
    resp := tailcfg.RegisterResponse{}

    // The node registration is valid, respond with redirect to /map
    log.Debug().
        Caller().
        Bool("noise", isNoise).
        Str("node", node.Hostname).
        Msg("Client is registered and we have the current NodeKey. All clear to /map")

@@ -576,10 +593,11 @@ func (h *Headscale) handleNodeWithValidRegistration(
    resp.User = *node.User.TailscaleUser()
    resp.Login = *node.User.TailscaleLogin()

    respBody, err := json.Marshal(resp)
    respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Cannot encode message")
        nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name).

@@ -597,12 +615,14 @@ func (h *Headscale) handleNodeWithValidRegistration(
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Failed to write response")
    }

    log.Info().
        Caller().
        Bool("noise", isNoise).
        Str("node", node.Hostname).
        Msg("Node successfully authorized")
}
@@ -612,11 +632,13 @@ func (h *Headscale) handleNodeKeyRefresh(
    registerRequest tailcfg.RegisterRequest,
    node types.Node,
    machineKey key.MachinePublic,
    isNoise bool,
) {
    resp := tailcfg.RegisterResponse{}

    log.Info().
        Caller().
        Bool("noise", isNoise).
        Str("node", node.Hostname).
        Msg("We have the OldNodeKey in the database. This is a key refresh")

@@ -633,10 +655,11 @@ func (h *Headscale) handleNodeKeyRefresh(

    resp.AuthURL = ""
    resp.User = *node.User.TailscaleUser()
    respBody, err := json.Marshal(resp)
    respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Cannot encode message")
        http.Error(writer, "Internal server error", http.StatusInternalServerError)

@@ -650,12 +673,14 @@ func (h *Headscale) handleNodeKeyRefresh(
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Failed to write response")
    }

    log.Info().
        Caller().
        Bool("noise", isNoise).
        Str("node_key", registerRequest.NodeKey.ShortString()).
        Str("old_node_key", registerRequest.OldNodeKey.ShortString()).
        Str("node", node.Hostname).
@@ -667,11 +692,12 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
    registerRequest tailcfg.RegisterRequest,
    node types.Node,
    machineKey key.MachinePublic,
    isNoise bool,
) {
    resp := tailcfg.RegisterResponse{}

    if registerRequest.Auth.AuthKey != "" {
        h.handleAuthKey(writer, registerRequest, machineKey)
        h.handleAuthKey(writer, registerRequest, machineKey, isNoise)

        return
    }

@@ -679,6 +705,7 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
    // The client has registered before, but has expired or logged out
    log.Trace().
        Caller().
        Bool("noise", isNoise).
        Str("node", node.Hostname).
        Str("machine_key", machineKey.ShortString()).
        Str("node_key", registerRequest.NodeKey.ShortString()).

@@ -688,17 +715,18 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
    if h.oauth2Config != nil {
        resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s",
            strings.TrimSuffix(h.cfg.ServerURL, "/"),
            machineKey.String())
            registerRequest.NodeKey)
    } else {
        resp.AuthURL = fmt.Sprintf("%s/register/%s",
            strings.TrimSuffix(h.cfg.ServerURL, "/"),
            machineKey.String())
            registerRequest.NodeKey)
    }

    respBody, err := json.Marshal(resp)
    respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Cannot encode message")
        nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name).

@@ -716,12 +744,14 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
    if err != nil {
        log.Error().
            Caller().
            Bool("noise", isNoise).
            Err(err).
            Msg("Failed to write response")
    }

    log.Trace().
        Caller().
        Bool("noise", isNoise).
        Str("machine_key", machineKey.ShortString()).
        Str("node_key", registerRequest.NodeKey.ShortString()).
        Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
61
hscontrol/auth_legacy.go
Normal file
@@ -0,0 +1,61 @@
//go:build ts2019

package hscontrol

import (
    "io"
    "net/http"

    "github.com/gorilla/mux"
    "github.com/juanfont/headscale/hscontrol/util"
    "github.com/rs/zerolog/log"
    "tailscale.com/tailcfg"
    "tailscale.com/types/key"
)

// RegistrationHandler handles the actual registration process of a machine
// Endpoint /machine/:mkey.
func (h *Headscale) RegistrationHandler(
    writer http.ResponseWriter,
    req *http.Request,
) {
    vars := mux.Vars(req)
    machineKeyStr, ok := vars["mkey"]
    if !ok || machineKeyStr == "" {
        log.Error().
            Str("handler", "RegistrationHandler").
            Msg("No machine ID in request")
        http.Error(writer, "No machine ID in request", http.StatusBadRequest)

        return
    }

    body, _ := io.ReadAll(req.Body)

    var machineKey key.MachinePublic
    err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(machineKeyStr)))
    if err != nil {
        log.Error().
            Caller().
            Err(err).
            Msg("Cannot parse machine key")
        nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
        http.Error(writer, "Cannot parse machine key", http.StatusBadRequest)

        return
    }
    registerRequest := tailcfg.RegisterRequest{}
    err = util.DecodeAndUnmarshalNaCl(body, &registerRequest, &machineKey, h.privateKey2019)
    if err != nil {
        log.Error().
            Caller().
            Err(err).
            Msg("Cannot decode message")
        nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
        http.Error(writer, "Cannot decode message", http.StatusBadRequest)

        return
    }

    h.handleRegister(writer, req, registerRequest, machineKey, false)
}
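The //go:build ts2019 constraint at the top of this new file means the legacy NaCl handler is only compiled into binaries built with -tags ts2019. The usual companion to such a gated file is a stub guarded by the inverse tag so the default build still links; the stub below is an illustrative assumption, not code from this compare.

//go:build !ts2019

package hscontrol

import "net/http"

// RegistrationHandler is a hypothetical stub for builds without the
// ts2019 tag: the legacy /machine/:mkey endpoint simply refuses clients.
func (h *Headscale) RegistrationHandler(
	writer http.ResponseWriter,
	req *http.Request,
) {
	http.Error(writer, "Legacy protocol not enabled", http.StatusGone)
}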
@@ -39,19 +39,7 @@ func (ns *noiseServer) NoiseRegistrationHandler(
        return
    }

    // Reject unsupported versions
    if registerRequest.Version < MinimumCapVersion {
        log.Info().
            Caller().
            Int("min_version", int(MinimumCapVersion)).
            Int("client_version", int(registerRequest.Version)).
            Msg("unsupported client connected")
        http.Error(writer, "Internal error", http.StatusBadRequest)

        return
    }

    ns.nodeKey = registerRequest.NodeKey

    ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer())
    ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer(), true)
}
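Both entry points end in the same call: the legacy handler above passes isNoise=false, while the Noise handler passes true. A compressed sketch of that shared funnel; the branch shown mirrors the Auth.AuthKey check visible in the handleNodeExpiredOrLoggedOut hunk, and the elided parts stand in for the full routing logic in auth.go.

package hscontrol

import (
	"net/http"

	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

// handleRegister (sketch): the single shared registration path for both
// transports. The isNoise flag only selects the response encoding and
// logging; the registration logic itself is transport-agnostic.
func (h *Headscale) handleRegister(
	writer http.ResponseWriter,
	req *http.Request,
	registerRequest tailcfg.RegisterRequest,
	machineKey key.MachinePublic,
	isNoise bool,
) {
	if registerRequest.Auth.AuthKey != "" {
		// Pre-auth key flow; isNoise flows through to response marshalling.
		h.handleAuthKey(writer, registerRequest, machineKey, isNoise)

		return
	}

	// ... interactive registration, key refresh, and logout handling ...
}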
@@ -35,6 +35,9 @@ func (s *Suite) TestGetUsedIps(c *check.C) {

    node := types.Node{
        ID:             0,
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -80,6 +83,9 @@ func (s *Suite) TestGetMultiIp(c *check.C) {

        node := types.Node{
            ID:             uint64(index),
            MachineKey:     "foo",
            NodeKey:        "bar",
            DiscoKey:       "faa",
            Hostname:       "testnode",
            UserID:         user.ID,
            RegisterMethod: util.RegisterMethodAuthKey,

@@ -167,6 +173,9 @@ func (s *Suite) TestGetAvailableIpNodeWithoutIP(c *check.C) {

    node := types.Node{
        ID:             0,
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,
@@ -2,16 +2,13 @@ package db

import (
    "context"
    "database/sql"
    "errors"
    "fmt"
    "net/netip"
    "strings"
    "sync"
    "time"

    "github.com/glebarez/sqlite"
    "github.com/go-gormigrate/gormigrate/v2"
    "github.com/juanfont/headscale/hscontrol/notifier"
    "github.com/juanfont/headscale/hscontrol/types"
    "github.com/juanfont/headscale/hscontrol/util"

@@ -22,11 +19,15 @@ import (
)

const (
    dbVersion = "1"
    Postgres  = "postgres"
    Sqlite    = "sqlite3"
)

var errDatabaseNotSupported = errors.New("database type not supported")
var (
    errValueNotFound        = errors.New("not found")
    errDatabaseNotSupported = errors.New("database type not supported")
)

// KV is a key-value store in a psql table. For future use...
// TODO(kradalby): Is this used for anything?
@@ -61,125 +62,80 @@ func NewHeadscaleDatabase(
        return nil, err
    }

    migrations := gormigrate.New(dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{
        // New migrations should be added as transactions at the end of this list.
        // The initial commit here is quite messy, completely out of order and
        // has no versioning and is the tech debt of not having versioned migrations
        // prior to this point. This first migration is all DB changes to bring a DB
        // up to 0.23.0.
        {
            ID: "202312101416",
            Migrate: func(tx *gorm.DB) error {
                if dbType == Postgres {
                    tx.Exec(`create extension if not exists "uuid-ossp";`)
    db := HSDatabase{
        db:       dbConn,
        notifier: notifier,

        ipPrefixes: ipPrefixes,
        baseDomain: baseDomain,
    }

    _ = tx.Migrator().RenameTable("namespaces", "users")
    log.Debug().Msgf("database %#v", dbConn)

    if dbType == Postgres {
        dbConn.Exec(`create extension if not exists "uuid-ossp";`)
    }

    _ = dbConn.Migrator().RenameTable("namespaces", "users")

    // the big rename from Machine to Node
    _ = tx.Migrator().RenameTable("machines", "nodes")
    _ = tx.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id")
    _ = dbConn.Migrator().RenameTable("machines", "nodes")
    _ = dbConn.Migrator().RenameColumn(&types.Route{}, "machine_id", "node_id")

    err = tx.AutoMigrate(types.User{})
    err = dbConn.AutoMigrate(types.User{})
    if err != nil {
        return err
        return nil, err
    }

    _ = tx.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id")
    _ = tx.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id")
    _ = dbConn.Migrator().RenameColumn(&types.Node{}, "namespace_id", "user_id")
    _ = dbConn.Migrator().RenameColumn(&types.PreAuthKey{}, "namespace_id", "user_id")

    _ = tx.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses")
    _ = tx.Migrator().RenameColumn(&types.Node{}, "name", "hostname")
    _ = dbConn.Migrator().RenameColumn(&types.Node{}, "ip_address", "ip_addresses")
    _ = dbConn.Migrator().RenameColumn(&types.Node{}, "name", "hostname")

    // GivenName is used as the primary source of DNS names, make sure
    // the field is populated and normalized if it was not when the
    // node was registered.
    _ = tx.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name")
    _ = dbConn.Migrator().RenameColumn(&types.Node{}, "nickname", "given_name")

    // If the Node table has a column for registered,
    // If the MacNodehine table has a column for registered,
    // find all occourences of "false" and drop them. Then
    // remove the column.
    if tx.Migrator().HasColumn(&types.Node{}, "registered") {
    if dbConn.Migrator().HasColumn(&types.Node{}, "registered") {
        log.Info().
            Msg(`Database has legacy "registered" column in node, removing...`)

        nodes := types.Nodes{}
        if err := tx.Not("registered").Find(&nodes).Error; err != nil {
        if err := dbConn.Not("registered").Find(&nodes).Error; err != nil {
            log.Error().Err(err).Msg("Error accessing db")
        }

        for _, node := range nodes {
            log.Info().
                Str("node", node.Hostname).
                Str("machine_key", node.MachineKey.ShortString()).
                Str("machine_key", node.MachineKey).
                Msg("Deleting unregistered node")
            if err := tx.Delete(&types.Node{}, node.ID).Error; err != nil {
            if err := dbConn.Delete(&types.Node{}, node.ID).Error; err != nil {
                log.Error().
                    Err(err).
                    Str("node", node.Hostname).
                    Str("machine_key", node.MachineKey.ShortString()).
                    Str("machine_key", node.MachineKey).
                    Msg("Error deleting unregistered node")
            }
        }

        err := tx.Migrator().DropColumn(&types.Node{}, "registered")
        err := dbConn.Migrator().DropColumn(&types.Node{}, "registered")
        if err != nil {
            log.Error().Err(err).Msg("Error dropping registered column")
        }
    }

    err = tx.AutoMigrate(&types.Route{})
    err = dbConn.AutoMigrate(&types.Route{})
    if err != nil {
        return err
        return nil, err
    }

    err = tx.AutoMigrate(&types.Node{})
    if err != nil {
        return err
    }

    // Ensure all keys have correct prefixes
    // https://github.com/tailscale/tailscale/blob/main/types/key/node.go#L35
    type result struct {
        ID         uint64
        MachineKey string
        NodeKey    string
        DiscoKey   string
    }
    var results []result
    err = tx.Raw("SELECT id, node_key, machine_key, disco_key FROM nodes").Find(&results).Error
    if err != nil {
        return err
    }

    for _, node := range results {
        mKey := node.MachineKey
        if !strings.HasPrefix(node.MachineKey, "mkey:") {
            mKey = "mkey:" + node.MachineKey
        }
        nKey := node.NodeKey
        if !strings.HasPrefix(node.NodeKey, "nodekey:") {
            nKey = "nodekey:" + node.NodeKey
        }

        dKey := node.DiscoKey
        if !strings.HasPrefix(node.DiscoKey, "discokey:") {
            dKey = "discokey:" + node.DiscoKey
        }

        err := tx.Exec(
            "UPDATE nodes SET machine_key = @mKey, node_key = @nKey, disco_key = @dKey WHERE ID = @id",
            sql.Named("mKey", mKey),
            sql.Named("nKey", nKey),
            sql.Named("dKey", dKey),
            sql.Named("id", node.ID),
        ).Error
        if err != nil {
            return err
        }
    }

    if tx.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
    if dbConn.Migrator().HasColumn(&types.Node{}, "enabled_routes") {
        log.Info().Msgf("Database has legacy enabled_routes column in node, migrating...")

        type NodeAux struct {

@@ -188,7 +144,7 @@ func NewHeadscaleDatabase(
        }

        nodesAux := []NodeAux{}
        err := tx.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error
        err := dbConn.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error
        if err != nil {
            log.Fatal().Err(err).Msg("Error accessing db")
        }

@@ -203,7 +159,7 @@ func NewHeadscaleDatabase(
                continue
            }

            err = tx.Preload("Node").
            err = dbConn.Preload("Node").
                Where("node_id = ? AND prefix = ?", node.ID, types.IPPrefix(prefix)).
                First(&types.Route{}).
                Error

@@ -221,7 +177,7 @@ func NewHeadscaleDatabase(
                Enabled: true,
                Prefix:  types.IPPrefix(prefix),
            }
            if err := tx.Create(&route).Error; err != nil {
            if err := dbConn.Create(&route).Error; err != nil {
                log.Error().Err(err).Msg("Error creating route")
            } else {
                log.Info().

@@ -232,15 +188,20 @@ func NewHeadscaleDatabase(
            }
        }

        err = tx.Migrator().DropColumn(&types.Node{}, "enabled_routes")
        err = dbConn.Migrator().DropColumn(&types.Node{}, "enabled_routes")
        if err != nil {
            log.Error().Err(err).Msg("Error dropping enabled_routes column")
        }
    }

    if tx.Migrator().HasColumn(&types.Node{}, "given_name") {
    err = dbConn.AutoMigrate(&types.Node{})
    if err != nil {
        return nil, err
    }

    if dbConn.Migrator().HasColumn(&types.Node{}, "given_name") {
        nodes := types.Nodes{}
        if err := tx.Find(&nodes).Error; err != nil {
        if err := dbConn.Find(&nodes).Error; err != nil {
            log.Error().Err(err).Msg("Error accessing db")
        }

@@ -257,9 +218,7 @@ func NewHeadscaleDatabase(
                    Msg("Failed to normalize node hostname in DB migration")
            }

            err = tx.Model(nodes[item]).Updates(types.Node{
                GivenName: normalizedHostname,
            }).Error
            err = db.RenameNode(nodes[item], normalizedHostname)
            if err != nil {
                log.Error().
                    Caller().

@@ -271,58 +230,30 @@ func NewHeadscaleDatabase(
        }
    }

    err = tx.AutoMigrate(&KV{})
    err = dbConn.AutoMigrate(&KV{})
    if err != nil {
        return err
        return nil, err
    }

    err = tx.AutoMigrate(&types.PreAuthKey{})
    err = dbConn.AutoMigrate(&types.PreAuthKey{})
    if err != nil {
        return err
        return nil, err
    }

    err = tx.AutoMigrate(&types.PreAuthKeyACLTag{})
    err = dbConn.AutoMigrate(&types.PreAuthKeyACLTag{})
    if err != nil {
        return err
        return nil, err
    }

    _ = tx.Migrator().DropTable("shared_machines")
    _ = dbConn.Migrator().DropTable("shared_machines")

    err = tx.AutoMigrate(&types.APIKey{})
    err = dbConn.AutoMigrate(&types.APIKey{})
    if err != nil {
        return err
        return nil, err
    }

    return nil
    },
    Rollback: func(tx *gorm.DB) error {
        return nil
    },
    },
    {
        // drop key-value table, it is not used, and has not contained
        // useful data for a long time or ever.
        ID: "202312101430",
        Migrate: func(tx *gorm.DB) error {
            return tx.Migrator().DropTable("kvs")
        },
        Rollback: func(tx *gorm.DB) error {
            return nil
        },
    },
    })

    if err = migrations.Migrate(); err != nil {
        log.Fatal().Err(err).Msgf("Migration failed: %v", err)
    }

    db := HSDatabase{
        db:       dbConn,
        notifier: notifier,

        ipPrefixes: ipPrefixes,
        baseDomain: baseDomain,
    }
    // TODO(kradalby): is this needed?
    err = db.setValue("db_version", dbVersion)

    return &db, err
}
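One side of this compare wraps all of the schema work above in gormigrate, with stable, ordered migration IDs (202312101416 and 202312101430), while the other side runs the same GORM migrator calls directly against dbConn at every startup. The versioned shape looks roughly like the sketch below; the migration bodies are placeholders for the renames and AutoMigrate calls shown in the hunk above.

package db

import (
	"github.com/go-gormigrate/gormigrate/v2"
	"gorm.io/gorm"
)

// runMigrations sketches the versioned-migration pattern: each step has
// a stable ID, runs exactly once, and is recorded by gormigrate so
// later startups skip it.
func runMigrations(dbConn *gorm.DB) error {
	migrations := gormigrate.New(dbConn, gormigrate.DefaultOptions, []*gormigrate.Migration{
		{
			// Catch-all migration bringing pre-versioning databases up to date.
			ID: "202312101416",
			Migrate: func(tx *gorm.DB) error {
				// ... table renames, AutoMigrate calls, data fixups ...
				return nil
			},
			Rollback: func(tx *gorm.DB) error { return nil },
		},
		{
			// Drop the unused key-value table.
			ID: "202312101430",
			Migrate: func(tx *gorm.DB) error {
				return tx.Migrator().DropTable("kvs")
			},
			Rollback: func(tx *gorm.DB) error { return nil },
		},
	})

	return migrations.Migrate()
}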
@@ -373,6 +304,39 @@ func openDB(dbType, connectionAddr string, debug bool) (*gorm.DB, error) {
    )
}

// getValue returns the value for the given key in KV.
func (hsdb *HSDatabase) getValue(key string) (string, error) {
    var row KV
    if result := hsdb.db.First(&row, "key = ?", key); errors.Is(
        result.Error,
        gorm.ErrRecordNotFound,
    ) {
        return "", errValueNotFound
    }

    return row.Value, nil
}

// setValue sets value for the given key in KV.
func (hsdb *HSDatabase) setValue(key string, value string) error {
    keyValue := KV{
        Key:   key,
        Value: value,
    }

    if _, err := hsdb.getValue(key); err == nil {
        hsdb.db.Model(&keyValue).Where("key = ?", key).Update("value", value)

        return nil
    }

    if err := hsdb.db.Create(keyValue).Error; err != nil {
        return fmt.Errorf("failed to create key value pair in the database: %w", err)
    }

    return nil
}

func (hsdb *HSDatabase) PingDB(ctx context.Context) error {
    ctx, cancel := context.WithTimeout(ctx, time.Second)
    defer cancel()
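The key-prefix migration above and the util.MachinePublicKeyStripPrefix / util.NodePublicKeyStripPrefix calls in the following node.go hunks are two views of the same storage convention: one side stores keys with the upstream text prefixes (mkey:, nodekey:, discokey:) and the other stores the bare value, stripping or restoring the prefix at the boundary. A small self-contained sketch of that convention; the helper names echo the util functions in the diff, but the bodies here are assumptions.

package util

import "strings"

const (
	machinePublicHexPrefix = "mkey:"
	nodePublicHexPrefix    = "nodekey:"
)

// ensurePrefix adds the expected type prefix when the stored value lacks it,
// mirroring the one-off fixup done in the DB migration above.
func ensurePrefix(key, prefix string) string {
	if !strings.HasPrefix(key, prefix) {
		return prefix + key
	}

	return key
}

// stripPrefix removes the type prefix for the bare-value storage scheme,
// mirroring the StripPrefix helpers used in the query hunks below.
func stripPrefix(key, prefix string) string {
	return strings.TrimPrefix(key, prefix)
}

// Example: a value normalized with ensurePrefix round-trips cleanly:
// stripPrefix(ensurePrefix("abc123", machinePublicHexPrefix), machinePublicHexPrefix) == "abc123"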
@@ -55,12 +55,17 @@ func (hsdb *HSDatabase) listPeers(node *types.Node) (types.Nodes, error) {
        Preload("User").
        Preload("Routes").
        Where("node_key <> ?",
            node.NodeKey.String()).Find(&nodes).Error; err != nil {
            node.NodeKey).Find(&nodes).Error; err != nil {
        return types.Nodes{}, err
    }

    sort.Slice(nodes, func(i, j int) bool { return nodes[i].ID < nodes[j].ID })

    log.Trace().
        Caller().
        Str("node", node.Hostname).
        Msgf("Found peers: %s", nodes.String())

    return nodes, nil
}

@@ -171,19 +176,13 @@ func (hsdb *HSDatabase) GetNodeByMachineKey(
    hsdb.mu.RLock()
    defer hsdb.mu.RUnlock()

    return hsdb.getNodeByMachineKey(machineKey)
}

func (hsdb *HSDatabase) getNodeByMachineKey(
    machineKey key.MachinePublic,
) (*types.Node, error) {
    mach := types.Node{}
    if result := hsdb.db.
        Preload("AuthKey").
        Preload("AuthKey.User").
        Preload("User").
        Preload("Routes").
        First(&mach, "machine_key = ?", machineKey.String()); result.Error != nil {
        First(&mach, "machine_key = ?", util.MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
        return nil, result.Error
    }

@@ -204,7 +203,7 @@ func (hsdb *HSDatabase) GetNodeByNodeKey(
        Preload("User").
        Preload("Routes").
        First(&node, "node_key = ?",
            nodeKey.String()); result.Error != nil {
            util.NodePublicKeyStripPrefix(nodeKey)); result.Error != nil {
        return nil, result.Error
    }

@@ -225,9 +224,9 @@ func (hsdb *HSDatabase) GetNodeByAnyKey(
        Preload("User").
        Preload("Routes").
        First(&node, "machine_key = ? OR node_key = ? OR node_key = ?",
            machineKey.String(),
            nodeKey.String(),
            oldNodeKey.String()); result.Error != nil {
            util.MachinePublicKeyStripPrefix(machineKey),
            util.NodePublicKeyStripPrefix(nodeKey),
            util.NodePublicKeyStripPrefix(oldNodeKey)); result.Error != nil {
        return nil, result.Error
    }

@@ -253,10 +252,6 @@ func (hsdb *HSDatabase) SetTags(
    hsdb.mu.Lock()
    defer hsdb.mu.Unlock()

    if len(tags) == 0 {
        return nil
    }

    newTags := []string{}
    for _, tag := range tags {
        if !util.StringOrPrefixListContains(newTags, tag) {

@@ -270,14 +265,10 @@ func (hsdb *HSDatabase) SetTags(
        return fmt.Errorf("failed to update tags for node in the database: %w", err)
    }

    stateUpdate := types.StateUpdate{
    hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
        Type:        types.StatePeerChanged,
        ChangeNodes: types.Nodes{node},
        Message:     "called from db.SetTags",
    }
    if stateUpdate.Valid() {
        hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
    }
        Changed: types.Nodes{node},
    }, node.MachineKey)

    return nil
}

@@ -310,14 +301,10 @@ func (hsdb *HSDatabase) RenameNode(node *types.Node, newName string) error {
        return fmt.Errorf("failed to rename node in the database: %w", err)
    }

    stateUpdate := types.StateUpdate{
    hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
        Type:        types.StatePeerChanged,
        ChangeNodes: types.Nodes{node},
        Message:     "called from db.RenameNode",
    }
    if stateUpdate.Valid() {
        hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
    }
        Changed: types.Nodes{node},
    }, node.MachineKey)

    return nil
}

@@ -340,28 +327,10 @@ func (hsdb *HSDatabase) nodeSetExpiry(node *types.Node, expiry time.Time) error
        )
    }

    node.Expiry = &expiry

    stateSelfUpdate := types.StateUpdate{
        Type:        types.StateSelfUpdate,
        ChangeNodes: types.Nodes{node},
    }
    if stateSelfUpdate.Valid() {
        hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
    }

    stateUpdate := types.StateUpdate{
        Type: types.StatePeerChangedPatch,
        ChangePatches: []*tailcfg.PeerChange{
            {
                NodeID:    tailcfg.NodeID(node.ID),
                KeyExpiry: &expiry,
            },
        },
    }
    if stateUpdate.Valid() {
        hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
    }
    hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
        Type:    types.StatePeerChanged,
        Changed: types.Nodes{node},
    }, node.MachineKey)

    return nil
}

@@ -385,13 +354,10 @@ func (hsdb *HSDatabase) deleteNode(node *types.Node) error {
        return err
    }

    stateUpdate := types.StateUpdate{
    hsdb.notifier.NotifyAll(types.StateUpdate{
        Type:    types.StatePeerRemoved,
        Removed: []tailcfg.NodeID{tailcfg.NodeID(node.ID)},
    }
    if stateUpdate.Valid() {
        hsdb.notifier.NotifyAll(stateUpdate)
    }
    })

    return nil
}

@@ -410,7 +376,7 @@ func (hsdb *HSDatabase) UpdateLastSeen(node *types.Node) error {

func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
    cache *cache.Cache,
    mkey key.MachinePublic,
    nodeKeyStr string,
    userName string,
    nodeExpiry *time.Time,
    registrationMethod string,

@@ -418,14 +384,20 @@ func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
    hsdb.mu.Lock()
    defer hsdb.mu.Unlock()

    nodeKey := key.NodePublic{}
    err := nodeKey.UnmarshalText([]byte(nodeKeyStr))
    if err != nil {
        return nil, err
    }

    log.Debug().
        Str("machine_key", mkey.ShortString()).
        Str("nodeKey", nodeKey.ShortString()).
        Str("userName", userName).
        Str("registrationMethod", registrationMethod).
        Str("expiresAt", fmt.Sprintf("%v", nodeExpiry)).
        Msg("Registering node from API/CLI or auth callback")

    if nodeInterface, ok := cache.Get(mkey.String()); ok {
    if nodeInterface, ok := cache.Get(util.NodePublicKeyStripPrefix(nodeKey)); ok {
        if registrationNode, ok := nodeInterface.(types.Node); ok {
            user, err := hsdb.getUser(userName)
            if err != nil {

@@ -453,7 +425,7 @@ func (hsdb *HSDatabase) RegisterNodeFromAuthCallback(
            )

            if err == nil {
                cache.Delete(mkey.String())
                cache.Delete(nodeKeyStr)
            }

            return node, err
@@ -476,8 +448,8 @@ func (hsdb *HSDatabase) RegisterNode(node types.Node) (*types.Node, error) {
func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) {
    log.Debug().
        Str("node", node.Hostname).
        Str("machine_key", node.MachineKey.ShortString()).
        Str("node_key", node.NodeKey.ShortString()).
        Str("machine_key", node.MachineKey).
        Str("node_key", node.NodeKey).
        Str("user", node.User.Name).
        Msg("Registering node")

@@ -492,8 +464,8 @@ func (hsdb *HSDatabase) registerNode(node types.Node) (*types.Node, error) {
        log.Trace().
            Caller().
            Str("node", node.Hostname).
            Str("machine_key", node.MachineKey.ShortString()).
            Str("node_key", node.NodeKey.ShortString()).
            Str("machine_key", node.MachineKey).
            Str("node_key", node.NodeKey).
            Str("user", node.User.Name).
            Msg("Node authorized again")

@@ -535,7 +507,7 @@ func (hsdb *HSDatabase) NodeSetNodeKey(node *types.Node, nodeKey key.NodePublic)
    defer hsdb.mu.Unlock()

    if err := hsdb.db.Model(node).Updates(types.Node{
        NodeKey: nodeKey,
        NodeKey: util.NodePublicKeyStripPrefix(nodeKey),
    }).Error; err != nil {
        return err
    }

@@ -552,7 +524,7 @@ func (hsdb *HSDatabase) NodeSetMachineKey(
    defer hsdb.mu.Unlock()

    if err := hsdb.db.Model(node).Updates(types.Node{
        MachineKey: machineKey,
        MachineKey: util.MachinePublicKeyStripPrefix(machineKey),
    }).Error; err != nil {
        return err
    }

@@ -663,6 +635,20 @@ func (hsdb *HSDatabase) IsRoutesEnabled(node *types.Node, routeStr string) bool
    return false
}

func (hsdb *HSDatabase) ListOnlineNodes(
    node *types.Node,
) (map[tailcfg.NodeID]bool, error) {
    hsdb.mu.RLock()
    defer hsdb.mu.RUnlock()

    peers, err := hsdb.listPeers(node)
    if err != nil {
        return nil, err
    }

    return peers.OnlineNodeMap(), nil
}

// enableRoutes enables new routes based on a list of new routes.
func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) error {
    newRoutes := make([]netip.Prefix, len(routeStrs))

@@ -714,30 +700,10 @@ func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) erro
        }
    }

    // Ensure the node has the latest routes when notifying the other
    // nodes
    nRoutes, err := hsdb.getNodeRoutes(node)
    if err != nil {
        return fmt.Errorf("failed to read back routes: %w", err)
    }

    node.Routes = nRoutes

    log.Trace().
        Caller().
        Str("node", node.Hostname).
        Strs("routes", routeStrs).
        Msg("enabling routes")

    stateUpdate := types.StateUpdate{
    hsdb.notifier.NotifyWithIgnore(types.StateUpdate{
        Type:        types.StatePeerChanged,
        ChangeNodes: types.Nodes{node},
        Message:     "called from db.enableRoutes",
    }
    if stateUpdate.Valid() {
        hsdb.notifier.NotifyWithIgnore(
            stateUpdate, node.MachineKey.String())
    }
        Changed: types.Nodes{node},
    }, node.MachineKey)

    return nil
}

@@ -768,10 +734,7 @@ func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
    return normalizedHostname, nil
}

func (hsdb *HSDatabase) GenerateGivenName(
    mkey key.MachinePublic,
    suppliedName string,
) (string, error) {
func (hsdb *HSDatabase) GenerateGivenName(machineKey string, suppliedName string) (string, error) {
    hsdb.mu.RLock()
    defer hsdb.mu.RUnlock()

@@ -786,14 +749,8 @@ func (hsdb *HSDatabase) GenerateGivenName(
        return "", err
    }

    var nodeFound *types.Node
    for idx, node := range nodes {
        if node.GivenName == givenName {
            nodeFound = nodes[idx]
        }
    }

    if nodeFound != nil && nodeFound.MachineKey.String() != mkey.String() {
    for _, node := range nodes {
        if node.MachineKey != machineKey && node.GivenName == givenName {
            postfixedName, err := generateGivenName(suppliedName, true)
            if err != nil {
                return "", err

@@ -801,6 +758,7 @@ func (hsdb *HSDatabase) GenerateGivenName(

            givenName = postfixedName
        }
    }

    return givenName, nil
}

@@ -866,30 +824,33 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
    // checked everything.
    started := time.Now()

    expiredNodes := make([]*types.Node, 0)

    nodes, err := hsdb.listNodes()
    users, err := hsdb.listUsers()
    if err != nil {
        log.Error().
            Err(err).
            Msg("Error listing nodes to find expired nodes")
        log.Error().Err(err).Msg("Error listing users")

        return time.Unix(0, 0)
    }

    for _, user := range users {
        nodes, err := hsdb.listNodesByUser(user.Name)
        if err != nil {
            log.Error().
                Err(err).
                Str("user", user.Name).
                Msg("Error listing nodes in user")

            return time.Unix(0, 0)
        }

        expired := make([]tailcfg.NodeID, 0)
        for index, node := range nodes {
            if node.IsExpired() &&
                // TODO(kradalby): Replace this, it is very spammy
                // It will notify about all nodes that has been expired.
                // It should only notify about expired nodes since _last check_.
                node.Expiry.After(lastCheck) {
                expiredNodes = append(expiredNodes, &nodes[index])
                expired = append(expired, tailcfg.NodeID(node.ID))

                // Do not use setNodeExpiry as that has a notifier hook, which
                // can cause a deadlock, we are updating all changed nodes later
                // and there is no point in notifiying twice.
                if err := hsdb.db.Model(nodes[index]).Updates(types.Node{
                    Expiry: &started,
                }).Error; err != nil {
                now := time.Now()
                err := hsdb.nodeSetExpiry(nodes[index], now)
                if err != nil {
                    log.Error().
                        Err(err).
                        Str("node", node.Hostname).

@@ -904,31 +865,11 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
            }
        }

        expired := make([]*tailcfg.PeerChange, len(expiredNodes))
        for idx, node := range expiredNodes {
            expired[idx] = &tailcfg.PeerChange{
                NodeID:    tailcfg.NodeID(node.ID),
                KeyExpiry: &started,
            }
        }

        // Inform the peers of a node with a lightweight update.
        stateUpdate := types.StateUpdate{
            Type:          types.StatePeerChangedPatch,
            ChangePatches: expired,
        }
        if stateUpdate.Valid() {
            hsdb.notifier.NotifyAll(stateUpdate)
        }

        // Inform the node itself that it has expired.
        for _, node := range expiredNodes {
            stateSelfUpdate := types.StateUpdate{
                Type:        types.StateSelfUpdate,
                ChangeNodes: types.Nodes{node},
            }
            if stateSelfUpdate.Valid() {
                hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
        if len(expired) > 0 {
            hsdb.notifier.NotifyAll(types.StateUpdate{
                Type:    types.StatePeerRemoved,
                Removed: expired,
            })
        }
    }
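A recurring change through node.go above is the shape of the notifier call: one side builds a types.StateUpdate, checks stateUpdate.Valid(), and passes the machine key as a string to skip the originating node; the other passes a literal StateUpdate with a Changed field straight into NotifyWithIgnore. A minimal sketch of the ignore-the-sender fan-out pattern, with simplified stand-in types rather than the real headscale ones:

package notifier

import "sync"

// StateUpdate is a simplified stand-in for types.StateUpdate.
type StateUpdate struct {
	Type    int
	Changed []string // hostnames, standing in for types.Nodes
}

// Notifier fans updates out to per-node channels keyed by machine key.
type Notifier struct {
	mu    sync.RWMutex
	nodes map[string]chan<- StateUpdate
}

// NotifyWithIgnore sends the update to every registered node except the
// listed machine keys, typically the node that caused the change and
// therefore already knows about it.
func (n *Notifier) NotifyWithIgnore(update StateUpdate, ignore ...string) {
	n.mu.RLock()
	defer n.mu.RUnlock()

	for key, ch := range n.nodes {
		if contains(ignore, key) {
			continue
		}

		ch <- update
	}
}

func contains(list []string, item string) bool {
	for _, candidate := range list {
		if candidate == item {
			return true
		}
	}

	return false
}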
@@ -12,7 +12,6 @@ import (
    "github.com/juanfont/headscale/hscontrol/types"
    "github.com/juanfont/headscale/hscontrol/util"
    "gopkg.in/check.v1"
    "tailscale.com/tailcfg"
    "tailscale.com/types/key"
)

@@ -26,13 +25,11 @@ func (s *Suite) TestGetNode(c *check.C) {
    _, err = db.GetNode("test", "testnode")
    c.Assert(err, check.NotNil)

    nodeKey := key.NewNode()
    machineKey := key.NewMachine()

    node := &types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -54,13 +51,11 @@ func (s *Suite) TestGetNodeByID(c *check.C) {
    _, err = db.GetNodeByID(0)
    c.Assert(err, check.NotNil)

    nodeKey := key.NewNode()
    machineKey := key.NewMachine()

    node := types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -87,8 +82,9 @@ func (s *Suite) TestGetNodeByNodeKey(c *check.C) {

    node := types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     util.MachinePublicKeyStripPrefix(machineKey.Public()),
        NodeKey:        util.NodePublicKeyStripPrefix(nodeKey.Public()),
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -117,8 +113,9 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {

    node := types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     util.MachinePublicKeyStripPrefix(machineKey.Public()),
        NodeKey:        util.NodePublicKeyStripPrefix(nodeKey.Public()),
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -133,14 +130,11 @@ func (s *Suite) TestGetNodeByAnyNodeKey(c *check.C) {
func (s *Suite) TestHardDeleteNode(c *check.C) {
    user, err := db.CreateUser("test")
    c.Assert(err, check.IsNil)

    nodeKey := key.NewNode()
    machineKey := key.NewMachine()

    node := types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testnode3",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -166,13 +160,11 @@ func (s *Suite) TestListPeers(c *check.C) {
    c.Assert(err, check.NotNil)

    for index := 0; index <= 10; index++ {
        nodeKey := key.NewNode()
        machineKey := key.NewMachine()

        node := types.Node{
            ID:             uint64(index),
            MachineKey:     machineKey.Public(),
            NodeKey:        nodeKey.Public(),
            MachineKey:     "foo" + strconv.Itoa(index),
            NodeKey:        "bar" + strconv.Itoa(index),
            DiscoKey:       "faa" + strconv.Itoa(index),
            Hostname:       "testnode" + strconv.Itoa(index),
            UserID:         user.ID,
            RegisterMethod: util.RegisterMethodAuthKey,

@@ -213,13 +205,11 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
    c.Assert(err, check.NotNil)

    for index := 0; index <= 10; index++ {
        nodeKey := key.NewNode()
        machineKey := key.NewMachine()

        node := types.Node{
            ID:             uint64(index),
            MachineKey:     machineKey.Public(),
            NodeKey:        nodeKey.Public(),
            MachineKey:     "foo" + strconv.Itoa(index),
            NodeKey:        "bar" + strconv.Itoa(index),
            DiscoKey:       "faa" + strconv.Itoa(index),
            IPAddresses: types.NodeAddresses{
                netip.MustParseAddr(fmt.Sprintf("100.64.0.%v", strconv.Itoa(index+1))),
            },

@@ -298,13 +288,11 @@ func (s *Suite) TestExpireNode(c *check.C) {
    _, err = db.GetNode("test", "testnode")
    c.Assert(err, check.NotNil)

    nodeKey := key.NewNode()
    machineKey := key.NewMachine()

    node := &types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -357,15 +345,11 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
    _, err = db.GetNode("user-1", "testnode")
    c.Assert(err, check.NotNil)

    nodeKey := key.NewNode()
    machineKey := key.NewMachine()

    machineKey2 := key.NewMachine()

    node := &types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     "node-key-1",
        NodeKey:        "node-key-1",
        DiscoKey:       "disco-key-1",
        Hostname:       "hostname-1",
        GivenName:      "hostname-1",
        UserID:         user1.ID,

@@ -374,20 +358,25 @@ func (s *Suite) TestGenerateGivenName(c *check.C) {
    }
    db.db.Save(node)

    givenName, err := db.GenerateGivenName(machineKey2.Public(), "hostname-2")
    givenName, err := db.GenerateGivenName("node-key-2", "hostname-2")
    comment := check.Commentf("Same user, unique nodes, unique hostnames, no conflict")
    c.Assert(err, check.IsNil, comment)
    c.Assert(givenName, check.Equals, "hostname-2", comment)

    givenName, err = db.GenerateGivenName(machineKey.Public(), "hostname-1")
    givenName, err = db.GenerateGivenName("node-key-1", "hostname-1")
    comment = check.Commentf("Same user, same node, same hostname, no conflict")
    c.Assert(err, check.IsNil, comment)
    c.Assert(givenName, check.Equals, "hostname-1", comment)

    givenName, err = db.GenerateGivenName(machineKey2.Public(), "hostname-1")
    givenName, err = db.GenerateGivenName("node-key-2", "hostname-1")
    comment = check.Commentf("Same user, unique nodes, same hostname, conflict")
    c.Assert(err, check.IsNil, comment)
    c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment)

    givenName, err = db.GenerateGivenName("node-key-2", "hostname-1")
    comment = check.Commentf("Unique users, unique nodes, same hostname, conflict")
    c.Assert(err, check.IsNil, comment)
    c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", NodeGivenNameHashLength), comment)
}

func (s *Suite) TestSetTags(c *check.C) {

@@ -400,13 +389,11 @@ func (s *Suite) TestSetTags(c *check.C) {
    _, err = db.GetNode("test", "testnode")
    c.Assert(err, check.NotNil)

    nodeKey := key.NewNode()
    machineKey := key.NewMachine()

    node := &types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testnode",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -578,7 +565,6 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {
    c.Assert(err, check.IsNil)

    nodeKey := key.NewNode()
    machineKey := key.NewMachine()

    defaultRouteV4 := netip.MustParsePrefix("0.0.0.0/0")
    defaultRouteV6 := netip.MustParsePrefix("::/0")

@@ -588,13 +574,14 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {

    node := types.Node{
        ID:             0,
        MachineKey:     machineKey.Public(),
        NodeKey:        nodeKey.Public(),
        MachineKey:     "foo",
        NodeKey:        util.NodePublicKeyStripPrefix(nodeKey.Public()),
        DiscoKey:       "faa",
        Hostname:       "test",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,
        AuthKeyID:      uint(pak.ID),
        Hostinfo: &tailcfg.Hostinfo{
        HostInfo: types.HostInfo{
            RequestTags: []string{"tag:exit"},
            RoutableIPs: []netip.Prefix{defaultRouteV4, defaultRouteV6, route1, route2},
        },

@@ -603,9 +590,8 @@ func (s *Suite) TestAutoApproveRoutes(c *check.C) {

    db.db.Save(&node)

    sendUpdate, err := db.SaveNodeRoutes(&node)
    err = db.SaveNodeRoutes(&node)
    c.Assert(err, check.IsNil)
    c.Assert(sendUpdate, check.Equals, false)

    node0ByID, err := db.GetNodeByID(0)
    c.Assert(err, check.IsNil)
@@ -77,6 +77,9 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {

    node := types.Node{
        ID:             0,
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testest",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -98,6 +101,9 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {

    node := types.Node{
        ID:             1,
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testest",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -132,6 +138,9 @@ func (*Suite) TestEphemeralKey(c *check.C) {
    now := time.Now().Add(-time.Second * 30)
    node := types.Node{
        ID:             0,
        MachineKey:     "foo",
        NodeKey:        "bar",
        DiscoKey:       "faa",
        Hostname:       "testest",
        UserID:         user.ID,
        RegisterMethod: util.RegisterMethodAuthKey,

@@ -198,5 +207,5 @@ func (*Suite) TestPreAuthKeyACLTags(c *check.C) {

    listedPaks, err := db.ListPreAuthKeys("test8")
    c.Assert(err, check.IsNil)
    c.Assert(listedPaks[0].Proto().GetAclTags(), check.DeepEquals, tags)
    c.Assert(listedPaks[0].Proto().AclTags, check.DeepEquals, tags)
}
@ -7,9 +7,7 @@ import (
|
|||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/samber/lo"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
var ErrRouteIsNotAvailable = errors.New("route is not available")
|
||||
|
@ -23,38 +21,7 @@ func (hsdb *HSDatabase) GetRoutes() (types.Routes, error) {
|
|||
|
||||
func (hsdb *HSDatabase) getRoutes() (types.Routes, error) {
|
||||
var routes types.Routes
|
||||
err := hsdb.db.
|
||||
Preload("Node").
|
||||
Preload("Node.User").
|
||||
Find(&routes).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) getAdvertisedAndEnabledRoutes() (types.Routes, error) {
|
||||
var routes types.Routes
|
||||
err := hsdb.db.
|
||||
Preload("Node").
|
||||
Preload("Node.User").
|
||||
Where("advertised = ? AND enabled = ?", true, true).
|
||||
Find(&routes).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return routes, nil
|
||||
}
|
||||
|
||||
func (hsdb *HSDatabase) getRoutesByPrefix(pref netip.Prefix) (types.Routes, error) {
|
||||
var routes types.Routes
|
||||
err := hsdb.db.
|
||||
Preload("Node").
|
||||
Preload("Node.User").
|
||||
Where("prefix = ?", types.IPPrefix(pref)).
|
||||
Find(&routes).Error
|
||||
err := hsdb.db.Preload("Node").Find(&routes).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -73,7 +40,6 @@ func (hsdb *HSDatabase) getNodeAdvertisedRoutes(node *types.Node) (types.Routes,
|
|||
var routes types.Routes
|
||||
err := hsdb.db.
|
||||
Preload("Node").
|
||||
Preload("Node.User").
|
||||
Where("node_id = ? AND advertised = true", node.ID).
|
||||
Find(&routes).Error
|
||||
if err != nil {
|
||||
|
@ -94,7 +60,6 @@ func (hsdb *HSDatabase) getNodeRoutes(node *types.Node) (types.Routes, error) {
|
|||
var routes types.Routes
|
||||
err := hsdb.db.
|
||||
Preload("Node").
|
||||
Preload("Node.User").
|
||||
Where("node_id = ?", node.ID).
|
||||
Find(&routes).Error
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
|
@ -113,10 +78,7 @@ func (hsdb *HSDatabase) GetRoute(id uint64) (*types.Route, error) {
|
|||
|
||||
func (hsdb *HSDatabase) getRoute(id uint64) (*types.Route, error) {
|
||||
var route types.Route
|
||||
err := hsdb.db.
|
||||
Preload("Node").
|
||||
Preload("Node.User").
|
||||
First(&route, id).Error
|
||||
err := hsdb.db.Preload("Node").First(&route, id).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -160,26 +122,21 @@ func (hsdb *HSDatabase) DisableRoute(id uint64) error {
		return err
	}

	var routes types.Routes
	node := route.Node

	// Tailscale requires both IPv4 and IPv6 exit routes to
	// be enabled at the same time, as per
	// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
	if !route.IsExitRoute() {
		err = hsdb.failoverRouteWithNotify(route)
		if err != nil {
			return err
		}

		route.Enabled = false
		route.IsPrimary = false
		err = hsdb.db.Save(route).Error
		if err != nil {
			return err
		}
	} else {
		routes, err = hsdb.getNodeRoutes(&node)

		return hsdb.handlePrimarySubnetFailover()
	}

	routes, err := hsdb.getNodeRoutes(&route.Node)
	if err != nil {
		return err
	}

@@ -194,27 +151,8 @@ func (hsdb *HSDatabase) DisableRoute(id uint64) error {
			}
		}
	}
	}

	if routes == nil {
		routes, err = hsdb.getNodeRoutes(&node)
		if err != nil {
			return err
		}
	}

	node.Routes = routes

	stateUpdate := types.StateUpdate{
		Type:        types.StatePeerChanged,
		ChangeNodes: types.Nodes{&node},
		Message:     "called from db.DisableRoute",
	}
	if stateUpdate.Valid() {
		hsdb.notifier.NotifyAll(stateUpdate)
	}

	return nil
	return hsdb.handlePrimarySubnetFailover()
}

func (hsdb *HSDatabase) DeleteRoute(id uint64) error {

@@ -226,23 +164,18 @@ func (hsdb *HSDatabase) DeleteRoute(id uint64) error {
		return err
	}

	var routes types.Routes
	node := route.Node

	// Tailscale requires both IPv4 and IPv6 exit routes to
	// be enabled at the same time, as per
	// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
	if !route.IsExitRoute() {
		err := hsdb.failoverRouteWithNotify(route)
		if err != nil {
			return nil
		}

		if err := hsdb.db.Unscoped().Delete(&route).Error; err != nil {
			return err
		}
	} else {
		routes, err := hsdb.getNodeRoutes(&node)

		return hsdb.handlePrimarySubnetFailover()
	}

	routes, err := hsdb.getNodeRoutes(&route.Node)
	if err != nil {
		return err
	}

@@ -257,27 +190,8 @@ func (hsdb *HSDatabase) DeleteRoute(id uint64) error {
		if err := hsdb.db.Unscoped().Delete(&routesToDelete).Error; err != nil {
			return err
		}
	}

	if routes == nil {
		routes, err = hsdb.getNodeRoutes(&node)
		if err != nil {
			return err
		}
	}

	node.Routes = routes

	stateUpdate := types.StateUpdate{
		Type:        types.StatePeerChanged,
		ChangeNodes: types.Nodes{&node},
		Message:     "called from db.DeleteRoute",
	}
	if stateUpdate.Valid() {
		hsdb.notifier.NotifyAll(stateUpdate)
	}

	return nil
	return hsdb.handlePrimarySubnetFailover()
}

func (hsdb *HSDatabase) deleteNodeRoutes(node *types.Node) error {

@@ -290,13 +204,9 @@ func (hsdb *HSDatabase) deleteNodeRoutes(node *types.Node) error {
		if err := hsdb.db.Unscoped().Delete(&routes[i]).Error; err != nil {
			return err
		}

		// TODO(kradalby): This is a bit too aggressive, we could probably
		// figure out which routes need to be failed over rather than all.
		hsdb.failoverRouteWithNotify(&routes[i])
	}

	return nil
	return hsdb.handlePrimarySubnetFailover()
}

// isUniquePrefix returns if there is another node providing the same route already.

@@ -349,26 +259,22 @@ func (hsdb *HSDatabase) GetNodePrimaryRoutes(node *types.Node) (types.Routes, er

// SaveNodeRoutes takes a node and updates the database with
// the new routes.
// It returns a bool indicating whether an update should be sent, as the
// saved route impacts nodes.
func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) (bool, error) {
func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) error {
	hsdb.mu.Lock()
	defer hsdb.mu.Unlock()

	return hsdb.saveNodeRoutes(node)
}

func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
	sendUpdate := false

func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) error {
	currentRoutes := types.Routes{}
	err := hsdb.db.Where("node_id = ?", node.ID).Find(&currentRoutes).Error
	if err != nil {
		return sendUpdate, err
		return err
	}

	advertisedRoutes := map[netip.Prefix]bool{}
	for _, prefix := range node.Hostinfo.RoutableIPs {
	for _, prefix := range node.HostInfo.RoutableIPs {
		advertisedRoutes[prefix] = false
	}

@@ -384,14 +290,7 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
			currentRoutes[pos].Advertised = true
			err := hsdb.db.Save(&currentRoutes[pos]).Error
			if err != nil {
				return sendUpdate, err
			}

			// If a route that is newly "saved" is already
			// enabled, set sendUpdate to true as it is now
			// available.
			if route.Enabled {
				sendUpdate = true
				return err
			}
		}
		advertisedRoutes[netip.Prefix(route.Prefix)] = true

@@ -400,7 +299,7 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
			currentRoutes[pos].Enabled = false
			err := hsdb.db.Save(&currentRoutes[pos]).Error
			if err != nil {
				return sendUpdate, err
				return err
			}
		}
	}

@@ -415,223 +314,141 @@ func (hsdb *HSDatabase) saveNodeRoutes(node *types.Node) (bool, error) {
			}
			err := hsdb.db.Create(&route).Error
			if err != nil {
				return sendUpdate, err
				return err
			}
		}
	}

	return sendUpdate, nil
	return nil
}

// EnsureFailoverRouteIsAvailable takes a node and checks if the node's route
// currently has a functioning host that exposes the network.
func (hsdb *HSDatabase) EnsureFailoverRouteIsAvailable(node *types.Node) error {
	nodeRoutes, err := hsdb.getNodeRoutes(node)
	if err != nil {
		return nil
func (hsdb *HSDatabase) HandlePrimarySubnetFailover() error {
	hsdb.mu.Lock()
	defer hsdb.mu.Unlock()

	return hsdb.handlePrimarySubnetFailover()
}

func (hsdb *HSDatabase) handlePrimarySubnetFailover() error {
	// first, get all the enabled routes
	var routes types.Routes
	err := hsdb.db.
		Preload("Node").
		Where("advertised = ? AND enabled = ?", true, true).
		Find(&routes).Error
	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		log.Error().Err(err).Msg("error getting routes")
	}

	for _, nodeRoute := range nodeRoutes {
		routes, err := hsdb.getRoutesByPrefix(netip.Prefix(nodeRoute.Prefix))
	changedNodes := make(types.Nodes, 0)
	for pos, route := range routes {
		if route.IsExitRoute() {
			continue
		}

		node := &route.Node

		if !route.IsPrimary {
			_, err := hsdb.getPrimaryRoute(netip.Prefix(route.Prefix))
			if hsdb.isUniquePrefix(route) || errors.Is(err, gorm.ErrRecordNotFound) {
				log.Info().
					Str("prefix", netip.Prefix(route.Prefix).String()).
					Str("node", route.Node.GivenName).
					Msg("Setting primary route")
				routes[pos].IsPrimary = true
				err := hsdb.db.Save(&routes[pos]).Error
				if err != nil {
					log.Error().Err(err).Msg("error marking route as primary")

					return err
				}

		for _, route := range routes {
				changedNodes = append(changedNodes, node)

				continue
			}
		}

		if route.IsPrimary {
			// if we have a primary route, and the node is connected
			// nothing needs to be done.
			if hsdb.notifier.IsConnected(route.Node.MachineKey) {
			if route.Node.IsOnline() {
				continue
			}

			// if not, we need to failover the route
			err := hsdb.failoverRouteWithNotify(&route)
			if err != nil {
				return err
			}
		}
	}
	}
			// node offline, find a new primary
			log.Info().
				Str("node", route.Node.Hostname).
				Str("prefix", netip.Prefix(route.Prefix).String()).
				Msgf("node offline, finding a new primary subnet")

	return nil
}
			// find a new primary route
			var newPrimaryRoutes types.Routes
			err := hsdb.db.
				Preload("Node").
				Where("prefix = ? AND node_id != ? AND advertised = ? AND enabled = ?",
					route.Prefix,
					route.NodeID,
					true, true).
				Find(&newPrimaryRoutes).Error
			if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
				log.Error().Err(err).Msg("error finding new primary route")

func (hsdb *HSDatabase) FailoverNodeRoutesWithNotify(node *types.Node) error {
	routes, err := hsdb.getNodeRoutes(node)
	if err != nil {
		return nil
	}

	var changedKeys []key.MachinePublic

	for _, route := range routes {
		changed, err := hsdb.failoverRoute(&route)
		if err != nil {
			return err
		}

		changedKeys = append(changedKeys, changed...)
	}
			var newPrimaryRoute *types.Route
			for pos, r := range newPrimaryRoutes {
				if r.Node.IsOnline() {
					newPrimaryRoute = &newPrimaryRoutes[pos]

	changedKeys = lo.Uniq(changedKeys)

	var nodes types.Nodes

	for _, key := range changedKeys {
		node, err := hsdb.GetNodeByMachineKey(key)
		if err != nil {
			return err
		}

		nodes = append(nodes, node)
	}

	if nodes != nil {
		stateUpdate := types.StateUpdate{
			Type:        types.StatePeerChanged,
			ChangeNodes: nodes,
			Message:     "called from db.FailoverNodeRoutesWithNotify",
		}
		if stateUpdate.Valid() {
			hsdb.notifier.NotifyAll(stateUpdate)
		}
	}

	return nil
}

func (hsdb *HSDatabase) failoverRouteWithNotify(r *types.Route) error {
	changedKeys, err := hsdb.failoverRoute(r)
	if err != nil {
		return err
	}

	if len(changedKeys) == 0 {
		return nil
	}

	var nodes types.Nodes

	log.Trace().
		Str("hostname", r.Node.Hostname).
		Msg("loading machines with new primary routes from db")

	for _, key := range changedKeys {
		node, err := hsdb.getNodeByMachineKey(key)
		if err != nil {
			return err
		}

		nodes = append(nodes, node)
	}

	log.Trace().
		Str("hostname", r.Node.Hostname).
		Msg("notifying peers about primary route change")

	if nodes != nil {
		stateUpdate := types.StateUpdate{
			Type:        types.StatePeerChanged,
			ChangeNodes: nodes,
			Message:     "called from db.failoverRouteWithNotify",
		}
		if stateUpdate.Valid() {
			hsdb.notifier.NotifyAll(stateUpdate)
		}
	}

	log.Trace().
		Str("hostname", r.Node.Hostname).
		Msg("notified peers about primary route change")

	return nil
}

// failoverRoute takes a route that is no longer available,
// this can be either from:
// - being disabled
// - being deleted
// - host going offline
//
// and tries to find a new route to take over its place.
// If the given route was not primary, it returns early.
func (hsdb *HSDatabase) failoverRoute(r *types.Route) ([]key.MachinePublic, error) {
	if r == nil {
		return nil, nil
	}

	// This route is not a primary route, and it isn't
	// being served to nodes.
	if !r.IsPrimary {
		return nil, nil
	}

	// We do not have to failover exit nodes
	if r.IsExitRoute() {
		return nil, nil
	}

	routes, err := hsdb.getRoutesByPrefix(netip.Prefix(r.Prefix))
	if err != nil {
		return nil, err
	}

	var newPrimary *types.Route

	// Find a new suitable route
	for idx, route := range routes {
		if r.ID == route.ID {
			continue
		}

		if hsdb.notifier.IsConnected(route.Node.MachineKey) {
			newPrimary = &routes[idx]
			break
		}
	}

	// If a new route was not found/available,
	// return with an error.
	// We do not want to update the database as
	// the one currently marked as primary is the
	// best we got.
	if newPrimary == nil {
		return nil, nil
			if newPrimaryRoute == nil {
				log.Warn().
					Str("node", route.Node.Hostname).
					Str("prefix", netip.Prefix(route.Prefix).String()).
					Msgf("no alternative primary route found")

				continue
	}

	log.Trace().
		Str("hostname", newPrimary.Node.Hostname).
		Msg("found new primary, updating db")
			log.Info().
				Str("old_node", route.Node.Hostname).
				Str("prefix", netip.Prefix(route.Prefix).String()).
				Str("new_node", newPrimaryRoute.Node.Hostname).
				Msgf("found new primary route")

	// Remove primary from the old route
	r.IsPrimary = false
	err = hsdb.db.Save(&r).Error
			// disable the old primary route
			routes[pos].IsPrimary = false
			err = hsdb.db.Save(&routes[pos]).Error
	if err != nil {
		log.Error().Err(err).Msg("error disabling new primary route")
		log.Error().Err(err).Msg("error disabling old primary route")

		return nil, err
		return err
	}

	log.Trace().
		Str("hostname", newPrimary.Node.Hostname).
		Msg("removed primary from old route")

	// Set primary for the new primary
	newPrimary.IsPrimary = true
	err = hsdb.db.Save(&newPrimary).Error
			// enable the new primary route
			newPrimaryRoute.IsPrimary = true
			err = hsdb.db.Save(&newPrimaryRoute).Error
	if err != nil {
		log.Error().Err(err).Msg("error enabling new primary route")

		return nil, err
		return err
	}

	log.Trace().
		Str("hostname", newPrimary.Node.Hostname).
		Msg("set primary to new route")
			changedNodes = append(changedNodes, node)
		}
	}

	// Return a list of the machine keys of the changed nodes.
	return []key.MachinePublic{r.Node.MachineKey, newPrimary.Node.MachineKey}, nil
	if len(changedNodes) > 0 {
		hsdb.notifier.NotifyAll(types.StateUpdate{
			Type:    types.StatePeerChanged,
			Changed: changedNodes,
		})
	}

	return nil
}

// EnableAutoApprovedRoutes enables any routes advertised by a node that match the ACL autoApprovers policy.

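The failoverRoute logic above boils down to one selection step: among all routes advertising the same prefix, skip the failing one and promote the first candidate whose node is still connected to control. A minimal, self-contained sketch of that step, assuming simplified stand-in types (Route and the Online flag here are illustrative, not headscale's real types):

package main

import "fmt"

// Route is a simplified stand-in for headscale's types.Route.
type Route struct {
	ID        uint64
	Prefix    string
	NodeName  string
	IsPrimary bool
	Online    bool // whether the owning node is currently connected
}

// pickNewPrimary mirrors the selection loop in failoverRoute: skip the
// failing route itself and return the first candidate whose node is
// online. It returns nil when no better candidate exists, in which case
// the current primary is left untouched.
func pickNewPrimary(failing Route, candidates []Route) *Route {
	for i, c := range candidates {
		if c.ID == failing.ID {
			continue
		}
		if c.Online {
			return &candidates[i]
		}
	}
	return nil
}

func main() {
	failing := Route{ID: 1, Prefix: "10.0.0.0/24", NodeName: "a", IsPrimary: true}
	routes := []Route{
		failing,
		{ID: 2, Prefix: "10.0.0.0/24", NodeName: "b", Online: false},
		{ID: 3, Prefix: "10.0.0.0/24", NodeName: "c", Online: true},
	}
	if np := pickNewPrimary(failing, routes); np != nil {
		fmt.Printf("failing over %s from %s to %s\n", failing.Prefix, failing.NodeName, np.NodeName)
	}
}

In the real code, both the old and the new primary are then saved back to the database and their machine keys returned so peers can be notified.
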
@@ -2,19 +2,12 @@ package db

import (
	"net/netip"
	"os"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/juanfont/headscale/hscontrol/notifier"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/stretchr/testify/assert"
	"gopkg.in/check.v1"
	"gorm.io/gorm"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

func (s *Suite) TestGetRoutes(c *check.C) {

@@ -36,17 +29,19 @@ func (s *Suite) TestGetRoutes(c *check.C) {

	node := types.Node{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "test_get_route_node",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		Hostinfo:       &hostInfo,
		HostInfo:       types.HostInfo(hostInfo),
	}
	db.db.Save(&node)

	su, err := db.SaveNodeRoutes(&node)
	err = db.SaveNodeRoutes(&node)
	c.Assert(err, check.IsNil)
	c.Assert(su, check.Equals, false)

	advertisedRoutes, err := db.GetAdvertisedRoutes(&node)
	c.Assert(err, check.IsNil)

@@ -85,17 +80,19 @@ func (s *Suite) TestGetEnableRoutes(c *check.C) {

	node := types.Node{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "test_enable_route_node",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		Hostinfo:       &hostInfo,
		HostInfo:       types.HostInfo(hostInfo),
	}
	db.db.Save(&node)

	sendUpdate, err := db.SaveNodeRoutes(&node)
	err = db.SaveNodeRoutes(&node)
	c.Assert(err, check.IsNil)
	c.Assert(sendUpdate, check.Equals, false)

	availableRoutes, err := db.GetAdvertisedRoutes(&node)
	c.Assert(err, check.IsNil)

@@ -157,17 +154,19 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
	}
	node1 := types.Node{
		ID:             1,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "test_enable_route_node",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		Hostinfo:       &hostInfo1,
		HostInfo:       types.HostInfo(hostInfo1),
	}
	db.db.Save(&node1)

	sendUpdate, err := db.SaveNodeRoutes(&node1)
	err = db.SaveNodeRoutes(&node1)
	c.Assert(err, check.IsNil)
	c.Assert(sendUpdate, check.Equals, false)

	err = db.enableRoutes(&node1, route.String())
	c.Assert(err, check.IsNil)

@@ -180,17 +179,19 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
	}
	node2 := types.Node{
		ID:             2,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "test_enable_route_node",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		Hostinfo:       &hostInfo2,
		HostInfo:       types.HostInfo(hostInfo2),
	}
	db.db.Save(&node2)

	sendUpdate, err = db.SaveNodeRoutes(&node2)
	err = db.SaveNodeRoutes(&node2)
	c.Assert(err, check.IsNil)
	c.Assert(sendUpdate, check.Equals, false)

	err = db.enableRoutes(&node2, route2.String())
	c.Assert(err, check.IsNil)

@@ -212,6 +213,148 @@ func (s *Suite) TestIsUniquePrefix(c *check.C) {
	c.Assert(len(routes), check.Equals, 0)
}

func (s *Suite) TestSubnetFailover(c *check.C) {
	user, err := db.CreateUser("test")
	c.Assert(err, check.IsNil)

	pak, err := db.CreatePreAuthKey(user.Name, false, false, nil, nil)
	c.Assert(err, check.IsNil)

	_, err = db.GetNode("test", "test_enable_route_node")
	c.Assert(err, check.NotNil)

	prefix, err := netip.ParsePrefix(
		"10.0.0.0/24",
	)
	c.Assert(err, check.IsNil)

	prefix2, err := netip.ParsePrefix(
		"150.0.10.0/25",
	)
	c.Assert(err, check.IsNil)

	hostInfo1 := tailcfg.Hostinfo{
		RoutableIPs: []netip.Prefix{prefix, prefix2},
	}

	now := time.Now()
	node1 := types.Node{
		ID:             1,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "test_enable_route_node",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		HostInfo:       types.HostInfo(hostInfo1),
		LastSeen:       &now,
	}
	db.db.Save(&node1)

	err = db.SaveNodeRoutes(&node1)
	c.Assert(err, check.IsNil)

	err = db.enableRoutes(&node1, prefix.String())
	c.Assert(err, check.IsNil)

	err = db.enableRoutes(&node1, prefix2.String())
	c.Assert(err, check.IsNil)

	err = db.HandlePrimarySubnetFailover()
	c.Assert(err, check.IsNil)

	enabledRoutes1, err := db.GetEnabledRoutes(&node1)
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes1), check.Equals, 2)

	route, err := db.getPrimaryRoute(prefix)
	c.Assert(err, check.IsNil)
	c.Assert(route.NodeID, check.Equals, node1.ID)

	hostInfo2 := tailcfg.Hostinfo{
		RoutableIPs: []netip.Prefix{prefix2},
	}
	node2 := types.Node{
		ID:             2,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "test_enable_route_node",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		HostInfo:       types.HostInfo(hostInfo2),
		LastSeen:       &now,
	}
	db.db.Save(&node2)

	err = db.saveNodeRoutes(&node2)
	c.Assert(err, check.IsNil)

	err = db.enableRoutes(&node2, prefix2.String())
	c.Assert(err, check.IsNil)

	err = db.HandlePrimarySubnetFailover()
	c.Assert(err, check.IsNil)

	enabledRoutes1, err = db.GetEnabledRoutes(&node1)
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes1), check.Equals, 2)

	enabledRoutes2, err := db.GetEnabledRoutes(&node2)
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes2), check.Equals, 1)

	routes, err := db.GetNodePrimaryRoutes(&node1)
	c.Assert(err, check.IsNil)
	c.Assert(len(routes), check.Equals, 2)

	routes, err = db.GetNodePrimaryRoutes(&node2)
	c.Assert(err, check.IsNil)
	c.Assert(len(routes), check.Equals, 0)

	// let's set node1's last seen to 10 mins ago
	before := now.Add(-10 * time.Minute)
	node1.LastSeen = &before
	err = db.db.Save(&node1).Error
	c.Assert(err, check.IsNil)

	err = db.HandlePrimarySubnetFailover()
	c.Assert(err, check.IsNil)

	routes, err = db.GetNodePrimaryRoutes(&node1)
	c.Assert(err, check.IsNil)
	c.Assert(len(routes), check.Equals, 1)

	routes, err = db.GetNodePrimaryRoutes(&node2)
	c.Assert(err, check.IsNil)
	c.Assert(len(routes), check.Equals, 1)

	node2.HostInfo = types.HostInfo(tailcfg.Hostinfo{
		RoutableIPs: []netip.Prefix{prefix, prefix2},
	})
	err = db.db.Save(&node2).Error
	c.Assert(err, check.IsNil)

	err = db.SaveNodeRoutes(&node2)
	c.Assert(err, check.IsNil)

	err = db.enableRoutes(&node2, prefix.String())
	c.Assert(err, check.IsNil)

	err = db.HandlePrimarySubnetFailover()
	c.Assert(err, check.IsNil)

	routes, err = db.GetNodePrimaryRoutes(&node1)
	c.Assert(err, check.IsNil)
	c.Assert(len(routes), check.Equals, 0)

	routes, err = db.GetNodePrimaryRoutes(&node2)
	c.Assert(err, check.IsNil)
	c.Assert(len(routes), check.Equals, 2)
}

func (s *Suite) TestDeleteRoutes(c *check.C) {
	user, err := db.CreateUser("test")
	c.Assert(err, check.IsNil)

@@ -239,18 +382,20 @@ func (s *Suite) TestDeleteRoutes(c *check.C) {
	now := time.Now()
	node1 := types.Node{
		ID:             1,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "test_enable_route_node",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,
		AuthKeyID:      uint(pak.ID),
		Hostinfo:       &hostInfo1,
		HostInfo:       types.HostInfo(hostInfo1),
		LastSeen:       &now,
	}
	db.db.Save(&node1)

	sendUpdate, err := db.SaveNodeRoutes(&node1)
	err = db.SaveNodeRoutes(&node1)
	c.Assert(err, check.IsNil)
	c.Assert(sendUpdate, check.Equals, false)

	err = db.enableRoutes(&node1, prefix.String())
	c.Assert(err, check.IsNil)

@@ -268,362 +413,3 @@ func (s *Suite) TestDeleteRoutes(c *check.C) {
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes1), check.Equals, 1)
}

func TestFailoverRoute(t *testing.T) {
	ipp := func(s string) types.IPPrefix { return types.IPPrefix(netip.MustParsePrefix(s)) }

	// TODO(kradalby): Count/verify updates
	var sink chan types.StateUpdate

	go func() {
		for range sink {
		}
	}()

	machineKeys := []key.MachinePublic{
		key.NewMachine().Public(),
		key.NewMachine().Public(),
		key.NewMachine().Public(),
		key.NewMachine().Public(),
	}

	tests := []struct {
		name         string
		failingRoute types.Route
		routes       types.Routes
		want         []key.MachinePublic
		wantErr      bool
	}{
		{
			name:         "no-route",
			failingRoute: types.Route{},
			routes:       types.Routes{},
			want:         nil,
			wantErr:      false,
		},
		{
			name: "no-prime",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 1,
				},
				Prefix: ipp("10.0.0.0/24"),
				Node: types.Node{
					MachineKey: machineKeys[0],
				},
				IsPrimary: false,
			},
			routes:  types.Routes{},
			want:    nil,
			wantErr: false,
		},
		{
			name: "exit-node",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 1,
				},
				Prefix: ipp("0.0.0.0/0"),
				Node: types.Node{
					MachineKey: machineKeys[0],
				},
				IsPrimary: true,
			},
			routes:  types.Routes{},
			want:    nil,
			wantErr: false,
		},
		{
			name: "no-failover-single-route",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 1,
				},
				Prefix: ipp("10.0.0.0/24"),
				Node: types.Node{
					MachineKey: machineKeys[0],
				},
				IsPrimary: true,
			},
			routes: types.Routes{
				types.Route{
					Model: gorm.Model{
						ID: 1,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[0],
					},
					IsPrimary: true,
				},
			},
			want:    nil,
			wantErr: false,
		},
		{
			name: "failover-primary",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 1,
				},
				Prefix: ipp("10.0.0.0/24"),
				Node: types.Node{
					MachineKey: machineKeys[0],
				},
				IsPrimary: true,
			},
			routes: types.Routes{
				types.Route{
					Model: gorm.Model{
						ID: 1,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[0],
					},
					IsPrimary: true,
				},
				types.Route{
					Model: gorm.Model{
						ID: 2,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[1],
					},
					IsPrimary: false,
				},
			},
			want: []key.MachinePublic{
				machineKeys[0],
				machineKeys[1],
			},
			wantErr: false,
		},
		{
			name: "failover-none-primary",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 1,
				},
				Prefix: ipp("10.0.0.0/24"),
				Node: types.Node{
					MachineKey: machineKeys[0],
				},
				IsPrimary: false,
			},
			routes: types.Routes{
				types.Route{
					Model: gorm.Model{
						ID: 1,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[0],
					},
					IsPrimary: true,
				},
				types.Route{
					Model: gorm.Model{
						ID: 2,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[1],
					},
					IsPrimary: false,
				},
			},
			want:    nil,
			wantErr: false,
		},
		{
			name: "failover-primary-multi-route",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 2,
				},
				Prefix: ipp("10.0.0.0/24"),
				Node: types.Node{
					MachineKey: machineKeys[1],
				},
				IsPrimary: true,
			},
			routes: types.Routes{
				types.Route{
					Model: gorm.Model{
						ID: 1,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[0],
					},
					IsPrimary: false,
				},
				types.Route{
					Model: gorm.Model{
						ID: 2,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[1],
					},
					IsPrimary: true,
				},
				types.Route{
					Model: gorm.Model{
						ID: 3,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[2],
					},
					IsPrimary: false,
				},
			},
			want: []key.MachinePublic{
				machineKeys[1],
				machineKeys[0],
			},
			wantErr: false,
		},
		{
			name: "failover-primary-no-online",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 1,
				},
				Prefix: ipp("10.0.0.0/24"),
				Node: types.Node{
					MachineKey: machineKeys[0],
				},
				IsPrimary: true,
			},
			routes: types.Routes{
				types.Route{
					Model: gorm.Model{
						ID: 1,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[0],
					},
					IsPrimary: true,
				},
				// Offline
				types.Route{
					Model: gorm.Model{
						ID: 2,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[3],
					},
					IsPrimary: false,
				},
			},
			want:    nil,
			wantErr: false,
		},
		{
			name: "failover-primary-one-not-online",
			failingRoute: types.Route{
				Model: gorm.Model{
					ID: 1,
				},
				Prefix: ipp("10.0.0.0/24"),
				Node: types.Node{
					MachineKey: machineKeys[0],
				},
				IsPrimary: true,
			},
			routes: types.Routes{
				types.Route{
					Model: gorm.Model{
						ID: 1,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[0],
					},
					IsPrimary: true,
				},
				// Offline
				types.Route{
					Model: gorm.Model{
						ID: 2,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[3],
					},
					IsPrimary: false,
				},
				types.Route{
					Model: gorm.Model{
						ID: 3,
					},
					Prefix: ipp("10.0.0.0/24"),
					Node: types.Node{
						MachineKey: machineKeys[1],
					},
					IsPrimary: true,
				},
			},
			want: []key.MachinePublic{
				machineKeys[0],
				machineKeys[1],
			},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tmpDir, err := os.MkdirTemp("", "failover-db-test")
			assert.NoError(t, err)

			notif := notifier.NewNotifier()

			db, err = NewHeadscaleDatabase(
				"sqlite3",
				tmpDir+"/headscale_test.db",
				false,
				notif,
				[]netip.Prefix{
					netip.MustParsePrefix("10.27.0.0/23"),
				},
				"",
			)
			assert.NoError(t, err)

			// Pretend that all the nodes are connected to control
			for idx, key := range machineKeys {
				// Pretend one node is offline
				if idx == 3 {
					continue
				}

				notif.AddNode(key, sink)
			}

			for _, route := range tt.routes {
				if err := db.db.Save(&route).Error; err != nil {
					t.Fatalf("failed to create route: %s", err)
				}
			}

			got, err := db.failoverRoute(&tt.failingRoute)

			if (err != nil) != tt.wantErr {
				t.Errorf("failoverRoute() error = %v, wantErr %v", err, tt.wantErr)

				return
			}

			if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
				t.Errorf("failoverRoute() unexpected result (-want +got):\n%s", diff)
			}
		})
	}
}

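TestFailoverRoute above is a standard Go table-driven test: a slice of named cases, each executed as a subtest via t.Run and compared with go-cmp. The same skeleton in miniature, with a trivial function standing in for failoverRoute (doubleAll is purely illustrative):

package example

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

// doubleAll stands in for the function under test.
func doubleAll(in []int) []int {
	out := make([]int, len(in))
	for i, v := range in {
		out[i] = v * 2
	}
	return out
}

func TestDoubleAll(t *testing.T) {
	tests := []struct {
		name string
		in   []int
		want []int
	}{
		{name: "empty", in: []int{}, want: []int{}},
		{name: "simple", in: []int{1, 2}, want: []int{2, 4}},
	}

	for _, tt := range tests {
		// Each case runs as an independently reportable subtest.
		t.Run(tt.name, func(t *testing.T) {
			got := doubleAll(tt.in)
			if diff := cmp.Diff(tt.want, got); diff != "" {
				t.Errorf("doubleAll() mismatch (-want +got):\n%s", diff)
			}
		})
	}
}
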
@@ -1,7 +1,6 @@
package db

import (
	"log"
	"net/netip"
	"os"
	"testing"

@@ -28,22 +27,19 @@ func (s *Suite) SetUpTest(c *check.C) {
}

func (s *Suite) TearDownTest(c *check.C) {
	// os.RemoveAll(tmpDir)
	os.RemoveAll(tmpDir)
}

func (s *Suite) ResetDB(c *check.C) {
	// if len(tmpDir) != 0 {
	// 	os.RemoveAll(tmpDir)
	// }

	if len(tmpDir) != 0 {
		os.RemoveAll(tmpDir)
	}
	var err error
	tmpDir, err = os.MkdirTemp("", "headscale-db-test-*")
	tmpDir, err = os.MkdirTemp("", "autoygg-client-test")
	if err != nil {
		c.Fatal(err)
	}

	log.Printf("database path: %s", tmpDir+"/headscale_test.db")

	db, err = NewHeadscaleDatabase(
		"sqlite3",
		tmpDir+"/headscale_test.db",

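The setup above hand-rolls a temporary directory with os.MkdirTemp and removes it in TearDownTest. In plain *testing.T tests (outside gocheck suites), the standard library's t.TempDir gives the same per-test isolation with automatic cleanup; a small sketch, assuming an ordinary _test.go file:

package db_test

import (
	"path/filepath"
	"testing"
)

func TestWithTempDB(t *testing.T) {
	// t.TempDir registers cleanup automatically; the directory is
	// removed when the test completes, so no TearDown bookkeeping
	// is needed.
	dir := t.TempDir()
	dbPath := filepath.Join(dir, "headscale_test.db")

	// ... open the SQLite database at dbPath and run assertions ...
	_ = dbPath
}
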
@@ -48,6 +48,9 @@ func (s *Suite) TestDestroyUserErrors(c *check.C) {

	node := types.Node{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "testnode",
		UserID:         user.ID,
		RegisterMethod: util.RegisterMethodAuthKey,

@@ -100,6 +103,9 @@ func (s *Suite) TestSetMachineUser(c *check.C) {

	node := types.Node{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Hostname:       "testnode",
		UserID:         oldUser.ID,
		RegisterMethod: util.RegisterMethodAuthKey,

@@ -13,7 +13,6 @@ import (
	"time"

	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"tailscale.com/derp"
	"tailscale.com/net/stun"

@@ -40,7 +39,7 @@ func NewDERPServer(
	cfg *types.DERPConfig,
) (*DERPServer, error) {
	log.Trace().Caller().Msg("Creating new embedded DERP server")
	server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains
	server := derp.NewServer(derpKey, log.Debug().Msgf) // nolint // zerolinter complains

	return &DERPServer{
		serverURL: serverURL,

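Both variants above work because derp.NewServer accepts a printf-style log function, so anything with a func(string, ...any) shape fits, including zerolog's Msgf. A minimal sketch of wrapping zerolog into such an adapter (the Logf type is declared locally here for illustration; Tailscale's own lives in tailscale.com/types/logger):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// Logf is the printf-style signature expected by components like the
// embedded DERP server. Declared locally for illustration.
type Logf func(format string, args ...interface{})

// debugLogf adapts a zerolog logger to the Logf shape at debug level.
func debugLogf(l zerolog.Logger) Logf {
	return func(format string, args ...interface{}) {
		l.Debug().Msgf(format, args...)
	}
}

func main() {
	l := zerolog.New(os.Stderr).With().Timestamp().Logger()
	logf := debugLogf(l)
	logf("derp server starting on %s", ":443")
}

Passing log.Debug().Msgf directly, as the diff does, is the same idea without the named type.
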
@@ -172,18 +172,12 @@ func (api headscaleV1APIServer) RegisterNode(
) (*v1.RegisterNodeResponse, error) {
	log.Trace().
		Str("user", request.GetUser()).
		Str("machine_key", request.GetKey()).
		Str("node_key", request.GetKey()).
		Msg("Registering node")

	var mkey key.MachinePublic
	err := mkey.UnmarshalText([]byte(request.GetKey()))
	if err != nil {
		return nil, err
	}

	node, err := api.h.db.RegisterNodeFromAuthCallback(
		api.h.registrationCache,
		mkey,
		request.GetKey(),
		request.GetUser(),
		nil,
		util.RegisterMethodCLI,

@@ -204,13 +198,7 @@ func (api headscaleV1APIServer) GetNode(
		return nil, err
	}

	resp := node.Proto()

	// Populate the online field based on
	// currently connected nodes.
	resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)

	return &v1.GetNodeResponse{Node: resp}, nil
	return &v1.GetNodeResponse{Node: node.Proto()}, nil
}

func (api headscaleV1APIServer) SetTags(

@@ -339,13 +327,7 @@ func (api headscaleV1APIServer) ListNodes(

	response := make([]*v1.Node, len(nodes))
	for index, node := range nodes {
		resp := node.Proto()

		// Populate the online field based on
		// currently connected nodes.
		resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)

		response[index] = resp
		response[index] = node.Proto()
	}

	return &v1.ListNodesResponse{Nodes: response}, nil

@@ -358,18 +340,13 @@ func (api headscaleV1APIServer) ListNodes(

	response := make([]*v1.Node, len(nodes))
	for index, node := range nodes {
		resp := node.Proto()

		// Populate the online field based on
		// currently connected nodes.
		resp.Online = api.h.nodeNotifier.IsConnected(node.MachineKey)

		m := node.Proto()
		validTags, invalidTags := api.h.ACLPolicy.TagsOfNode(
			&node,
		)
		resp.InvalidTags = invalidTags
		resp.ValidTags = validTags
		response[index] = resp
		m.InvalidTags = invalidTags
		m.ValidTags = validTags
		response[index] = m
	}

	return &v1.ListNodesResponse{Nodes: response}, nil

@@ -544,22 +521,13 @@ func (api headscaleV1APIServer) DebugCreateNode(
		Hostname: "DebugTestNode",
	}

	var mkey key.MachinePublic
	err = mkey.UnmarshalText([]byte(request.GetKey()))
	givenName, err := api.h.db.GenerateGivenName(request.GetKey(), request.GetName())
	if err != nil {
		return nil, err
	}

	givenName, err := api.h.db.GenerateGivenName(mkey, request.GetName())
	if err != nil {
		return nil, err
	}

	nodeKey := key.NewNode()

	newNode := types.Node{
		MachineKey: mkey,
		NodeKey:    nodeKey.Public(),
		MachineKey: request.GetKey(),
		Hostname:   request.GetName(),
		GivenName:  givenName,
		User:       *user,

@@ -567,15 +535,17 @@ func (api headscaleV1APIServer) DebugCreateNode(
		Expiry:   &time.Time{},
		LastSeen: &time.Time{},

		Hostinfo: &hostinfo,
		HostInfo: types.HostInfo(hostinfo),
	}

	log.Debug().
		Str("machine_key", mkey.ShortString()).
		Msg("adding debug machine via CLI, appending to registration cache")
	nodeKey := key.NodePublic{}
	err = nodeKey.UnmarshalText([]byte(request.GetKey()))
	if err != nil {
		log.Panic().Msg("can not add node for debug. invalid node key")
	}

	api.h.registrationCache.Set(
		mkey.String(),
		util.NodePublicKeyStripPrefix(nodeKey),
		newNode,
		registerCacheExpiration,
	)

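The recurring pattern in the gRPC handlers above is overlaying a liveness bit from the in-memory notifier onto the database view of a node before returning it, rather than trusting a stale stored column. That idea in isolation, with stand-in types (ConnTracker here is hypothetical; headscale's real notifier keys on machine keys and update channels):

package main

import (
	"fmt"
	"sync"
)

// ConnTracker is a hypothetical stand-in for headscale's notifier: it
// remembers which machine keys currently hold an open connection.
type ConnTracker struct {
	mu    sync.Mutex
	conns map[string]bool
}

func NewConnTracker() *ConnTracker {
	return &ConnTracker{conns: make(map[string]bool)}
}

func (c *ConnTracker) Add(machineKey string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.conns[machineKey] = true
}

func (c *ConnTracker) Remove(machineKey string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.conns, machineKey)
}

func (c *ConnTracker) IsConnected(machineKey string) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.conns[machineKey]
}

// NodeView is a trimmed stand-in for the protobuf node message.
type NodeView struct {
	Name   string
	Online bool
}

func main() {
	tracker := NewConnTracker()
	tracker.Add("mkey:abc")

	// When rendering API responses, the stored node is combined with
	// live connection state at read time.
	view := NodeView{Name: "mini", Online: tracker.IsConnected("mkey:abc")}
	fmt.Printf("%+v\n", view)
}
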
hscontrol/handler_legacy.go (new file, 15 lines)

@@ -0,0 +1,15 @@
//go:build ts2019

package hscontrol

import (
	"net/http"

	"github.com/gorilla/mux"
)

func (h *Headscale) addLegacyHandlers(router *mux.Router) {
	router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).
		Methods(http.MethodPost)
	router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
}

hscontrol/handler_placeholder.go (new file, 8 lines)

@@ -0,0 +1,8 @@
//go:build !ts2019

package hscontrol

import "github.com/gorilla/mux"

func (h *Headscale) addLegacyHandlers(router *mux.Router) {
}

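The two files above are the classic Go build-tag split: one implementation is compiled only with -tags ts2019, and an empty placeholder is compiled otherwise, so callers can invoke addLegacyHandlers unconditionally. The same pattern in miniature (the file names and the "experimental" tag are illustrative):

// feature_on.go — compiled only when the tag is set.
//go:build experimental

package feature

func Enabled() bool { return true }

// feature_off.go — compiled in every other build.
//go:build !experimental

package feature

func Enabled() bool { return false }

Building with "go build -tags experimental" selects the first file; a plain "go build" selects the second. Exactly one definition of Enabled exists in any given build, which is what lets the rest of the package call it without conditionals.
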
@@ -11,6 +11,7 @@ import (
	"time"

	"github.com/gorilla/mux"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"

@@ -62,6 +63,26 @@ func (h *Headscale) KeyHandler(
	// New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion
	capVer, err := parseCabailityVersion(req)
	if err != nil {
		if errors.Is(err, ErrNoCapabilityVersion) {
			log.Debug().
				Str("handler", "/key").
				Msg("New legacy client")
			// Old clients don't send a 'v' parameter, so we send the legacy public key
			writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
			writer.WriteHeader(http.StatusOK)
			_, err := writer.Write(
				[]byte(util.MachinePublicKeyStripPrefix(h.privateKey2019.Public())),
			)
			if err != nil {
				log.Error().
					Caller().
					Err(err).
					Msg("Failed to write response")
			}

			return
		}

		log.Error().
			Caller().
			Err(err).

@@ -80,7 +101,7 @@ func (h *Headscale) KeyHandler(

	log.Debug().
		Str("handler", "/key").
		Int("cap_ver", int(capVer)).
		Int("v", int(capVer)).
		Msg("New noise client")
	if err != nil {
		writer.Header().Set("Content-Type", "text/plain; charset=utf-8")

@@ -99,6 +120,7 @@ func (h *Headscale) KeyHandler(
	// TS2021 (Tailscale v2 protocol) requires a different key
	if capVer >= NoiseCapabilityVersion {
		resp := tailcfg.OverTLSPublicKeyResponse{
			LegacyPublicKey: h.privateKey2019.Public(),
			PublicKey:       h.noisePrivateKey.Public(),
		}
		writer.Header().Set("Content-Type", "application/json")

@@ -184,16 +206,33 @@ func (h *Headscale) RegisterWebAPI(
	req *http.Request,
) {
	vars := mux.Vars(req)
	machineKeyStr := vars["mkey"]
	nodeKeyStr, ok := vars["nkey"]

	if !util.NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
		log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")

		writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
		writer.WriteHeader(http.StatusUnauthorized)
		_, err := writer.Write([]byte("Unauthorized"))
		if err != nil {
			log.Error().
				Caller().
				Err(err).
				Msg("Failed to write response")
		}

		return
	}

	// We need to make sure we don't open ourselves up to XSS-style injections:
	// if the parameter passed as a key is not parsable/validated as a NodePublic
	// key, then fail to render the template and log an error.
	var machineKey key.MachinePublic
	err := machineKey.UnmarshalText(
		[]byte(machineKeyStr),
	var nodeKey key.NodePublic
	err := nodeKey.UnmarshalText(
		[]byte(util.NodePublicKeyEnsurePrefix(nodeKeyStr)),
	)
	if err != nil {

	if !ok || nodeKeyStr == "" || err != nil {
		log.Warn().Err(err).Msg("Failed to parse incoming nodekey")

		writer.Header().Set("Content-Type", "text/plain; charset=utf-8")

@@ -211,7 +250,7 @@ func (h *Headscale) RegisterWebAPI(

	var content bytes.Buffer
	if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{
		Key: machineKey.String(),
		Key: nodeKeyStr,
	}); err != nil {
		log.Error().
			Str("func", "RegisterWebAPI").

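KeyHandler above negotiates protocol versions: clients that omit the 'v' query parameter get the legacy key as plain text, while newer clients get a JSON document carrying both keys. A self-contained sketch of that negotiation, assuming illustrative key strings and cutoff value (the real NoiseCapabilityVersion constant and key types live in headscale and tailcfg):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strconv"
)

// noiseCapabilityVersion mirrors the cutoff used in KeyHandler;
// the concrete number here is illustrative.
const noiseCapabilityVersion = 28

type keyResponse struct {
	LegacyPublicKey string `json:"legacy_public_key"`
	PublicKey       string `json:"public_key"`
}

// keyHandler sketches the negotiation: no 'v' parameter means a legacy
// client (plain-text legacy key); a high enough capability version gets
// a JSON body with both keys.
func keyHandler(legacyKey, noiseKey string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		v := r.URL.Query().Get("v")
		if v == "" {
			w.Header().Set("Content-Type", "text/plain; charset=utf-8")
			fmt.Fprint(w, legacyKey)
			return
		}

		capVer, err := strconv.Atoi(v)
		if err != nil {
			http.Error(w, "invalid capability version", http.StatusBadRequest)
			return
		}

		if capVer >= noiseCapabilityVersion {
			w.Header().Set("Content-Type", "application/json")
			json.NewEncoder(w).Encode(keyResponse{
				LegacyPublicKey: legacyKey,
				PublicKey:       noiseKey,
			})
			return
		}

		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		fmt.Fprint(w, legacyKey)
	}
}

func main() {
	http.Handle("/key", keyHandler("mkey:legacy-example", "mkey:noise-example"))
	http.ListenAndServe(":8080", nil)
}
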
@ -8,7 +8,6 @@ import (
|
|||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -21,11 +20,12 @@ import (
|
|||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/rs/zerolog/log"
|
||||
"golang.org/x/exp/maps"
|
||||
"github.com/samber/lo"
|
||||
"tailscale.com/envknob"
|
||||
"tailscale.com/smallzstd"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -46,9 +46,12 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_
|
|||
// - Keep information about the previous mapresponse so we can send a diff
|
||||
// - Store hashes
|
||||
// - Create a "minifier" that removes info not needed for the node
|
||||
// - some sort of batching, wait for 5 or 60 seconds before sending
|
||||
|
||||
type Mapper struct {
|
||||
privateKey2019 *key.MachinePrivate
|
||||
isNoise bool
|
||||
capVer tailcfg.CapabilityVersion
|
||||
|
||||
// Configuration
|
||||
// TODO(kradalby): figure out if this is the format we want this in
|
||||
derpMap *tailcfg.DERPMap
|
||||
|
@ -65,17 +68,14 @@ type Mapper struct {
|
|||
// only one func is accessing it over time.
|
||||
mu sync.Mutex
|
||||
peers map[uint64]*types.Node
|
||||
patches map[uint64][]patch
|
||||
}
|
||||
|
||||
type patch struct {
|
||||
timestamp time.Time
|
||||
change *tailcfg.PeerChange
|
||||
}
|
||||
|
||||
func NewMapper(
|
||||
node *types.Node,
|
||||
peers types.Nodes,
|
||||
privateKey *key.MachinePrivate,
|
||||
isNoise bool,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
derpMap *tailcfg.DERPMap,
|
||||
baseDomain string,
|
||||
dnsCfg *tailcfg.DNSConfig,
|
||||
|
@ -84,12 +84,17 @@ func NewMapper(
|
|||
) *Mapper {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Bool("noise", isNoise).
|
||||
Str("node", node.Hostname).
|
||||
Msg("creating new mapper")
|
||||
|
||||
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
|
||||
|
||||
return &Mapper{
|
||||
privateKey2019: privateKey,
|
||||
isNoise: isNoise,
|
||||
capVer: capVer,
|
||||
|
||||
derpMap: derpMap,
|
||||
baseDomain: baseDomain,
|
||||
dnsCfg: dnsCfg,
|
||||
|
@ -102,7 +107,6 @@ func NewMapper(
|
|||
|
||||
// TODO: populate
|
||||
peers: peers.IDMap(),
|
||||
patches: make(map[uint64][]patch),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -191,7 +195,7 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
|||
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
|
||||
attrs := url.Values{
|
||||
"device_name": []string{node.Hostname},
|
||||
"device_model": []string{node.Hostinfo.OS},
|
||||
"device_model": []string{node.HostInfo.OS},
|
||||
}
|
||||
|
||||
if len(node.IPAddresses) > 0 {
|
||||
|
@ -208,11 +212,10 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
|||
func (m *Mapper) fullMapResponse(
|
||||
node *types.Node,
|
||||
pol *policy.ACLPolicy,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
peers := nodeMapToList(m.peers)
|
||||
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol, capVer)
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -221,7 +224,7 @@ func (m *Mapper) fullMapResponse(
|
|||
resp,
|
||||
pol,
|
||||
node,
|
||||
capVer,
|
||||
m.capVer,
|
||||
peers,
|
||||
peers,
|
||||
m.baseDomain,
|
||||
|
@ -244,24 +247,15 @@ func (m *Mapper) FullMapResponse(
|
|||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
peers := maps.Keys(m.peers)
|
||||
peersWithPatches := maps.Keys(m.patches)
|
||||
slices.Sort(peers)
|
||||
slices.Sort(peersWithPatches)
|
||||
|
||||
if len(peersWithPatches) > 0 {
|
||||
log.Debug().
|
||||
Str("node", node.Hostname).
|
||||
Uints64("peers", peers).
|
||||
Uints64("pending_patches", peersWithPatches).
|
||||
Msgf("node requested full map response, but has pending patches")
|
||||
}
|
||||
|
||||
resp, err := m.fullMapResponse(node, pol, mapRequest.Version)
|
||||
resp, err := m.fullMapResponse(node, pol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.isNoise {
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
|
@ -273,11 +267,15 @@ func (m *Mapper) LiteMapResponse(
|
|||
node *types.Node,
|
||||
pol *policy.ACLPolicy,
|
||||
) ([]byte, error) {
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol, mapRequest.Version)
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.isNoise {
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
|
@ -294,12 +292,10 @@ func (m *Mapper) KeepAliveResponse(
|
|||
func (m *Mapper) DERPMapResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
node *types.Node,
|
||||
derpMap *tailcfg.DERPMap,
|
||||
derpMap tailcfg.DERPMap,
|
||||
) ([]byte, error) {
|
||||
m.derpMap = derpMap
|
||||
|
||||
resp := m.baseMapResponse()
|
||||
resp.DERPMap = derpMap
|
||||
resp.DERPMap = &derpMap
|
||||
|
||||
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
@ -309,29 +305,18 @@ func (m *Mapper) PeerChangedResponse(
|
|||
node *types.Node,
|
||||
changed types.Nodes,
|
||||
pol *policy.ACLPolicy,
|
||||
messages ...string,
|
||||
) ([]byte, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
lastSeen := make(map[tailcfg.NodeID]bool)
|
||||
|
||||
// Update our internal map.
|
||||
for _, node := range changed {
|
||||
if patches, ok := m.patches[node.ID]; ok {
|
||||
// preserve online status in case the patch has an outdated one
|
||||
online := node.IsOnline
|
||||
|
||||
for _, p := range patches {
|
||||
// TODO(kradalby): Figure if this needs to be sorted by timestamp
|
||||
node.ApplyPeerChange(p.change)
|
||||
}
|
||||
|
||||
// Ensure the patches are not applied again later
|
||||
delete(m.patches, node.ID)
|
||||
|
||||
node.IsOnline = online
|
||||
}
|
||||
|
||||
m.peers[node.ID] = node
|
||||
|
||||
// We have just seen the node, let the peers update their list.
|
||||
lastSeen[tailcfg.NodeID(node.ID)] = true
|
||||
}
|
||||
|
||||
resp := m.baseMapResponse()
|
||||
|
@ -340,7 +325,7 @@ func (m *Mapper) PeerChangedResponse(
|
|||
&resp,
|
||||
pol,
|
||||
node,
|
||||
mapRequest.Version,
|
||||
m.capVer,
|
||||
nodeMapToList(m.peers),
|
||||
changed,
|
||||
m.baseDomain,
|
||||
|
@ -351,55 +336,11 @@ func (m *Mapper) PeerChangedResponse(
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...)
|
||||
}
|
||||
|
||||
// PeerChangedPatchResponse creates a patch MapResponse with
|
||||
// incoming update from a state change.
|
||||
func (m *Mapper) PeerChangedPatchResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
node *types.Node,
|
||||
changed []*tailcfg.PeerChange,
|
||||
pol *policy.ACLPolicy,
|
||||
) ([]byte, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
sendUpdate := false
|
||||
// patch the internal map
|
||||
for _, change := range changed {
|
||||
if peer, ok := m.peers[uint64(change.NodeID)]; ok {
|
||||
peer.ApplyPeerChange(change)
|
||||
sendUpdate = true
|
||||
} else {
|
||||
log.Trace().Str("node", node.Hostname).Msgf("Node with ID %s is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname)
|
||||
|
||||
p := patch{
|
||||
timestamp: time.Now(),
|
||||
change: change,
|
||||
}
|
||||
|
||||
if patches, ok := m.patches[uint64(change.NodeID)]; ok {
|
||||
patches := append(patches, p)
|
||||
|
||||
m.patches[uint64(change.NodeID)] = patches
|
||||
} else {
|
||||
m.patches[uint64(change.NodeID)] = []patch{p}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !sendUpdate {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
resp := m.baseMapResponse()
|
||||
resp.PeersChangedPatch = changed
|
||||
// resp.PeerSeenChange = lastSeen
|
||||
|
||||
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
// TODO(kradalby): We need some integration tests for this.
|
||||
func (m *Mapper) PeerRemovedResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
node *types.Node,
|
||||
|
@ -408,23 +349,13 @@ func (m *Mapper) PeerRemovedResponse(
|
|||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Some nodes might have been removed already
|
||||
// so we dont want to ask downstream to remove
|
||||
// twice, than can cause a panic in tailscaled.
|
||||
notYetRemoved := []tailcfg.NodeID{}
|
||||
|
||||
// remove from our internal map
|
||||
for _, id := range removed {
|
||||
if _, ok := m.peers[uint64(id)]; ok {
|
||||
notYetRemoved = append(notYetRemoved, id)
|
||||
}
|
||||
|
||||
delete(m.peers, uint64(id))
|
||||
delete(m.patches, uint64(id))
|
||||
}
|
||||
|
||||
resp := m.baseMapResponse()
|
||||
resp.PeersRemoved = notYetRemoved
|
||||
resp.PeersRemoved = removed
|
||||
|
||||
return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
@ -434,10 +365,20 @@ func (m *Mapper) marshalMapResponse(
|
|||
resp *tailcfg.MapResponse,
|
||||
node *types.Node,
|
||||
compression string,
|
||||
messages ...string,
|
||||
) ([]byte, error) {
|
||||
atomic.AddUint64(&m.seq, 1)
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot parse client key")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jsonBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
|
@ -448,27 +389,11 @@ func (m *Mapper) marshalMapResponse(
|
|||
|
||||
if debugDumpMapResponsePath != "" {
|
||||
data := map[string]interface{}{
|
||||
"Messages": messages,
|
||||
"MapRequest": mapRequest,
|
||||
"MapResponse": resp,
|
||||
}
|
||||
|
||||
responseType := "keepalive"
|
||||
|
||||
switch {
|
||||
case resp.Peers != nil && len(resp.Peers) > 0:
|
||||
responseType = "full"
|
||||
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
|
||||
responseType = "lite"
|
||||
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
|
||||
responseType = "changed"
|
||||
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
|
||||
responseType = "patch"
|
||||
case resp.PeersRemoved != nil && len(resp.PeersRemoved) > 0:
|
||||
responseType = "removed"
|
||||
}
|
||||
|
||||
body, err := json.MarshalIndent(data, "", " ")
|
||||
body, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
|
@ -487,7 +412,7 @@ func (m *Mapper) marshalMapResponse(
|
|||
|
||||
mapResponsePath := path.Join(
|
||||
mPath,
|
||||
fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType),
|
||||
fmt.Sprintf("%d-%s-%d.json", now, m.uid, atomic.LoadUint64(&m.seq)),
|
||||
)
|
||||
|
||||
log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
|
||||
|
@ -500,9 +425,16 @@ func (m *Mapper) marshalMapResponse(
|
|||
var respBody []byte
|
||||
if compression == util.ZstdCompression {
|
||||
respBody = zstdEncode(jsonBody)
|
||||
if !m.isNoise { // if legacy protocol
|
||||
respBody = m.privateKey2019.SealTo(machineKey, respBody)
|
||||
}
|
||||
} else {
|
||||
if !m.isNoise { // if legacy protocol
|
||||
respBody = m.privateKey2019.SealTo(machineKey, jsonBody)
|
||||
} else {
|
||||
respBody = jsonBody
|
||||
}
|
||||
}
|
||||
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
|
@ -511,6 +443,32 @@ func (m *Mapper) marshalMapResponse(
|
|||
return data, nil
|
||||
}
|
||||
|
||||
// MarshalResponse takes an Tailscale Response, marhsal it to JSON.
|
||||
// If isNoise is set, then the JSON body will be returned
|
||||
// If !isNoise and privateKey2019 is set, the JSON body will be sealed in a Nacl box.
|
||||
func MarshalResponse(
|
||||
resp interface{},
|
||||
isNoise bool,
|
||||
privateKey2019 *key.MachinePrivate,
|
||||
machineKey key.MachinePublic,
|
||||
) ([]byte, error) {
|
||||
jsonBody, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot marshal response")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isNoise && privateKey2019 != nil {
|
||||
return privateKey2019.SealTo(machineKey, jsonBody), nil
|
||||
}
|
||||
|
||||
return jsonBody, nil
|
||||
}
|
||||
|
||||
func zstdEncode(in []byte) []byte {
|
||||
encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
|
||||
if !ok {
|
||||
|
@ -544,7 +502,6 @@ func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
|
|||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
ControlTime: &now,
|
||||
// TODO(kradalby): Implement PingRequest?
|
||||
}
|
||||
|
||||
return resp
|
||||
|
@ -557,11 +514,10 @@ func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
|
|||
func (m *Mapper) baseWithConfigMapResponse(
|
||||
node *types.Node,
|
||||
pol *policy.ACLPolicy,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
resp := m.baseMapResponse()
|
||||
|
||||
tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
|
||||
tailnode, err := tailNode(node, m.capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -594,6 +550,15 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
|
|||
return ret
|
||||
}
|
||||
|
||||
func filterExpiredAndNotReady(peers types.Nodes) types.Nodes {
|
||||
return lo.Filter(peers, func(item *types.Node, index int) bool {
|
||||
// Filter out nodes that are expired OR
|
||||
// nodes that has no endpoints, this typically means they have
|
||||
// registered, but are not configured.
|
||||
return !item.IsExpired() || len(item.Endpoints) > 0
|
||||
})
|
||||
}
|
||||
|
||||
// appendPeerChanges mutates a tailcfg.MapResponse with all the
|
||||
// necessary changes when peers have changed.
|
||||
func appendPeerChanges(
|
||||
|
@ -619,6 +584,9 @@ func appendPeerChanges(
|
|||
return err
|
||||
}
|
||||
|
||||
// Filter out peers that have expired.
|
||||
changed = filterExpiredAndNotReady(changed)
|
||||
|
||||
// If there are filter rules present, see if there are any nodes that cannot
|
||||
// access eachother at all and remove them from the peers.
|
||||
if len(rules) > 0 {
|
||||
|
@ -654,5 +622,8 @@ func appendPeerChanges(
|
|||
resp.UserProfiles = profiles
|
||||
resp.SSHPolicy = sshPolicy
|
||||
|
||||
// TODO(kradalby): This currently does not take last seen in keepalives into account
|
||||
resp.OnlineChange = peers.OnlineNodeMap()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
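The hunks above restore the dual-protocol marshalling path: JSON, optional zstd compression, a NaCl seal for the legacy (ts2019) transport, and a 4-byte little-endian length prefix. Below is a minimal sketch of that framing, assuming reservedResponseHeaderSize is exactly the 4 bytes of the uint32 prefix; the frameResponse helper is illustrative, not the actual headscale function.

package main

import (
	"encoding/binary"
	"encoding/json"
	"fmt"

	"github.com/klauspost/compress/zstd"
	"tailscale.com/types/key"
)

const reservedResponseHeaderSize = 4 // assumption: room for one uint32 length prefix

func frameResponse(
	resp any,
	compress bool,
	isNoise bool,
	legacyKey *key.MachinePrivate, // nil when running Noise-only
	peer key.MachinePublic,
) ([]byte, error) {
	body, err := json.Marshal(resp)
	if err != nil {
		return nil, err
	}

	if compress {
		encoder, err := zstd.NewWriter(nil)
		if err != nil {
			return nil, err
		}
		body = encoder.EncodeAll(body, nil)
	}

	// Only the legacy (ts2019) protocol seals the body in a NaCl box;
	// Noise connections are already encrypted at the transport layer.
	if !isNoise && legacyKey != nil {
		body = legacyKey.SealTo(peer, body)
	}

	// Length-prefix the payload so the client can frame the stream.
	data := make([]byte, reservedResponseHeaderSize)
	binary.LittleEndian.PutUint32(data, uint32(len(body)))

	return append(data, body...), nil
}

func main() {
	out, _ := frameResponse(map[string]string{"hello": "world"}, true, true, nil, key.MachinePublic{})
	fmt.Println(len(out)) // header + payload
}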
|
@ -167,15 +167,9 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
|
||||
mini := &types.Node{
|
||||
ID: 0,
|
||||
MachineKey: mustMK(
|
||||
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
Hostname: "mini",
|
||||
GivenName: "mini",
|
||||
|
@ -186,7 +180,8 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
AuthKey: &types.PreAuthKey{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
HostInfo: types.HostInfo{},
|
||||
Endpoints: []string{},
|
||||
Routes: []types.Route{
|
||||
{
|
||||
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
|
||||
|
@ -231,12 +226,14 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
Endpoints: []string{},
|
||||
DERP: "127.3.3.40:0",
|
||||
Hostinfo: hiview(tailcfg.Hostinfo{}),
|
||||
Created: created,
|
||||
Tags: []string{},
|
||||
PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
|
||||
LastSeen: &lastSeen,
|
||||
Online: new(bool),
|
||||
MachineAuthorized: true,
|
||||
Capabilities: []tailcfg.NodeCapability{
|
||||
tailcfg.CapabilityFileSharing,
|
||||
|
@ -248,15 +245,9 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
|
||||
peer1 := &types.Node{
|
||||
ID: 1,
|
||||
MachineKey: mustMK(
|
||||
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
Hostname: "peer1",
|
||||
GivenName: "peer1",
|
||||
|
@ -265,7 +256,8 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
ForcedTags: []string{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
HostInfo: types.HostInfo{},
|
||||
Endpoints: []string{},
|
||||
Routes: []types.Route{},
|
||||
CreatedAt: created,
|
||||
}
|
||||
|
@ -286,12 +278,14 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
),
|
||||
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
|
||||
AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
|
||||
Endpoints: []string{},
|
||||
DERP: "127.3.3.40:0",
|
||||
Hostinfo: hiview(tailcfg.Hostinfo{}),
|
||||
Created: created,
|
||||
Tags: []string{},
|
||||
PrimaryRoutes: []netip.Prefix{},
|
||||
LastSeen: &lastSeen,
|
||||
Online: new(bool),
|
||||
MachineAuthorized: true,
|
||||
Capabilities: []tailcfg.NodeCapability{
|
||||
tailcfg.CapabilityFileSharing,
|
||||
|
@ -303,15 +297,9 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
|
||||
peer2 := &types.Node{
|
||||
ID: 2,
|
||||
MachineKey: mustMK(
|
||||
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
Hostname: "peer2",
|
||||
GivenName: "peer2",
|
||||
|
@ -320,7 +308,8 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
ForcedTags: []string{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
HostInfo: types.HostInfo{},
|
||||
Endpoints: []string{},
|
||||
Routes: []types.Route{},
|
||||
CreatedAt: created,
|
||||
}
|
||||
|
@ -398,6 +387,7 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
DNSConfig: &tailcfg.DNSConfig{},
|
||||
Domain: "",
|
||||
CollectServices: "false",
|
||||
OnlineChange: map[tailcfg.NodeID]bool{tailPeer1.ID: false},
|
||||
PacketFilter: []tailcfg.FilterRule{},
|
||||
UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
|
||||
SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}},
|
||||
|
@ -439,6 +429,10 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
DNSConfig: &tailcfg.DNSConfig{},
|
||||
Domain: "",
|
||||
CollectServices: "false",
|
||||
OnlineChange: map[tailcfg.NodeID]bool{
|
||||
tailPeer1.ID: false,
|
||||
tailcfg.NodeID(peer2.ID): false,
|
||||
},
|
||||
PacketFilter: []tailcfg.FilterRule{
|
||||
{
|
||||
SrcIPs: []string{"100.64.0.2/32"},
|
||||
|
@ -465,6 +459,9 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
mappy := NewMapper(
|
||||
tt.node,
|
||||
tt.peers,
|
||||
nil,
|
||||
false,
|
||||
0,
|
||||
tt.derpMap,
|
||||
tt.baseDomain,
|
||||
tt.dnsConfig,
|
||||
|
@ -475,7 +472,6 @@ func Test_fullMapResponse(t *testing.T) {
|
|||
got, err := mappy.fullMapResponse(
|
||||
tt.node,
|
||||
tt.pol,
|
||||
0,
|
||||
)
|
||||
|
||||
if (err != nil) != tt.wantErr {
|
||||
|
|
|
@ -52,6 +52,21 @@ func tailNode(
|
|||
baseDomain string,
|
||||
randomClientPort bool,
|
||||
) (*tailcfg.Node, error) {
|
||||
nodeKey, err := node.NodePublicKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
machineKey, err := node.MachinePublicKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
discoKey, err := node.DiscoPublicKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addrs := node.IPAddresses.Prefixes()
|
||||
|
||||
allowedIPs := append(
|
||||
|
@ -72,8 +87,8 @@ func tailNode(
|
|||
}
|
||||
|
||||
var derp string
|
||||
if node.Hostinfo.NetInfo != nil {
|
||||
derp = fmt.Sprintf("127.3.3.40:%d", node.Hostinfo.NetInfo.PreferredDERP)
|
||||
if node.HostInfo.NetInfo != nil {
|
||||
derp = fmt.Sprintf("127.3.3.40:%d", node.HostInfo.NetInfo.PreferredDERP)
|
||||
} else {
|
||||
derp = "127.3.3.40:0" // Zero means disconnected or unknown.
|
||||
}
|
||||
|
@ -87,9 +102,13 @@ func tailNode(
|
|||
|
||||
hostname, err := node.GetFQDN(dnsConfig, baseDomain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostInfo := node.GetHostInfo()
|
||||
|
||||
online := node.IsOnline()
|
||||
|
||||
tags, _ := pol.TagsOfNode(node)
|
||||
tags = lo.Uniq(append(tags, node.ForcedTags...))
|
||||
|
||||
|
@ -99,30 +118,28 @@ func tailNode(
|
|||
strconv.FormatUint(node.ID, util.Base10),
|
||||
), // in headscale, unlike tailcontrol server, IDs are permanent
|
||||
Name: hostname,
|
||||
Cap: capVer,
|
||||
|
||||
User: tailcfg.UserID(node.UserID),
|
||||
|
||||
Key: node.NodeKey,
|
||||
Key: nodeKey,
|
||||
KeyExpiry: keyExpiry,
|
||||
|
||||
Machine: node.MachineKey,
|
||||
DiscoKey: node.DiscoKey,
|
||||
Machine: machineKey,
|
||||
DiscoKey: discoKey,
|
||||
Addresses: addrs,
|
||||
AllowedIPs: allowedIPs,
|
||||
Endpoints: node.Endpoints,
|
||||
DERP: derp,
|
||||
Hostinfo: node.Hostinfo.View(),
|
||||
Hostinfo: hostInfo.View(),
|
||||
Created: node.CreatedAt,
|
||||
|
||||
Online: node.IsOnline,
|
||||
|
||||
Tags: tags,
|
||||
|
||||
PrimaryRoutes: primaryPrefixes,
|
||||
|
||||
LastSeen: node.LastSeen,
|
||||
Online: &online,
|
||||
MachineAuthorized: !node.IsExpired(),
|
||||
Expired: node.IsExpired(),
|
||||
}
|
||||
|
||||
// - 74: 2023-09-18: Client understands NodeCapMap
|
||||
|
@ -153,11 +170,5 @@ func tailNode(
|
|||
tNode.Capabilities = append(tNode.Capabilities, tailcfg.NodeAttrDisableUPnP)
|
||||
}
|
||||
|
||||
if node.IsOnline == nil || !*node.IsOnline {
|
||||
// LastSeen is only set when the node is
|
||||
// not connected to the control server.
|
||||
tNode.LastSeen = node.LastSeen
|
||||
}
|
||||
|
||||
return &tNode, nil
|
||||
}
|
||||
|
|
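As the tailNode hunks above show, headscale encodes the preferred DERP region as a fake IP:port on 127.3.3.40, where the port carries the region ID and 0 means disconnected or unknown. A small sketch of that mapping; the derpString helper name is illustrative.

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

// derpString mirrors the encoding used in tailNode above: the DERP
// region is smuggled through the legacy "fake IP" form.
func derpString(ni *tailcfg.NetInfo) string {
	if ni != nil {
		return fmt.Sprintf("127.3.3.40:%d", ni.PreferredDERP)
	}

	return "127.3.3.40:0" // zero means disconnected or unknown
}

func main() {
	fmt.Println(derpString(&tailcfg.NetInfo{PreferredDERP: 10})) // 127.3.3.40:10
	fmt.Println(derpString(nil))                                 // 127.3.3.40:0
}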
|
@ -54,41 +54,20 @@ func TestTailNode(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
name: "empty-node",
|
||||
node: &types.Node{
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
node: &types.Node{},
|
||||
pol: &policy.ACLPolicy{},
|
||||
dnsConfig: &tailcfg.DNSConfig{},
|
||||
baseDomain: "",
|
||||
want: &tailcfg.Node{
|
||||
StableID: "0",
|
||||
Addresses: []netip.Prefix{},
|
||||
AllowedIPs: []netip.Prefix{},
|
||||
DERP: "127.3.3.40:0",
|
||||
Hostinfo: hiview(tailcfg.Hostinfo{}),
|
||||
Tags: []string{},
|
||||
PrimaryRoutes: []netip.Prefix{},
|
||||
MachineAuthorized: true,
|
||||
Capabilities: []tailcfg.NodeCapability{
|
||||
"https://tailscale.com/cap/file-sharing", "https://tailscale.com/cap/is-admin",
|
||||
"https://tailscale.com/cap/ssh", "debug-disable-upnp",
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "minimal-node",
|
||||
node: &types.Node{
|
||||
ID: 0,
|
||||
MachineKey: mustMK(
|
||||
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
MachineKey: "mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
NodeKey: "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
DiscoKey: "discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
IPAddresses: []netip.Addr{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
|
@ -103,7 +82,8 @@ func TestTailNode(t *testing.T) {
|
|||
AuthKey: &types.PreAuthKey{},
|
||||
LastSeen: &lastSeen,
|
||||
Expiry: &expire,
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
HostInfo: types.HostInfo{},
|
||||
Endpoints: []string{},
|
||||
Routes: []types.Route{
|
||||
{
|
||||
Prefix: types.IPPrefix(netip.MustParsePrefix("0.0.0.0/0")),
|
||||
|
@ -153,6 +133,7 @@ func TestTailNode(t *testing.T) {
|
|||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("192.168.0.0/24"),
|
||||
},
|
||||
Endpoints: []string{},
|
||||
DERP: "127.3.3.40:0",
|
||||
Hostinfo: hiview(tailcfg.Hostinfo{}),
|
||||
Created: created,
|
||||
|
@ -164,6 +145,7 @@ func TestTailNode(t *testing.T) {
|
|||
},
|
||||
|
||||
LastSeen: &lastSeen,
|
||||
Online: new(bool),
|
||||
MachineAuthorized: true,
|
||||
|
||||
Capabilities: []tailcfg.NodeCapability{
|
||||
|
|
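The empty-node case above flips from a populated want to (nil, wantErr: true) because, with string-typed keys, tailNode must parse every key first and an empty string fails UnmarshalText. A sketch of that failure mode, under the assumption that the stored strings carry the usual "nodekey:"-style prefixes:

package main

import (
	"fmt"

	"tailscale.com/types/key"
)

func main() {
	var nk key.NodePublic

	// An empty node key has no hex payload, so parsing fails and
	// tailNode returns the error instead of a zero tailcfg.Node.
	err := nk.UnmarshalText([]byte("nodekey:"))
	fmt.Println(err != nil) // true
}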
|
@ -1,14 +1,11 @@
|
|||
package notifier
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
type Notifier struct {
|
||||
|
@ -20,9 +17,9 @@ func NewNotifier() *Notifier {
|
|||
return &Notifier{}
|
||||
}
|
||||
|
||||
func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpdate) {
|
||||
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to add node")
|
||||
defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to add node")
|
||||
func (n *Notifier) AddNode(machineKey string, c chan<- types.StateUpdate) {
|
||||
log.Trace().Caller().Str("key", machineKey).Msg("acquiring lock to add node")
|
||||
defer log.Trace().Caller().Str("key", machineKey).Msg("releasing lock to add node")
|
||||
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
|
@ -31,17 +28,17 @@ func (n *Notifier) AddNode(machineKey key.MachinePublic, c chan<- types.StateUpd
|
|||
n.nodes = make(map[string]chan<- types.StateUpdate)
|
||||
}
|
||||
|
||||
n.nodes[machineKey.String()] = c
|
||||
n.nodes[machineKey] = c
|
||||
|
||||
log.Trace().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("machine_key", machineKey).
|
||||
Int("open_chans", len(n.nodes)).
|
||||
Msg("Added new channel")
|
||||
}
|
||||
|
||||
func (n *Notifier) RemoveNode(machineKey key.MachinePublic) {
|
||||
log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("acquiring lock to remove node")
|
||||
defer log.Trace().Caller().Str("key", machineKey.ShortString()).Msg("releasing lock to remove node")
|
||||
func (n *Notifier) RemoveNode(machineKey string) {
|
||||
log.Trace().Caller().Str("key", machineKey).Msg("acquiring lock to remove node")
|
||||
defer log.Trace().Caller().Str("key", machineKey).Msg("releasing lock to remove node")
|
||||
|
||||
n.l.Lock()
|
||||
defer n.l.Unlock()
|
||||
|
@ -50,27 +47,14 @@ func (n *Notifier) RemoveNode(machineKey key.MachinePublic) {
|
|||
return
|
||||
}
|
||||
|
||||
delete(n.nodes, machineKey.String())
|
||||
delete(n.nodes, machineKey)
|
||||
|
||||
log.Trace().
|
||||
Str("machine_key", machineKey.ShortString()).
|
||||
Str("machine_key", machineKey).
|
||||
Int("open_chans", len(n.nodes)).
|
||||
Msg("Removed channel")
|
||||
}
|
||||
|
||||
// IsConnected reports if a node is connected to headscale and has a
|
||||
// poll session open.
|
||||
func (n *Notifier) IsConnected(machineKey key.MachinePublic) bool {
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
|
||||
if _, ok := n.nodes[machineKey.String()]; ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (n *Notifier) NotifyAll(update types.StateUpdate) {
|
||||
n.NotifyWithIgnore(update)
|
||||
}
|
||||
|
@ -94,31 +78,3 @@ func (n *Notifier) NotifyWithIgnore(update types.StateUpdate, ignore ...string)
|
|||
c <- update
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) NotifyByMachineKey(update types.StateUpdate, mKey key.MachinePublic) {
|
||||
log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify")
|
||||
defer log.Trace().
|
||||
Caller().
|
||||
Interface("type", update.Type).
|
||||
Msg("releasing lock, finished notifing")
|
||||
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
|
||||
if c, ok := n.nodes[mKey.String()]; ok {
|
||||
c <- update
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Notifier) String() string {
|
||||
n.l.RLock()
|
||||
defer n.l.RUnlock()
|
||||
|
||||
str := []string{"Notifier, in map:\n"}
|
||||
|
||||
for k, v := range n.nodes {
|
||||
str = append(str, fmt.Sprintf("\t%s: %v\n", k, v))
|
||||
}
|
||||
|
||||
return strings.Join(str, "")
|
||||
}
|
||||
|
|
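With the string-keyed notifier the hunks above revert to, registering a poll session and fanning out updates is just an RWMutex-guarded map of channels. A trimmed, self-contained sketch of that shape (StateUpdate and the method names stand in for the real headscale types):

package main

import (
	"fmt"
	"sync"
)

// StateUpdate stands in for types.StateUpdate.
type StateUpdate struct{ Msg string }

// notifier is a cut-down version of the Notifier above.
type notifier struct {
	l     sync.RWMutex
	nodes map[string]chan<- StateUpdate
}

func (n *notifier) addNode(machineKey string, c chan<- StateUpdate) {
	n.l.Lock()
	defer n.l.Unlock()

	if n.nodes == nil {
		n.nodes = make(map[string]chan<- StateUpdate)
	}
	n.nodes[machineKey] = c
}

// notifyWithIgnore fans an update out to every channel whose key is
// not in the ignore list, mirroring NotifyWithIgnore above.
func (n *notifier) notifyWithIgnore(update StateUpdate, ignore ...string) {
	n.l.RLock()
	defer n.l.RUnlock()

	for mkey, c := range n.nodes {
		skip := false
		for _, ign := range ignore {
			if mkey == ign {
				skip = true
			}
		}
		if !skip {
			c <- update
		}
	}
}

func main() {
	n := &notifier{}
	c := make(chan StateUpdate, 1)
	n.addNode("mkey:aa", c)
	n.notifyWithIgnore(StateUpdate{Msg: "peers changed"}, "mkey:bb")
	fmt.Println(<-c)
}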
|
@ -124,28 +124,42 @@ func (h *Headscale) determineTokenExpiration(idTokenExpiration time.Time) time.T
|
|||
|
||||
// RegisterOIDC redirects to the OIDC provider for authentication
|
||||
// Puts NodeKey in cache so the callback can retrieve it using the oidc state param
|
||||
// Listens in /oidc/register/:mKey.
|
||||
// Listens in /oidc/register/:nKey.
|
||||
func (h *Headscale) RegisterOIDC(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
vars := mux.Vars(req)
|
||||
machineKeyStr, ok := vars["mkey"]
|
||||
nodeKeyStr, ok := vars["nkey"]
|
||||
|
||||
log.Debug().
|
||||
Caller().
|
||||
Str("machine_key", machineKeyStr).
|
||||
Str("node_key", nodeKeyStr).
|
||||
Bool("ok", ok).
|
||||
Msg("Received oidc register call")
|
||||
|
||||
if !util.NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
|
||||
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
|
||||
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err := writer.Write([]byte("Unauthorized"))
|
||||
if err != nil {
|
||||
util.LogErr(err, "Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// We need to make sure we don't open ourselves to XSS-style injection: if the parameter that
|
||||
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
|
||||
// the template and log an error.
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText(
|
||||
[]byte(machineKeyStr),
|
||||
var nodeKey key.NodePublic
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(util.NodePublicKeyEnsurePrefix(nodeKeyStr)),
|
||||
)
|
||||
if err != nil {
|
||||
|
||||
if !ok || nodeKeyStr == "" || err != nil {
|
||||
log.Warn().
|
||||
Err(err).
|
||||
Msg("Failed to parse incoming nodekey in OIDC registration")
|
||||
|
@ -174,7 +188,7 @@ func (h *Headscale) RegisterOIDC(
|
|||
// place the node key into the state cache, so it can be retrieved later
|
||||
h.registrationCache.Set(
|
||||
stateStr,
|
||||
machineKey,
|
||||
util.NodePublicKeyStripPrefix(nodeKey),
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
|
@ -252,7 +266,7 @@ func (h *Headscale) OIDCCallback(
|
|||
return
|
||||
}
|
||||
|
||||
machineKey, nodeExists, err := h.validateNodeForOIDCCallback(
|
||||
nodeKey, nodeExists, err := h.validateNodeForOIDCCallback(
|
||||
writer,
|
||||
state,
|
||||
claims,
|
||||
|
@ -280,7 +294,7 @@ func (h *Headscale) OIDCCallback(
|
|||
return
|
||||
}
|
||||
|
||||
if err := h.registerNodeForOIDCCallback(writer, user, machineKey, idTokenExpiry); err != nil {
|
||||
if err := h.registerNodeForOIDCCallback(writer, user, nodeKey, idTokenExpiry); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -525,10 +539,10 @@ func (h *Headscale) validateNodeForOIDCCallback(
|
|||
state string,
|
||||
claims *IDTokenClaims,
|
||||
expiry time.Time,
|
||||
) (*key.MachinePublic, bool, error) {
|
||||
) (*key.NodePublic, bool, error) {
|
||||
// retrieve nodekey from state cache
|
||||
machineKeyIf, machineKeyFound := h.registrationCache.Get(state)
|
||||
if !machineKeyFound {
|
||||
nodeKeyIf, nodeKeyFound := h.registrationCache.Get(state)
|
||||
if !nodeKeyFound {
|
||||
log.Trace().
|
||||
Msg("requested node state key expired before authorisation completed")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
|
@ -541,12 +555,11 @@ func (h *Headscale) validateNodeForOIDCCallback(
|
|||
return nil, false, errOIDCNodeKeyMissing
|
||||
}
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
machineKey, machineKeyOK := machineKeyIf.(key.MachinePublic)
|
||||
if !machineKeyOK {
|
||||
var nodeKey key.NodePublic
|
||||
nodeKeyFromCache, nodeKeyOK := nodeKeyIf.(string)
|
||||
if !nodeKeyOK {
|
||||
log.Trace().
|
||||
Interface("got", machineKeyIf).
|
||||
Msg("requested node state key is not a nodekey")
|
||||
Msg("requested node state key is not a string")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, err := writer.Write([]byte("state is invalid"))
|
||||
|
@ -557,11 +570,29 @@ func (h *Headscale) validateNodeForOIDCCallback(
|
|||
return nil, false, errOIDCInvalidNodeState
|
||||
}
|
||||
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(util.NodePublicKeyEnsurePrefix(nodeKeyFromCache)),
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("nodeKey", nodeKeyFromCache).
|
||||
Bool("nodeKeyOK", nodeKeyOK).
|
||||
Msg("could not parse node public key")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, werr := writer.Write([]byte("could not parse node public key"))
|
||||
if werr != nil {
|
||||
util.LogErr(err, "Failed to write response")
|
||||
}
|
||||
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
// retrieve node information if it exists
|
||||
// The error is not important, because if it does not
|
||||
// exist, then this is a new node and we will move
|
||||
// on to registration.
|
||||
node, _ := h.db.GetNodeByMachineKey(machineKey)
|
||||
node, _ := h.db.GetNodeByNodeKey(nodeKey)
|
||||
|
||||
if node != nil {
|
||||
log.Trace().
|
||||
|
@ -626,7 +657,7 @@ func (h *Headscale) validateNodeForOIDCCallback(
|
|||
return nil, true, nil
|
||||
}
|
||||
|
||||
return &machineKey, false, nil
|
||||
return &nodeKey, false, nil
|
||||
}
|
||||
|
||||
func getUserName(
|
||||
|
@ -709,13 +740,13 @@ func (h *Headscale) findOrCreateNewUserForOIDCCallback(
|
|||
func (h *Headscale) registerNodeForOIDCCallback(
|
||||
writer http.ResponseWriter,
|
||||
user *types.User,
|
||||
machineKey *key.MachinePublic,
|
||||
nodeKey *key.NodePublic,
|
||||
expiry time.Time,
|
||||
) error {
|
||||
if _, err := h.db.RegisterNodeFromAuthCallback(
|
||||
// TODO(kradalby): find a better way to use the cache across modules
|
||||
h.registrationCache,
|
||||
*machineKey,
|
||||
nodeKey.String(),
|
||||
user.Name,
|
||||
&expiry,
|
||||
util.RegisterMethodOIDC,
|
||||
|
|
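The OIDC hunks above restore a round trip through the registration cache: RegisterOIDC stores the node key as a prefix-stripped string under a random state value, and the callback must type-assert it back to a string and re-parse it (prefix restored) before the node lookup. A sketch of that flow; the plain map stands in for headscale's expiring cache, and the state value is hardcoded here where the real code generates it randomly.

package main

import (
	"fmt"
	"strings"

	"tailscale.com/types/key"
)

func main() {
	cache := map[string]interface{}{}

	nk := key.NewNode().Public()

	// RegisterOIDC: stash the key, prefix stripped, under the state.
	state := "random-state-value"
	cache[state] = strings.TrimPrefix(nk.String(), "nodekey:")

	// OIDCCallback: survive a string type assertion and a re-parse.
	stored, ok := cache[state].(string)
	if !ok {
		panic("state value is not a string")
	}

	var parsed key.NodePublic
	if err := parsed.UnmarshalText([]byte("nodekey:" + stored)); err != nil {
		panic(err)
	}

	fmt.Println(parsed == nk) // true
}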
|
@ -596,13 +596,10 @@ func excludeCorrectlyTaggedNodes(
|
|||
}
|
||||
// for each node if tag is in tags list, don't append it.
|
||||
for _, node := range nodes {
|
||||
hi := node.GetHostInfo()
|
||||
|
||||
found := false
|
||||
|
||||
if node.Hostinfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, t := range node.Hostinfo.RequestTags {
|
||||
for _, t := range hi.RequestTags {
|
||||
if util.StringOrPrefixListContains(tags, t) {
|
||||
found = true
|
||||
|
||||
|
@ -674,18 +671,14 @@ func expandOwnersFromTag(
|
|||
pol *ACLPolicy,
|
||||
tag string,
|
||||
) ([]string, error) {
|
||||
noTagErr := fmt.Errorf(
|
||||
var owners []string
|
||||
ows, ok := pol.TagOwners[tag]
|
||||
if !ok {
|
||||
return []string{}, fmt.Errorf(
|
||||
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
|
||||
ErrInvalidTag,
|
||||
tag,
|
||||
)
|
||||
if pol == nil {
|
||||
return []string{}, noTagErr
|
||||
}
|
||||
var owners []string
|
||||
ows, ok := pol.TagOwners[tag]
|
||||
if !ok {
|
||||
return []string{}, noTagErr
|
||||
}
|
||||
for _, owner := range ows {
|
||||
if isGroup(owner) {
|
||||
|
@ -794,11 +787,8 @@ func (pol *ACLPolicy) expandIPsFromTag(
|
|||
for _, user := range owners {
|
||||
nodes := filterNodesByUser(nodes, user)
|
||||
for _, node := range nodes {
|
||||
if node.Hostinfo == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if util.StringOrPrefixListContains(node.Hostinfo.RequestTags, alias) {
|
||||
hi := node.GetHostInfo()
|
||||
if util.StringOrPrefixListContains(hi.RequestTags, alias) {
|
||||
node.IPAddresses.AppendToIPSet(&build)
|
||||
}
|
||||
}
|
||||
|
@ -892,7 +882,7 @@ func (pol *ACLPolicy) TagsOfNode(
|
|||
|
||||
validTagMap := make(map[string]bool)
|
||||
invalidTagMap := make(map[string]bool)
|
||||
for _, tag := range node.Hostinfo.RequestTags {
|
||||
for _, tag := range node.HostInfo.RequestTags {
|
||||
owners, err := expandOwnersFromTag(pol, tag)
|
||||
if errors.Is(err, ErrInvalidTag) {
|
||||
invalidTagMap[tag] = true
|
||||
|
|
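The expandOwnersFromTag hunks above differ only in where the missing-owner error is built; the underlying lookup is a map access on the policy's TagOwners. A simplified, self-contained sketch of that lookup with trimmed types; group expansion and the full error text are elided.

package main

import (
	"errors"
	"fmt"
)

var ErrInvalidTag = errors.New("invalid tag")

type ACLPolicy struct {
	TagOwners map[string][]string
}

// expandOwnersFromTag fails with ErrInvalidTag when the tag has no
// owner entry (or the policy itself is nil), as in the hunks above.
func expandOwnersFromTag(pol *ACLPolicy, tag string) ([]string, error) {
	if pol == nil {
		return nil, fmt.Errorf("%w. %v isn't owned by a TagOwner", ErrInvalidTag, tag)
	}

	ows, ok := pol.TagOwners[tag]
	if !ok {
		return nil, fmt.Errorf("%w. %v isn't owned by a TagOwner", ErrInvalidTag, tag)
	}

	return ows, nil
}

func main() {
	pol := &ACLPolicy{TagOwners: map[string][]string{"tag:web": {"joe"}}}
	owners, err := expandOwnersFromTag(pol, "tag:web")
	fmt.Println(owners, err) // [joe] <nil>
}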
|
@ -16,6 +16,10 @@ import (
|
|||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
var ipComparer = cmp.Comparer(func(x, y netip.Addr) bool {
|
||||
return x.Compare(y) == 0
|
||||
})
|
||||
|
||||
func Test(t *testing.T) {
|
||||
check.TestingT(t)
|
||||
}
|
||||
|
@ -397,7 +401,6 @@ acls:
|
|||
User: types.User{
|
||||
Name: "testuser",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
})
|
||||
|
||||
|
@ -948,7 +951,7 @@ func Test_listNodesInUser(t *testing.T) {
|
|||
t.Run(test.name, func(t *testing.T) {
|
||||
got := filterNodesByUser(test.args.nodes, test.args.user)
|
||||
|
||||
if diff := cmp.Diff(test.want, got, util.Comparers...); diff != "" {
|
||||
if diff := cmp.Diff(test.want, got); diff != "" {
|
||||
t.Errorf("listNodesInUser() = (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
@ -1244,7 +1247,7 @@ func Test_expandAlias(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:hr-webserver"},
|
||||
|
@ -1255,7 +1258,7 @@ func Test_expandAlias(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:hr-webserver"},
|
||||
|
@ -1385,7 +1388,7 @@ func Test_expandAlias(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:hr-webserver"},
|
||||
|
@ -1423,7 +1426,7 @@ func Test_expandAlias(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
|
@ -1434,7 +1437,7 @@ func Test_expandAlias(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
|
@ -1445,14 +1448,12 @@ func Test_expandAlias(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: types.User{Name: "marc"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
&types.Node{
|
||||
IPAddresses: types.NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1502,7 +1503,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
|
@ -1513,7 +1514,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
|
@ -1524,7 +1525,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
user: "joe",
|
||||
|
@ -1533,7 +1533,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
&types.Node{
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1554,7 +1553,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
|
@ -1565,7 +1564,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
|
@ -1576,7 +1575,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
user: "joe",
|
||||
|
@ -1585,7 +1583,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
&types.Node{
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1601,7 +1598,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "foo",
|
||||
RequestTags: []string{"tag:accountant-webserver"},
|
||||
|
@ -1613,14 +1610,12 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
},
|
||||
User: types.User{Name: "joe"},
|
||||
ForcedTags: []string{"tag:accountant-webserver"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
&types.Node{
|
||||
IPAddresses: types.NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
user: "joe",
|
||||
|
@ -1629,7 +1624,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
&types.Node{
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1645,7 +1639,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "hr-web1",
|
||||
RequestTags: []string{"tag:hr-webserver"},
|
||||
|
@ -1656,7 +1650,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "hr-web2",
|
||||
RequestTags: []string{"tag:hr-webserver"},
|
||||
|
@ -1667,7 +1661,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
user: "joe",
|
||||
|
@ -1678,7 +1671,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "hr-web1",
|
||||
RequestTags: []string{"tag:hr-webserver"},
|
||||
|
@ -1689,7 +1682,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
OS: "centos",
|
||||
Hostname: "hr-web2",
|
||||
RequestTags: []string{"tag:hr-webserver"},
|
||||
|
@ -1700,7 +1693,6 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: types.User{Name: "joe"},
|
||||
Hostinfo: &tailcfg.Hostinfo{},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1712,7 +1704,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
|||
test.args.nodes,
|
||||
test.args.user,
|
||||
)
|
||||
if diff := cmp.Diff(test.want, got, util.Comparers...); diff != "" {
|
||||
if diff := cmp.Diff(test.want, got, ipComparer); diff != "" {
|
||||
t.Errorf("excludeCorrectlyTaggedNodes() (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
@ -1943,7 +1935,7 @@ func Test_getTags(t *testing.T) {
|
|||
User: types.User{
|
||||
Name: "joe",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
RequestTags: []string{"tag:valid"},
|
||||
},
|
||||
},
|
||||
|
@ -1963,7 +1955,7 @@ func Test_getTags(t *testing.T) {
|
|||
User: types.User{
|
||||
Name: "joe",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
RequestTags: []string{"tag:valid", "tag:invalid"},
|
||||
},
|
||||
},
|
||||
|
@ -1983,7 +1975,7 @@ func Test_getTags(t *testing.T) {
|
|||
User: types.User{
|
||||
Name: "joe",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
RequestTags: []string{
|
||||
"tag:invalid",
|
||||
"tag:valid",
|
||||
|
@ -2007,7 +1999,7 @@ func Test_getTags(t *testing.T) {
|
|||
User: types.User{
|
||||
Name: "joe",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
RequestTags: []string{"tag:invalid", "very-invalid"},
|
||||
},
|
||||
},
|
||||
|
@ -2023,7 +2015,7 @@ func Test_getTags(t *testing.T) {
|
|||
User: types.User{
|
||||
Name: "joe",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
HostInfo: types.HostInfo{
|
||||
RequestTags: []string{"tag:invalid", "very-invalid"},
|
||||
},
|
||||
},
|
||||
|
@ -2064,6 +2056,10 @@ func Test_getTags(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_getFilteredByACLPeers(t *testing.T) {
|
||||
ipComparer := cmp.Comparer(func(x, y netip.Addr) bool {
|
||||
return x.Compare(y) == 0
|
||||
})
|
||||
|
||||
type args struct {
|
||||
nodes types.Nodes
|
||||
rules []tailcfg.FilterRule
|
||||
|
@ -2727,7 +2723,7 @@ func Test_getFilteredByACLPeers(t *testing.T) {
|
|||
tt.args.nodes,
|
||||
tt.args.rules,
|
||||
)
|
||||
if diff := cmp.Diff(tt.want, got, util.Comparers...); diff != "" {
|
||||
if diff := cmp.Diff(tt.want, got, ipComparer); diff != "" {
|
||||
t.Errorf("FilterNodesByACL() unexpected result (-want +got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
|
@ -2990,6 +2986,9 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
|
|||
|
||||
node := &types.Node{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testnodes",
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: 0,
|
||||
|
@ -2997,7 +2996,7 @@ func TestValidExpandTagOwnersInSources(t *testing.T) {
|
|||
Name: "user1",
|
||||
},
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &hostInfo,
|
||||
HostInfo: types.HostInfo(hostInfo),
|
||||
}
|
||||
|
||||
pol := &ACLPolicy{
|
||||
|
@ -3042,6 +3041,9 @@ func TestInvalidTagValidUser(t *testing.T) {
|
|||
|
||||
node := &types.Node{
|
||||
ID: 1,
|
||||
MachineKey: "12345",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testnodes",
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: 1,
|
||||
|
@ -3049,7 +3051,7 @@ func TestInvalidTagValidUser(t *testing.T) {
|
|||
Name: "user1",
|
||||
},
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &hostInfo,
|
||||
HostInfo: types.HostInfo(hostInfo),
|
||||
}
|
||||
|
||||
pol := &ACLPolicy{
|
||||
|
@ -3093,6 +3095,9 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
|
|||
|
||||
node := &types.Node{
|
||||
ID: 1,
|
||||
MachineKey: "12345",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testnodes",
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: 1,
|
||||
|
@ -3100,7 +3105,7 @@ func TestValidExpandTagOwnersInDestinations(t *testing.T) {
|
|||
Name: "user1",
|
||||
},
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &hostInfo,
|
||||
HostInfo: types.HostInfo(hostInfo),
|
||||
}
|
||||
|
||||
pol := &ACLPolicy{
|
||||
|
@ -3154,6 +3159,9 @@ func TestValidTagInvalidUser(t *testing.T) {
|
|||
|
||||
node := &types.Node{
|
||||
ID: 1,
|
||||
MachineKey: "12345",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "webserver",
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: 1,
|
||||
|
@ -3161,7 +3169,7 @@ func TestValidTagInvalidUser(t *testing.T) {
|
|||
Name: "user1",
|
||||
},
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &hostInfo,
|
||||
HostInfo: types.HostInfo(hostInfo),
|
||||
}
|
||||
|
||||
hostInfo2 := tailcfg.Hostinfo{
|
||||
|
@ -3171,6 +3179,9 @@ func TestValidTagInvalidUser(t *testing.T) {
|
|||
|
||||
nodes2 := &types.Node{
|
||||
ID: 2,
|
||||
MachineKey: "56789",
|
||||
NodeKey: "bar2",
|
||||
DiscoKey: "faab",
|
||||
Hostname: "user",
|
||||
IPAddresses: types.NodeAddresses{netip.MustParseAddr("100.64.0.2")},
|
||||
UserID: 1,
|
||||
|
@ -3178,7 +3189,7 @@ func TestValidTagInvalidUser(t *testing.T) {
|
|||
Name: "user1",
|
||||
},
|
||||
RegisterMethod: util.RegisterMethodAuthKey,
|
||||
Hostinfo: &hostInfo2,
|
||||
HostInfo: types.HostInfo(hostInfo2),
|
||||
}
|
||||
|
||||
pol := &ACLPolicy{
|
||||
|
|
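The test hunks above swap util.Comparers for a local ipComparer. The reason it is needed: netip.Addr has unexported fields and no Equal method, so go-cmp panics on it unless an explicit comparer is supplied. A runnable sketch of the same comparer:

package main

import (
	"fmt"
	"net/netip"

	"github.com/google/go-cmp/cmp"
)

// ipComparer is the comparer the tests above re-introduce: compare
// addresses by value instead of reflecting over unexported fields.
var ipComparer = cmp.Comparer(func(x, y netip.Addr) bool {
	return x.Compare(y) == 0
})

func main() {
	a := []netip.Addr{netip.MustParseAddr("100.64.0.1")}
	b := []netip.Addr{netip.MustParseAddr("100.64.0.2")}

	// Empty string when equal, a readable diff otherwise.
	fmt.Println(cmp.Diff(a, b, ipComparer))
}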
|
@ -8,8 +8,8 @@ import (
|
|||
|
||||
"github.com/juanfont/headscale/hscontrol/mapper"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
xslices "golang.org/x/exp/slices"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
|
@ -26,32 +26,35 @@ type UpdateNode func()
|
|||
func logPollFunc(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
node *types.Node,
|
||||
isNoise bool,
|
||||
) (func(string), func(error, string)) {
|
||||
return func(msg string) {
|
||||
log.Info().
|
||||
Caller().
|
||||
Bool("noise", isNoise).
|
||||
Bool("readOnly", mapRequest.ReadOnly).
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Str("node_key", node.NodeKey.ShortString()).
|
||||
Str("node_key", node.NodeKey).
|
||||
Str("node", node.Hostname).
|
||||
Msg(msg)
|
||||
},
|
||||
func(err error, msg string) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Bool("noise", isNoise).
|
||||
Bool("readOnly", mapRequest.ReadOnly).
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Str("node_key", node.NodeKey.ShortString()).
|
||||
Str("node_key", node.NodeKey).
|
||||
Str("node", node.Hostname).
|
||||
Err(err).
|
||||
Msg(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// handlePoll ensures the node gets the appropriate updates from either
|
||||
// polling or immediate responses.
|
||||
// handlePoll is the common code for the legacy and Noise protocols to
|
||||
// manage the poll loop.
|
||||
//
|
||||
//nolint:gocyclo
|
||||
func (h *Headscale) handlePoll(
|
||||
|
@ -59,10 +62,12 @@ func (h *Headscale) handlePoll(
|
|||
ctx context.Context,
|
||||
node *types.Node,
|
||||
mapRequest tailcfg.MapRequest,
|
||||
isNoise bool,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) {
|
||||
logInfo, logErr := logPollFunc(mapRequest, node)
|
||||
logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
|
||||
|
||||
// This is the mechanism where the node gives us information about its
|
||||
// This is the mechanism where the node gives us information about its
|
||||
// current configuration.
|
||||
//
|
||||
// If OmitPeers is true, Stream is false, and ReadOnly is false,
|
||||
|
@ -70,112 +75,46 @@ func (h *Headscale) handlePoll(
|
|||
// breaking existing long-polling (Stream == true) connections.
|
||||
// In this case, the server can omit the entire response; the client
|
||||
// only checks the HTTP response status code.
|
||||
// TODO(kradalby): remove ReadOnly when we only support capVer 68+
|
||||
if mapRequest.OmitPeers && !mapRequest.Stream && !mapRequest.ReadOnly {
|
||||
log.Info().
|
||||
Caller().
|
||||
Bool("noise", isNoise).
|
||||
Bool("readOnly", mapRequest.ReadOnly).
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Str("node_key", node.NodeKey.ShortString()).
|
||||
Str("node_key", node.NodeKey).
|
||||
Str("node", node.Hostname).
|
||||
Int("cap_ver", int(mapRequest.Version)).
|
||||
Msg("Received update")
|
||||
Strs("endpoints", node.Endpoints).
|
||||
Msg("Received endpoint update")
|
||||
|
||||
change := node.PeerChangeFromMapRequest(mapRequest)
|
||||
now := time.Now().UTC()
|
||||
node.LastSeen = &now
|
||||
node.Hostname = mapRequest.Hostinfo.Hostname
|
||||
node.HostInfo = types.HostInfo(*mapRequest.Hostinfo)
|
||||
node.DiscoKey = util.DiscoPublicKeyStripPrefix(mapRequest.DiscoKey)
|
||||
node.Endpoints = mapRequest.Endpoints
|
||||
|
||||
online := h.nodeNotifier.IsConnected(node.MachineKey)
|
||||
change.Online = &online
|
||||
if err := h.db.NodeSave(node); err != nil {
|
||||
logErr(err, "Failed to persist/update node in the database")
|
||||
http.Error(writer, "", http.StatusInternalServerError)
|
||||
|
||||
node.ApplyPeerChange(&change)
|
||||
return
|
||||
}
|
||||
|
||||
hostInfoChange := node.Hostinfo.Equal(mapRequest.Hostinfo)
|
||||
|
||||
logTracePeerChange(node.Hostname, hostInfoChange, &change)
|
||||
|
||||
// Check if the Hostinfo of the node has changed.
|
||||
// If it has changed, check if there has been a change to
|
||||
// the routable IPs of the host and update them in
|
||||
// the database. Then send a Changed update
|
||||
// (containing the whole node object) to peers to inform about
|
||||
// the route change.
|
||||
// If the hostinfo has changed, but not the routes, just update
|
||||
// hostinfo and let the function continue.
|
||||
if !hostInfoChange {
|
||||
oldRoutes := node.Hostinfo.RoutableIPs
|
||||
newRoutes := mapRequest.Hostinfo.RoutableIPs
|
||||
|
||||
oldServicesCount := len(node.Hostinfo.Services)
|
||||
newServicesCount := len(mapRequest.Hostinfo.Services)
|
||||
|
||||
node.Hostinfo = mapRequest.Hostinfo
|
||||
|
||||
sendUpdate := false
|
||||
|
||||
// Route changes come as part of Hostinfo, which means that
|
||||
// when an update comes, the Node Route logic needs to run.
|
||||
// This will require a "change" in comparison to a "patch",
|
||||
// which is more costly.
|
||||
if !xslices.Equal(oldRoutes, newRoutes) {
|
||||
var err error
|
||||
sendUpdate, err = h.db.SaveNodeRoutes(node)
|
||||
err := h.db.SaveNodeRoutes(node)
|
||||
if err != nil {
|
||||
logErr(err, "Error processing node routes")
|
||||
http.Error(writer, "", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Services is mostly useful for discovery and not critical,
|
||||
// except for peerapi, which is how nodes talk to each other.
|
||||
// If peerapi was not part of the initial mapresponse, we
|
||||
// need to make sure it's sent out later as it is needed for
|
||||
// Taildrop.
|
||||
// TODO(kradalby): Length comparison is a bit naive, replace.
|
||||
if oldServicesCount != newServicesCount {
|
||||
sendUpdate = true
|
||||
}
|
||||
|
||||
if sendUpdate {
|
||||
if err := h.db.NodeSave(node); err != nil {
|
||||
logErr(err, "Failed to persist/update node in the database")
|
||||
http.Error(writer, "", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
h.nodeNotifier.NotifyWithIgnore(
|
||||
types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{node},
|
||||
Message: "called from handlePoll -> update -> new hostinfo",
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
h.nodeNotifier.NotifyWithIgnore(
|
||||
stateUpdate,
|
||||
node.MachineKey.String())
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.db.NodeSave(node); err != nil {
|
||||
logErr(err, "Failed to persist/update node in the database")
|
||||
http.Error(writer, "", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{&change},
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
h.nodeNotifier.NotifyWithIgnore(
|
||||
stateUpdate,
|
||||
node.MachineKey.String())
|
||||
}
|
||||
Changed: types.Nodes{node},
|
||||
},
|
||||
node.MachineKey)
|
||||
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
if f, ok := writer.(http.Flusher); ok {
|
||||
|
@ -183,7 +122,7 @@ func (h *Headscale) handlePoll(
|
|||
}
|
||||
|
||||
return
|
||||
} else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly {
|
||||
|
||||
// ReadOnly is whether the client just wants to fetch the
|
||||
// MapResponse, without updating their Endpoints. The
|
||||
// Endpoints field will be ignored and LastSeen will not be
|
||||
|
@ -192,7 +131,7 @@ func (h *Headscale) handlePoll(
|
|||
// The intended use is for clients to discover the DERP map at
|
||||
// start-up before their first real endpoint update.
|
||||
} else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly {
|
||||
h.handleLiteRequest(writer, node, mapRequest)
|
||||
h.handleLiteRequest(writer, node, mapRequest, isNoise, capVer)
|
||||
|
||||
return
|
||||
} else if mapRequest.OmitPeers && mapRequest.Stream {
|
||||
|
@ -201,39 +140,12 @@ func (h *Headscale) handlePoll(
|
|||
return
|
||||
}
|
||||
|
||||
change := node.PeerChangeFromMapRequest(mapRequest)
|
||||
|
||||
// A stream is being set up, the node is Online
|
||||
online := true
|
||||
change.Online = &online
|
||||
|
||||
node.ApplyPeerChange(&change)
|
||||
|
||||
// Only save HostInfo if changed, update routes if changed
|
||||
// TODO(kradalby): Remove when capver is over 68
|
||||
if !node.Hostinfo.Equal(mapRequest.Hostinfo) {
|
||||
oldRoutes := node.Hostinfo.RoutableIPs
|
||||
newRoutes := mapRequest.Hostinfo.RoutableIPs
|
||||
|
||||
node.Hostinfo = mapRequest.Hostinfo
|
||||
|
||||
if !xslices.Equal(oldRoutes, newRoutes) {
|
||||
_, err := h.db.SaveNodeRoutes(node)
|
||||
if err != nil {
|
||||
logErr(err, "Error processing node routes")
|
||||
http.Error(writer, "", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.db.NodeSave(node); err != nil {
|
||||
logErr(err, "Failed to persist/update node in the database")
|
||||
http.Error(writer, "", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
now := time.Now().UTC()
|
||||
node.LastSeen = &now
|
||||
node.Hostname = mapRequest.Hostinfo.Hostname
|
||||
node.HostInfo = types.HostInfo(*mapRequest.Hostinfo)
|
||||
node.DiscoKey = util.DiscoPublicKeyStripPrefix(mapRequest.DiscoKey)
|
||||
node.Endpoints = mapRequest.Endpoints
|
||||
|
||||
// When a node connects to control, list the peers it has at
|
||||
// that given point, further updates are kept in memory in
|
||||
|
@ -247,14 +159,12 @@ func (h *Headscale) handlePoll(
|
|||
return
|
||||
}
|
||||
|
||||
for _, peer := range peers {
|
||||
online := h.nodeNotifier.IsConnected(peer.MachineKey)
|
||||
peer.IsOnline = &online
|
||||
}
|
||||
|
||||
mapp := mapper.NewMapper(
|
||||
node,
|
||||
peers,
|
||||
h.privateKey2019,
|
||||
isNoise,
|
||||
capVer,
|
||||
h.DERPMap,
|
||||
h.cfg.BaseDomain,
|
||||
h.cfg.DNSConfig,
|
||||
|
@ -262,6 +172,11 @@ func (h *Headscale) handlePoll(
|
|||
h.cfg.RandomizeClientPort,
|
||||
)
|
||||
|
||||
err = h.db.SaveNodeRoutes(node)
|
||||
if err != nil {
|
||||
logErr(err, "Error processing node routes")
|
||||
}
|
||||
|
||||
// update ACLRules with peer information (to update server tags if necessary)
|
||||
if h.ACLPolicy != nil {
|
||||
// update routes with peer information
|
||||
|
@ -271,6 +186,14 @@ func (h *Headscale) handlePoll(
|
|||
}
|
||||
}
|
||||
|
||||
// TODO(kradalby): Save specific stuff, not whole object.
|
||||
if err := h.db.NodeSave(node); err != nil {
|
||||
logErr(err, "Failed to persist/update node in the database")
|
||||
http.Error(writer, "", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
logInfo("Sending initial map")
|
||||
|
||||
mapResp, err := mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
|
||||
|
@ -295,26 +218,18 @@ func (h *Headscale) handlePoll(
|
|||
return
|
||||
}
|
||||
|
||||
stateUpdate := types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
ChangeNodes: types.Nodes{node},
|
||||
Message: "called from handlePoll -> new node added",
|
||||
}
|
||||
if stateUpdate.Valid() {
|
||||
h.nodeNotifier.NotifyWithIgnore(
|
||||
stateUpdate,
|
||||
node.MachineKey.String())
|
||||
}
|
||||
types.StateUpdate{
|
||||
Type: types.StatePeerChanged,
|
||||
Changed: types.Nodes{node},
|
||||
},
|
||||
node.MachineKey)
|
||||
|
||||
// Set up the client stream
|
||||
h.pollNetMapStreamWG.Add(1)
|
||||
defer h.pollNetMapStreamWG.Done()
|
||||
|
||||
// Use a buffered channel in case a node is not fully ready
|
||||
// to receive a message to make sure we don't block the entire
|
||||
// notifier.
|
||||
// 12 is arbitrarily chosen.
|
||||
updateChan := make(chan types.StateUpdate, 12)
|
||||
updateChan := make(chan types.StateUpdate)
|
||||
defer closeChanWithLog(updateChan, node.Hostname, "updateChan")
|
||||
|
||||
// Register the node's update channel
|
||||
|
@ -328,10 +243,6 @@ func (h *Headscale) handlePoll(
|
|||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
if len(node.Routes) > 0 {
|
||||
go h.db.EnsureFailoverRouteIsAvailable(node)
|
||||
}
|
||||
|
||||
for {
|
||||
logInfo("Waiting for update on stream channel")
|
||||
select {
|
||||
|
@ -361,7 +272,14 @@ func (h *Headscale) handlePoll(
|
|||
// One alternative is to split these different channels into
|
||||
// goroutines, but then you might have a problem without a lock
|
||||
// if a keepalive is written at the same time as an update.
|
||||
go h.updateNodeOnlineStatus(true, node)
|
||||
go func() {
|
||||
err = h.db.UpdateLastSeen(node)
|
||||
if err != nil {
|
||||
logErr(err, "Cannot update node LastSeen")
|
||||
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
case update := <-updateChan:
|
||||
logInfo("Received update")
|
||||
|
@ -371,43 +289,18 @@ func (h *Headscale) handlePoll(
|
|||
var err error
|
||||
|
||||
switch update.Type {
|
||||
case types.StateFullUpdate:
|
||||
logInfo("Sending Full MapResponse")
|
||||
|
||||
data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
|
||||
case types.StatePeerChanged:
|
||||
logInfo(fmt.Sprintf("Sending Changed MapResponse: %s", update.Message))
|
||||
|
||||
for _, node := range update.ChangeNodes {
|
||||
// If a node is not reported to be online, it might be
|
||||
// because the value is outdated; check with the notifier.
|
||||
// However, if it is set to Online, and not in the notifier,
|
||||
// this might be because it has announced itself, but not
|
||||
// reached the stage to actually create the notifier channel.
|
||||
if node.IsOnline != nil && !*node.IsOnline {
|
||||
isOnline := h.nodeNotifier.IsConnected(node.MachineKey)
|
||||
node.IsOnline = &isOnline
|
||||
}
|
||||
}
|
||||
|
||||
data, err = mapp.PeerChangedResponse(mapRequest, node, update.ChangeNodes, h.ACLPolicy, update.Message)
|
||||
case types.StatePeerChangedPatch:
|
||||
logInfo("Sending PeerChangedPatch MapResponse")
|
||||
data, err = mapp.PeerChangedPatchResponse(mapRequest, node, update.ChangePatches, h.ACLPolicy)
|
||||
logInfo("Sending PeerChanged MapResponse")
|
||||
data, err = mapp.PeerChangedResponse(mapRequest, node, update.Changed, h.ACLPolicy)
|
||||
case types.StatePeerRemoved:
|
||||
logInfo("Sending PeerRemoved MapResponse")
|
||||
data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed)
|
||||
case types.StateSelfUpdate:
|
||||
if len(update.ChangeNodes) == 1 {
|
||||
logInfo("Sending SelfUpdate MapResponse")
|
||||
node = update.ChangeNodes[0]
|
||||
data, err = mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy)
|
||||
} else {
|
||||
logInfo("SelfUpdate contained too many nodes, this is likely a bug in the code, please report.")
|
||||
}
|
||||
case types.StateDERPUpdated:
|
||||
logInfo("Sending DERPUpdate MapResponse")
|
||||
data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap)
|
||||
case types.StateFullUpdate:
|
||||
logInfo("Sending Full MapResponse")
|
||||
data, err = mapp.FullMapResponse(mapRequest, node, h.ACLPolicy)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
@ -416,8 +309,6 @@ func (h *Headscale) handlePoll(
|
|||
return
|
||||
}
|
||||
|
||||
// Only send update if there is change
|
||||
if data != nil {
|
||||
_, err = writer.Write(data)
|
||||
if err != nil {
|
||||
logErr(err, "Could not write the map response")
|
||||
|
@ -436,25 +327,37 @@ func (h *Headscale) handlePoll(
|
|||
return
|
||||
}
|
||||
|
||||
// See comment in keepAliveTicker
|
||||
go func() {
|
||||
err = h.db.UpdateLastSeen(node)
|
||||
if err != nil {
|
||||
logErr(err, "Cannot update node LastSeen")
|
||||
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info().
|
||||
Caller().
|
||||
Bool("noise", isNoise).
|
||||
Bool("readOnly", mapRequest.ReadOnly).
|
||||
Bool("omitPeers", mapRequest.OmitPeers).
|
||||
Bool("stream", mapRequest.Stream).
|
||||
Str("node_key", node.NodeKey.ShortString()).
|
||||
Str("machine_key", node.MachineKey.ShortString()).
|
||||
Str("node_key", node.NodeKey).
|
||||
Str("node", node.Hostname).
|
||||
TimeDiff("timeSpent", time.Now(), now).
|
||||
Msg("update sent")
|
||||
}
|
||||
|
||||
case <-ctx.Done():
|
||||
logInfo("The client has closed the connection")
|
||||
|
||||
go h.updateNodeOnlineStatus(false, node)
|
||||
go func() {
|
||||
err = h.db.UpdateLastSeen(node)
|
||||
if err != nil {
|
||||
logErr(err, "Cannot update node LastSeen")
|
||||
|
||||
// Failover the node's routes if any.
|
||||
go h.db.FailoverNodeRoutesWithNotify(node)
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
// The connection has been closed, so we can stop polling.
|
||||
return
|
||||
|
@ -467,36 +370,6 @@ func (h *Headscale) handlePoll(
|
|||
}
|
||||
}
|
||||
|
||||
// updateNodeOnlineStatus records the last seen status of a node and notifies peers
|
||||
// about changes in their online/offline status.
|
||||
// It takes a StateUpdateType of either StatePeerOnlineChanged or StatePeerOfflineChanged.
|
||||
func (h *Headscale) updateNodeOnlineStatus(online bool, node *types.Node) {
|
||||
now := time.Now()
|
||||
|
||||
node.LastSeen = &now
|
||||
|
||||
statusUpdate := types.StateUpdate{
|
||||
Type: types.StatePeerChangedPatch,
|
||||
ChangePatches: []*tailcfg.PeerChange{
|
||||
{
|
||||
NodeID: tailcfg.NodeID(node.ID),
|
||||
Online: &online,
|
||||
LastSeen: &now,
|
||||
},
|
||||
},
|
||||
}
|
||||
if statusUpdate.Valid() {
|
||||
h.nodeNotifier.NotifyWithIgnore(statusUpdate, node.MachineKey.String())
|
||||
}
|
||||
|
||||
err := h.db.UpdateLastSeen(node)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Cannot update node LastSeen")
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func closeChanWithLog[C chan []byte | chan struct{} | chan types.StateUpdate](channel C, node, name string) {
|
||||
log.Trace().
|
||||
Str("handler", "PollNetMap").
|
||||
|
@ -511,12 +384,19 @@ func (h *Headscale) handleLiteRequest(
|
|||
writer http.ResponseWriter,
|
||||
node *types.Node,
|
||||
mapRequest tailcfg.MapRequest,
|
||||
isNoise bool,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) {
|
||||
logInfo, logErr := logPollFunc(mapRequest, node)
|
||||
logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
|
||||
|
||||
mapp := mapper.NewMapper(
|
||||
node,
|
||||
// TODO(kradalby): It might not be acceptable to send
|
||||
// an empty peer list here.
|
||||
types.Nodes{},
|
||||
h.privateKey2019,
|
||||
isNoise,
|
||||
capVer,
|
||||
h.DERPMap,
|
||||
h.cfg.BaseDomain,
|
||||
h.cfg.DNSConfig,
|
||||
|
@ -541,38 +421,3 @@ func (h *Headscale) handleLiteRequest(
		logErr(err, "Failed to write response")
	}
}

func logTracePeerChange(hostname string, hostinfoChange bool, change *tailcfg.PeerChange) {
	trace := log.Trace().Str("node_id", change.NodeID.String()).Str("hostname", hostname)

	if change.Key != nil {
		trace = trace.Str("node_key", change.Key.ShortString())
	}

	if change.DiscoKey != nil {
		trace = trace.Str("disco_key", change.DiscoKey.ShortString())
	}

	if change.Online != nil {
		trace = trace.Bool("online", *change.Online)
	}

	if change.Endpoints != nil {
		eps := make([]string, len(change.Endpoints))
		for idx, ep := range change.Endpoints {
			eps[idx] = ep.String()
		}

		trace = trace.Strs("endpoints", eps)
	}

	if hostinfoChange {
		trace = trace.Bool("hostinfo_changed", hostinfoChange)
	}

	if change.DERPRegion != 0 {
		trace = trace.Int("derp_region", change.DERPRegion)
	}

	trace.Time("last_seen", *change.LastSeen).Msg("PeerChange received")
}
108
hscontrol/poll_legacy.go
Normal file

@ -0,0 +1,108 @@
//go:build ts2019

package hscontrol

import (
	"errors"
	"io"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/rs/zerolog/log"
	"gorm.io/gorm"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

// PollNetMapHandler takes care of /machine/:id/map
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST things like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At this moment the updates are sent in a rather crude way, but they work.
func (h *Headscale) PollNetMapHandler(
	writer http.ResponseWriter,
	req *http.Request,
) {
	vars := mux.Vars(req)
	machineKeyStr, ok := vars["mkey"]
	if !ok || machineKeyStr == "" {
		log.Error().
			Str("handler", "PollNetMap").
			Msg("No machine key in request")
		http.Error(writer, "No machine key in request", http.StatusBadRequest)

		return
	}
	log.Trace().
		Str("handler", "PollNetMap").
		Str("id", machineKeyStr).
		Msg("PollNetMapHandler called")
	body, _ := io.ReadAll(req.Body)

	var machineKey key.MachinePublic
	err := machineKey.UnmarshalText([]byte(util.MachinePublicKeyEnsurePrefix(machineKeyStr)))
	if err != nil {
		log.Error().
			Str("handler", "PollNetMap").
			Err(err).
			Msg("Cannot parse client key")

		http.Error(writer, "Cannot parse client key", http.StatusBadRequest)

		return
	}
	mapRequest := tailcfg.MapRequest{}
	err = util.DecodeAndUnmarshalNaCl(body, &mapRequest, &machineKey, h.privateKey2019)
	if err != nil {
		log.Error().
			Str("handler", "PollNetMap").
			Err(err).
			Msg("Cannot decode message")
		http.Error(writer, "Cannot decode message", http.StatusBadRequest)

		return
	}

	node, err := h.db.GetNodeByMachineKey(machineKey)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			log.Warn().
				Str("handler", "PollNetMap").
				Msgf("Ignoring request, cannot find node with key %s", machineKey.String())

			http.Error(writer, "", http.StatusUnauthorized)

			return
		}
		log.Error().
			Str("handler", "PollNetMap").
			Msgf("Failed to fetch node from the database with machine key: %s", machineKey.String())
		http.Error(writer, "", http.StatusInternalServerError)

		return
	}

	log.Trace().
		Str("handler", "PollNetMap").
		Str("id", machineKeyStr).
		Str("node", node.Hostname).
		Msg("A node is sending a MapRequest via legacy protocol")

	capVer, err := parseCabailityVersion(req)
	if err != nil && !errors.Is(err, ErrNoCapabilityVersion) {
		log.Error().
			Caller().
			Err(err).
			Msg("failed to parse capVer")
		http.Error(writer, "Internal error", http.StatusInternalServerError)

		return
	}

	h.handlePoll(writer, req.Context(), node, mapRequest, false, capVer)
}
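The //go:build ts2019 constraint ties this legacy handler to the ts2019 build tag that the CI jobs pass via -tags ts2019; without the tag, the file is simply not compiled. A minimal sketch of the tag-pair pattern, with hypothetical file names and a constant invented for illustration:

	// legacy_enabled.go — compiled only with: go build -tags ts2019 ./...
	//go:build ts2019

	package hscontrol

	const legacyProtocolEnabled = true

	// legacy_disabled.go — the counterpart compiled by a default build.
	//go:build !ts2019

	package hscontrol

	const legacyProtocolEnabled = false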
@ -12,10 +12,6 @@ import (
	"tailscale.com/types/key"
)

const (
	MinimumCapVersion tailcfg.CapabilityVersion = 56
)

// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
@ -51,18 +47,6 @@ func (ns *noiseServer) NoisePollNetMapHandler(
		return
	}

	// Reject unsupported versions
	if mapRequest.Version < MinimumCapVersion {
		log.Info().
			Caller().
			Int("min_version", int(MinimumCapVersion)).
			Int("client_version", int(mapRequest.Version)).
			Msg("unsupported client connected")
		http.Error(writer, "Internal error", http.StatusBadRequest)

		return
	}

	ns.nodeKey = mapRequest.NodeKey

	node, err := ns.headscale.db.GetNodeByAnyKey(
@ -89,8 +73,20 @@ func (ns *noiseServer) NoisePollNetMapHandler(
	log.Debug().
		Str("handler", "NoisePollNetMap").
		Str("node", node.Hostname).
		Int("cap_ver", int(mapRequest.Version)).
		Msg("A node sending a MapRequest with Noise protocol")

	ns.headscale.handlePoll(writer, req.Context(), node, mapRequest)
	capVer, err := parseCabailityVersion(req)
	if err != nil && !errors.Is(err, ErrNoCapabilityVersion) {
		log.Error().
			Caller().
			Err(err).
			Msg("failed to parse capVer")
		http.Error(writer, "Internal error", http.StatusInternalServerError)

		return
	}

	// TODO(kradalby): since we are now passing capVer, we could arguably stop passing
	// isNoise, and rather have an isNoise function that takes capVer
	ns.headscale.handlePoll(writer, req.Context(), node, mapRequest, true, capVer)
}
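The TODO above suggests deriving the transport from the capability version instead of threading an isNoise flag through handlePoll. A minimal sketch of that idea; the NoiseCapabilityVersion threshold is an assumption for illustration, not a value taken from this diff:

	// Hypothetical helper: clients at or above an assumed Noise-era
	// capability version are treated as Noise clients.
	const NoiseCapabilityVersion tailcfg.CapabilityVersion = 28 // assumption

	func isNoiseClient(capVer tailcfg.CapabilityVersion) bool {
		return capVer >= NoiseCapabilityVersion
	}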
@ -40,6 +40,7 @@ func (s *Suite) ResetDB(c *check.C) {
		c.Fatal(err)
	}
	cfg := types.Config{
		PrivateKeyPath:      tmpDir + "/private.key",
		NoisePrivateKeyPath: tmpDir + "/noise_private.key",
		DBtype:              "sqlite3",
		DBpath:              tmpDir + "/headscale_test.db",
@ -1,99 +0,0 @@
package hscontrol

import (
	"context"
	"fmt"
	"net/http"
	"os"

	"github.com/tailscale/tailsql/server/tailsql"
	"tailscale.com/tsnet"
	"tailscale.com/tsweb"
	"tailscale.com/types/logger"
)

func runTailSQLService(ctx context.Context, logf logger.Logf, stateDir, dbPath string) error {
	opts := tailsql.Options{
		Hostname: "tailsql-headscale",
		StateDir: stateDir,
		Sources: []tailsql.DBSpec{
			{
				Source: "headscale",
				Label:  "headscale - sqlite",
				Driver: "sqlite",
				URL:    fmt.Sprintf("file:%s?mode=ro", dbPath),
				Named: map[string]string{
					"schema": `select * from sqlite_schema`,
				},
			},
		},
	}

	tsNode := &tsnet.Server{
		Dir:      os.ExpandEnv(opts.StateDir),
		Hostname: opts.Hostname,
		Logf:     logger.Discard,
	}
	// if *doDebugLog {
	// 	tsNode.Logf = logf
	// }
	defer tsNode.Close()

	logf("Starting tailscale (hostname=%q)", opts.Hostname)
	lc, err := tsNode.LocalClient()
	if err != nil {
		return fmt.Errorf("connect local client: %w", err)
	}
	opts.LocalClient = lc // for authentication

	// Make sure the Tailscale node starts up. It might not, if it is a new node
	// and the user did not provide an auth key.
	if st, err := tsNode.Up(ctx); err != nil {
		return fmt.Errorf("starting tailscale: %w", err)
	} else {
		logf("tailscale started, node state %q", st.BackendState)
	}

	// Reaching here, we have a running Tailscale node, now we can set up the
	// HTTP and/or HTTPS plumbing for TailSQL itself.
	tsql, err := tailsql.NewServer(opts)
	if err != nil {
		return fmt.Errorf("creating tailsql server: %w", err)
	}

	lst, err := tsNode.Listen("tcp", ":80")
	if err != nil {
		return fmt.Errorf("listen port 80: %w", err)
	}

	if opts.ServeHTTPS {
		// When serving TLS, add a redirect from HTTP on port 80 to HTTPS on 443.
		certDomains := tsNode.CertDomains()
		if len(certDomains) == 0 {
			return fmt.Errorf("no cert domains available for HTTPS")
		}
		base := "https://" + certDomains[0]
		go http.Serve(lst, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			target := base + r.RequestURI
			http.Redirect(w, r, target, http.StatusPermanentRedirect)
		}))
		// log.Printf("Redirecting HTTP to HTTPS at %q", base)

		// For the real service, start a separate listener.
		// Note: Replaces the port 80 listener.
		var err error
		lst, err = tsNode.ListenTLS("tcp", ":443")
		if err != nil {
			return fmt.Errorf("listen TLS: %w", err)
		}
		logf("enabled serving via HTTPS")
	}

	mux := tsql.NewMux()
	tsweb.Debugger(mux)
	go http.Serve(lst, mux)
	logf("TailSQL started")
	<-ctx.Done()
	logf("TailSQL shutting down...")
	return tsNode.Close()
}
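For context, runTailSQLService exposed the headscale SQLite database read-only over a tailnet via TailSQL. A minimal sketch of how it might have been invoked; the paths are assumptions for illustration:

	// Hypothetical caller; state dir and db path are illustrative only.
	ctx := context.Background()
	err := runTailSQLService(ctx, log.Printf, "/var/lib/headscale/tailsql", "/var/lib/headscale/db.sqlite")
	if err != nil {
		log.Fatalf("tailsql: %v", err)
	}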
@ -12,6 +12,33 @@ import (

var ErrCannotParsePrefix = errors.New("cannot parse prefix")

// HostInfo is a wrapper type around tailscale's Hostinfo that adds
// database "serialization" methods. It lets us use typed values
// throughout the code without marshalling/unmarshalling and error
// checking at every call site.
type HostInfo tailcfg.Hostinfo

func (hi *HostInfo) Scan(destination interface{}) error {
	switch value := destination.(type) {
	case []byte:
		return json.Unmarshal(value, hi)

	case string:
		return json.Unmarshal([]byte(value), hi)

	default:
		return fmt.Errorf("%w: unexpected data type %T", ErrNodeAddressesInvalid, destination)
	}
}

// Value returns the JSON value, implementing the driver.Valuer interface.
func (hi HostInfo) Value() (driver.Value, error) {
	bytes, err := json.Marshal(hi)

	return string(bytes), err
}

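Scan and Value make HostInfo satisfy sql.Scanner and driver.Valuer, so the ORM can persist it as a JSON column without explicit marshalling at call sites. A minimal round-trip sketch under that assumption:

	// Hypothetical round-trip, mirroring what the database layer does.
	hi := HostInfo{Hostname: "node-1"}

	raw, err := hi.Value() // JSON string headed for the DB column
	if err != nil {
		panic(err)
	}

	var out HostInfo
	if err := out.Scan(raw); err != nil { // reading the column back
		panic(err)
	}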
type IPPrefix netip.Prefix

func (i *IPPrefix) Scan(destination interface{}) error {
@ -84,37 +111,20 @@ type StateUpdateType int

const (
	StateFullUpdate StateUpdateType = iota
	// StatePeerChanged is used for updates that need
	// to be calculated with all peers and all policy rules.
	// This would typically be things that include tags, routes
	// and similar.
	StatePeerChanged
	StatePeerChangedPatch
	StatePeerRemoved
	// StateSelfUpdate is used to indicate that the node
	// has changed in control, and the client needs to be
	// informed.
	// The updated node is inside the ChangeNodes field
	// which should have a length of one.
	StateSelfUpdate
	StateDERPUpdated
)

// StateUpdate is an internal message containing information about
// a state change that has happened to the network.
// If type is StateFullUpdate, all fields are ignored.
type StateUpdate struct {
	// The type of update
	Type StateUpdateType

	// ChangeNodes must be set when Type is StatePeerAdded
	// or StatePeerChanged and contains the full node
	// object for added nodes.
	ChangeNodes Nodes

	// ChangePatches must be set when Type is StatePeerChangedPatch
	// and contains a populated PeerChange object.
	ChangePatches []*tailcfg.PeerChange
	// Changed must be set when Type is StatePeerChanged and
	// contain the Node IDs of nodes that have changed.
	Changed Nodes

	// Removed must be set when Type is StatePeerRemoved and
	// contain a list of the nodes that have been removed from
@ -123,40 +133,5 @@ type StateUpdate struct {

	// DERPMap must be set when Type is StateDERPUpdated and
	// contain the new DERP Map.
	DERPMap *tailcfg.DERPMap

	// Additional message for tracking origin or what is being
	// updated, useful for ambiguous updates like StatePeerChanged.
	Message string
}

// Valid reports whether a StateUpdate is correctly filled and
// panics if the mandatory fields for a type are not filled.
// Returns true if valid.
func (su *StateUpdate) Valid() bool {
	switch su.Type {
	case StatePeerChanged:
		if su.ChangeNodes == nil {
			panic("Mandatory field ChangeNodes is not set on StatePeerChanged update")
		}
	case StatePeerChangedPatch:
		if su.ChangePatches == nil {
			panic("Mandatory field ChangePatches is not set on StatePeerChangedPatch update")
		}
	case StatePeerRemoved:
		if su.Removed == nil {
			panic("Mandatory field Removed is not set on StatePeerRemoved update")
		}
	case StateSelfUpdate:
		if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 {
			panic("Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node")
		}
	case StateDERPUpdated:
		if su.DERPMap == nil {
			panic("Mandatory field DERPMap is not set on StateDERPUpdated update")
		}
	}

	return true
	DERPMap tailcfg.DERPMap
}
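A minimal sketch of how a producer might build and check one of these updates before handing it to the notifier (the notifier wiring itself is assumed, not shown here):

	// Hypothetical producer of a patch-style update for a single peer.
	online := true
	update := StateUpdate{
		Type: StatePeerChangedPatch,
		ChangePatches: []*tailcfg.PeerChange{
			{NodeID: tailcfg.NodeID(42), Online: &online},
		},
	}
	if update.Valid() { // panics if ChangePatches were left nil
		// hand the update to the notifier here
	}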
@ -41,6 +41,7 @@ type Config struct {
	EphemeralNodeInactivityTimeout time.Duration
	NodeUpdateCheckInterval        time.Duration
	IPPrefixes                     []netip.Prefix
	PrivateKeyPath                 string
	NoisePrivateKeyPath            string
	BaseDomain                     string
	Log                            LogConfig
@ -115,7 +116,6 @@ type DERPConfig struct {
	ServerRegionID       int
	ServerRegionCode     string
	ServerRegionName     string
	ServerPrivateKeyPath string
	STUNAddr             string
	URLs                 []url.URL
	Paths                []string
@ -294,7 +294,6 @@ func GetDERPConfig() DERPConfig {
	serverRegionCode := viper.GetString("derp.server.region_code")
	serverRegionName := viper.GetString("derp.server.region_name")
	stunAddr := viper.GetString("derp.server.stun_listen_addr")
	privateKeyPath := util.AbsolutePathFromConfigPath(viper.GetString("derp.server.private_key_path"))

	if serverEnabled && stunAddr == "" {
		log.Fatal().
@ -326,7 +325,6 @@ func GetDERPConfig() DERPConfig {
		ServerRegionID:       serverRegionID,
		ServerRegionCode:     serverRegionCode,
		ServerRegionName:     serverRegionName,
		ServerPrivateKeyPath: privateKeyPath,
		STUNAddr:             stunAddr,
		URLs:                 urls,
		Paths:                paths,
@ -592,6 +590,9 @@ func GetHeadscaleConfig() (*Config, error) {
		DisableUpdateCheck: viper.GetBool("disable_check_updates"),

		IPPrefixes: prefixes,
		PrivateKeyPath: util.AbsolutePathFromConfigPath(
			viper.GetString("private_key_path"),
		),
		NoisePrivateKeyPath: util.AbsolutePathFromConfigPath(
			viper.GetString("noise.private_key_path"),
		),
@ -2,7 +2,6 @@ package types

import (
	"database/sql/driver"
	"encoding/json"
	"errors"
	"fmt"
	"net/netip"
@ -12,60 +11,24 @@ import (

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/policy/matcher"
	"github.com/rs/zerolog/log"
	"github.com/juanfont/headscale/hscontrol/util"
	"go4.org/netipx"
	"google.golang.org/protobuf/types/known/timestamppb"
	"gorm.io/gorm"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

var (
	ErrNodeAddressesInvalid = errors.New("failed to parse node addresses")
	ErrHostnameTooLong      = errors.New("hostname too long, cannot exceed 255 ASCII chars")
	ErrNodeHasNoGivenName   = errors.New("node has no given name")
	ErrNodeUserHasNoName    = errors.New("node user has no name")
	ErrHostnameTooLong      = errors.New("hostname too long")
)

// Node is a Headscale client.
type Node struct {
	ID uint64 `gorm:"primary_key"`

	// MachineKeyDatabaseField is the string representation of MachineKey.
	// It is _only_ used for reading and writing the key to the
	// database and should not be used otherwise.
	// Use MachineKey instead.
	MachineKeyDatabaseField string            `gorm:"column:machine_key;unique_index"`
	MachineKey              key.MachinePublic `gorm:"-"`

	// NodeKeyDatabaseField is the string representation of NodeKey.
	// It is _only_ used for reading and writing the key to the
	// database and should not be used otherwise.
	// Use NodeKey instead.
	NodeKeyDatabaseField string         `gorm:"column:node_key"`
	NodeKey              key.NodePublic `gorm:"-"`

	// DiscoKeyDatabaseField is the string representation of DiscoKey.
	// It is _only_ used for reading and writing the key to the
	// database and should not be used otherwise.
	// Use DiscoKey instead.
	DiscoKeyDatabaseField string          `gorm:"column:disco_key"`
	DiscoKey              key.DiscoPublic `gorm:"-"`

	// EndpointsDatabaseField is the string list representation of Endpoints.
	// It is _only_ used for reading and writing the value to the
	// database and should not be used otherwise.
	// Use Endpoints instead.
	EndpointsDatabaseField StringList       `gorm:"column:endpoints"`
	Endpoints              []netip.AddrPort `gorm:"-"`

	// HostinfoDatabaseField is the string representation of Hostinfo.
	// It is _only_ used for reading and writing the value to the
	// database and should not be used otherwise.
	// Use Hostinfo instead.
	HostinfoDatabaseField string            `gorm:"column:host_info"`
	Hostinfo              *tailcfg.Hostinfo `gorm:"-"`

	MachineKey  string `gorm:"type:varchar(64);unique_index"`
	NodeKey     string
	DiscoKey    string
	IPAddresses NodeAddresses

	// Hostname represents the name given by the Tailscale
@ -93,19 +56,30 @@ type Node struct {
	LastSeen *time.Time
	Expiry   *time.Time

	HostInfo  HostInfo
	Endpoints StringList

	Routes []Route

	CreatedAt time.Time
	UpdatedAt time.Time
	DeletedAt *time.Time

	IsOnline *bool `gorm:"-"`
}

type (
	Nodes []*Node
)

func (nodes Nodes) OnlineNodeMap() map[tailcfg.NodeID]bool {
	ret := make(map[tailcfg.NodeID]bool)

	for _, node := range nodes {
		ret[tailcfg.NodeID(node.ID)] = node.IsOnline()
	}

	return ret
}

type NodeAddresses []netip.Addr

func (na NodeAddresses) Sort() {
@ -201,6 +175,21 @@ func (node Node) IsExpired() bool {
	return time.Now().UTC().After(*node.Expiry)
}

// IsOnline returns whether the node is connected to Headscale.
// This is a naive implementation, as we cannot really see
// whether there is a working connection between the client and the server.
func (node *Node) IsOnline() bool {
	if node.LastSeen == nil {
		return false
	}

	if node.IsExpired() {
		return false
	}

	return node.LastSeen.After(time.Now().Add(-KeepAliveInterval))
}

// IsEphemeral returns if the node is registered as an Ephemeral node.
// https://tailscale.com/kb/1111/ephemeral-nodes/
func (node *Node) IsEphemeral() bool {
@ -238,89 +227,19 @@ func (nodes Nodes) FilterByIP(ip netip.Addr) Nodes {
	return found
}

// BeforeSave is a hook that ensures that some values that
// cannot be directly marshalled into database values are stored
// correctly in the database.
// This currently means storing the keys as strings.
func (node *Node) BeforeSave(tx *gorm.DB) error {
	node.MachineKeyDatabaseField = node.MachineKey.String()
	node.NodeKeyDatabaseField = node.NodeKey.String()
	node.DiscoKeyDatabaseField = node.DiscoKey.String()

	var endpoints StringList
	for _, addrPort := range node.Endpoints {
		endpoints = append(endpoints, addrPort.String())
	}

	node.EndpointsDatabaseField = endpoints

	hi, err := json.Marshal(node.Hostinfo)
	if err != nil {
		return fmt.Errorf("failed to marshal Hostinfo to store in db: %w", err)
	}
	node.HostinfoDatabaseField = string(hi)

	return nil
}

// AfterFind is a hook that ensures that Node fields that
// have a different type in the database are unwrapped and populated
// correctly.
// This currently unmarshals all the keys, stored as strings, into
// the proper types.
func (node *Node) AfterFind(tx *gorm.DB) error {
	var machineKey key.MachinePublic
	if err := machineKey.UnmarshalText([]byte(node.MachineKeyDatabaseField)); err != nil {
		return fmt.Errorf("failed to unmarshal machine key from db: %w", err)
	}
	node.MachineKey = machineKey

	var nodeKey key.NodePublic
	if err := nodeKey.UnmarshalText([]byte(node.NodeKeyDatabaseField)); err != nil {
		return fmt.Errorf("failed to unmarshal node key from db: %w", err)
	}
	node.NodeKey = nodeKey

	var discoKey key.DiscoPublic
	if err := discoKey.UnmarshalText([]byte(node.DiscoKeyDatabaseField)); err != nil {
		return fmt.Errorf("failed to unmarshal disco key from db: %w", err)
	}
	node.DiscoKey = discoKey

	endpoints := make([]netip.AddrPort, len(node.EndpointsDatabaseField))
	for idx, ep := range node.EndpointsDatabaseField {
		addrPort, err := netip.ParseAddrPort(ep)
		if err != nil {
			return fmt.Errorf("failed to parse endpoint from db: %w", err)
		}

		endpoints[idx] = addrPort
	}
	node.Endpoints = endpoints

	var hi tailcfg.Hostinfo
	if err := json.Unmarshal([]byte(node.HostinfoDatabaseField), &hi); err != nil {
		log.Trace().Err(err).Msgf("Hostinfo content: %s", node.HostinfoDatabaseField)

		return fmt.Errorf("failed to unmarshal Hostinfo from db: %w", err)
	}
	node.Hostinfo = &hi

	return nil
}

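Because BeforeSave and AfterFind are standard GORM hooks, the string/typed-key conversion is invisible to callers. A minimal round-trip sketch, assuming an open *gorm.DB handle named db:

	// Hypothetical round-trip: GORM runs the hooks automatically.
	if err := db.Save(&node).Error; err != nil { // BeforeSave stringifies the keys
		return err
	}

	var loaded Node
	if err := db.First(&loaded, node.ID).Error; err != nil { // AfterFind re-parses them
		return err
	}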
func (node *Node) Proto() *v1.Node {
	nodeProto := &v1.Node{
		Id:         node.ID,
		MachineKey: node.MachineKey.String(),
		MachineKey: node.MachineKey,

		NodeKey:  node.NodeKey.String(),
		DiscoKey: node.DiscoKey.String(),
		NodeKey:  node.NodeKey,
		DiscoKey: node.DiscoKey,
		IpAddresses: node.IPAddresses.StringSlice(),
		Name:        node.Hostname,
		GivenName:   node.GivenName,
		User:        node.User.Proto(),
		ForcedTags:  node.ForcedTags,
		Online:      node.IsOnline(),

		// TODO(kradalby): Implement register method enum converter
		// RegisterMethod: ,
@ -343,17 +262,14 @@ func (node *Node) Proto() *v1.Node {
	return nodeProto
}

// GetHostInfo returns a Hostinfo struct for the node.
func (node *Node) GetHostInfo() tailcfg.Hostinfo {
	return tailcfg.Hostinfo(node.HostInfo)
}

func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (string, error) {
	var hostname string
	if dnsConfig != nil && dnsConfig.Proxied { // MagicDNS
		if node.GivenName == "" {
			return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeHasNoGivenName)
		}

		if node.User.Name == "" {
			return "", fmt.Errorf("failed to create valid FQDN: %w", ErrNodeUserHasNoName)
		}

		hostname = fmt.Sprintf(
			"%s.%s.%s",
			node.GivenName,
@ -362,7 +278,7 @@ func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (stri
	)
	if len(hostname) > MaxHostnameLength {
		return "", fmt.Errorf(
			"failed to create valid FQDN (%s): %w",
			"hostname %q is too long, it cannot exceed 255 ASCII chars: %w",
			hostname,
			ErrHostnameTooLong,
		)
@ -374,98 +290,49 @@ func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (stri
	return hostname, nil
}

// func (node *Node) String() string {
// 	return node.Hostname
// }

// PeerChangeFromMapRequest takes a MapRequest and compares it to the node
// to produce a PeerChange struct that can be used to update the node and
// inform peers about smaller changes to the node.
// When a field is added to this function, remember to also add it to:
// - node.ApplyPeerChange
// - logTracePeerChange in poll.go
func (node *Node) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange {
	ret := tailcfg.PeerChange{
		NodeID: tailcfg.NodeID(node.ID),
	}

	if node.NodeKey.String() != req.NodeKey.String() {
		ret.Key = &req.NodeKey
	}

	if node.DiscoKey.String() != req.DiscoKey.String() {
		ret.DiscoKey = &req.DiscoKey
	}

	if node.Hostinfo != nil &&
		node.Hostinfo.NetInfo != nil &&
		req.Hostinfo != nil &&
		req.Hostinfo.NetInfo != nil &&
		node.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP {
		ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
	}

	if req.Hostinfo != nil && req.Hostinfo.NetInfo != nil {
		// If there is no stored Hostinfo or NetInfo, use
		// the new PreferredDERP.
		if node.Hostinfo == nil {
			ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
		} else if node.Hostinfo.NetInfo == nil {
			ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
		} else {
			// If there is a PreferredDERP, check if it has changed.
			if node.Hostinfo.NetInfo.PreferredDERP != req.Hostinfo.NetInfo.PreferredDERP {
				ret.DERPRegion = req.Hostinfo.NetInfo.PreferredDERP
			}
		}
	}

	// TODO(kradalby): Find a good way to compare updates
	ret.Endpoints = req.Endpoints

	now := time.Now()
	ret.LastSeen = &now

	return ret
}

// ApplyPeerChange takes a PeerChange struct and updates the node.
func (node *Node) ApplyPeerChange(change *tailcfg.PeerChange) {
	if change.Key != nil {
		node.NodeKey = *change.Key
	}

	if change.DiscoKey != nil {
		node.DiscoKey = *change.DiscoKey
	}

	if change.Online != nil {
		node.IsOnline = change.Online
	}

	if change.Endpoints != nil {
		node.Endpoints = change.Endpoints
	}

	// This might technically not be useful as we replace
	// the whole hostinfo blob when it has changed.
	if change.DERPRegion != 0 {
		if node.Hostinfo == nil {
			node.Hostinfo = &tailcfg.Hostinfo{
				NetInfo: &tailcfg.NetInfo{
					PreferredDERP: change.DERPRegion,
				},
			}
		} else if node.Hostinfo.NetInfo == nil {
			node.Hostinfo.NetInfo = &tailcfg.NetInfo{
				PreferredDERP: change.DERPRegion,
			}
		} else {
			node.Hostinfo.NetInfo.PreferredDERP = change.DERPRegion
		}
	}

	node.LastSeen = change.LastSeen
}

func (node *Node) MachinePublicKey() (key.MachinePublic, error) {
	var machineKey key.MachinePublic

	if node.MachineKey != "" {
		err := machineKey.UnmarshalText(
			[]byte(util.MachinePublicKeyEnsurePrefix(node.MachineKey)),
		)
		if err != nil {
			return key.MachinePublic{}, fmt.Errorf("failed to parse machine public key: %w", err)
		}
	}

	return machineKey, nil
}

func (node *Node) DiscoPublicKey() (key.DiscoPublic, error) {
	var discoKey key.DiscoPublic
	if node.DiscoKey != "" {
		err := discoKey.UnmarshalText(
			[]byte(util.DiscoPublicKeyEnsurePrefix(node.DiscoKey)),
		)
		if err != nil {
			return key.DiscoPublic{}, fmt.Errorf("failed to parse disco public key: %w", err)
		}
	} else {
		discoKey = key.DiscoPublic{}
	}

	return discoKey, nil
}

func (node *Node) NodePublicKey() (key.NodePublic, error) {
	var nodeKey key.NodePublic
	err := nodeKey.UnmarshalText([]byte(util.NodePublicKeyEnsurePrefix(node.NodeKey)))
	if err != nil {
		return key.NodePublic{}, fmt.Errorf("failed to parse node public key: %w", err)
	}

	return nodeKey, nil
}

func (node Node) String() string {
	return node.Hostname
}

func (nodes Nodes) String() string {
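PeerChangeFromMapRequest and ApplyPeerChange are two halves of the same patch protocol: one computes the minimal diff from an incoming MapRequest, the other replays it onto a node. A minimal sketch of the pairing (the call site is assumed, not shown in this diff):

	// Hypothetical wiring inside the poll handler.
	change := node.PeerChangeFromMapRequest(mapRequest)
	node.ApplyPeerChange(&change)
	// ...then notify peers with a StatePeerChangedPatch carrying &change.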
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue