Remove support for non-noise clients (pre-1.32) (#1611)
parent: b918aa03fc
commit: a59aab2081
72 changed files with 319 additions and 679 deletions
@@ -46,7 +46,6 @@ jobs:
           --volume $PWD/control_logs:/tmp/control \
           golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \

(The same one-line hunk, dropping the `-tags ts2019` flag from the gotestsum invocation, is repeated verbatim in each of the per-test integration CI workflow files changed by this commit.)
@@ -23,8 +23,10 @@ after improving the test harness as part of adopting [#1460](https://github.com/
 
 ### BREAKING
 
-Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
-API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
+- Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
+- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
+- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
+- The latest supported client is 1.32
 
 ### Changes
 
@@ -9,7 +9,7 @@ RUN go mod download
 
 COPY . .
 
-RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN strip /go/bin/headscale
 RUN test -e /go/bin/headscale
 
@@ -9,7 +9,7 @@ RUN go mod download
 
 COPY . .
 
-RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
+RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
 RUN test -e /go/bin/headscale
 
 # Debug image
Makefile (6 changes)
@@ -10,8 +10,6 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
 else
 endif
 
-TAGS = -tags ts2019
-
 # GO_SOURCES = $(wildcard *.go)
 # PROTO_SOURCES = $(wildcard **/*.proto)
 GO_SOURCES = $(call rwildcard,,*.go)
@@ -24,7 +22,7 @@ build:
 dev: lint test build
 
 test:
-    gotestsum -- $(TAGS) -short -coverprofile=coverage.out ./...
+    gotestsum -- -short -coverprofile=coverage.out ./...
 
 test_integration:
     docker run \
@@ -34,7 +32,7 @@ test_integration:
        -v $$PWD:$$PWD -w $$PWD/integration \
        -v /var/run/docker.sock:/var/run/docker.sock \
        golang:1 \
-       go run gotest.tools/gotestsum@latest -- $(TAGS) -failfast ./... -timeout 120m -parallel 8
+       go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8
 
 lint:
     golangci-lint run --fix --timeout 10m
@@ -67,7 +67,6 @@ jobs:
           --volume $PWD/control_logs:/tmp/control \
           golang:1 \
             go run gotest.tools/gotestsum@latest -- ./... \
-            -tags ts2019 \
             -failfast \
             -timeout 120m \
             -parallel 1 \
@@ -67,7 +67,7 @@ var listAPIKeys = &cobra.Command{
   }
 
   if output != "" {
-    SuccessOutput(response.ApiKeys, "", output)
+    SuccessOutput(response.GetApiKeys(), "", output)
 
     return
   }
@@ -75,11 +75,11 @@ var listAPIKeys = &cobra.Command{
   tableData := pterm.TableData{
     {"ID", "Prefix", "Expiration", "Created"},
   }
-  for _, key := range response.ApiKeys {
+  for _, key := range response.GetApiKeys() {
     expiration := "-"
 
     if key.GetExpiration() != nil {
-      expiration = ColourTime(key.Expiration.AsTime())
+      expiration = ColourTime(key.GetExpiration().AsTime())
     }
 
     tableData = append(tableData, []string{
@@ -155,7 +155,7 @@ If you loose a key, create a new one and revoke (expire) the old one.`,
     return
   }
 
-  SuccessOutput(response.ApiKey, response.ApiKey, output)
+  SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
   },
 }
 
@@ -135,6 +135,6 @@ var createNodeCmd = &cobra.Command{
     return
   }
 
-  SuccessOutput(response.Node, "Node created", output)
+  SuccessOutput(response.GetNode(), "Node created", output)
   },
 }
 
@@ -152,8 +152,8 @@ var registerNodeCmd = &cobra.Command{
   }
 
   SuccessOutput(
-    response.Node,
-    fmt.Sprintf("Node %s registered", response.Node.GivenName), output)
+    response.GetNode(),
+    fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
   },
 }
 
@@ -196,12 +196,12 @@ var listNodesCmd = &cobra.Command{
   }
 
   if output != "" {
-    SuccessOutput(response.Nodes, "", output)
+    SuccessOutput(response.GetNodes(), "", output)
 
     return
   }
 
-  tableData, err := nodesToPtables(user, showTags, response.Nodes)
+  tableData, err := nodesToPtables(user, showTags, response.GetNodes())
   if err != nil {
     ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
 
@@ -262,7 +262,7 @@ var expireNodeCmd = &cobra.Command{
     return
   }
 
-  SuccessOutput(response.Node, "Node expired", output)
+  SuccessOutput(response.GetNode(), "Node expired", output)
   },
 }
 
@@ -310,7 +310,7 @@ var renameNodeCmd = &cobra.Command{
     return
   }
 
-  SuccessOutput(response.Node, "Node renamed", output)
+  SuccessOutput(response.GetNode(), "Node renamed", output)
   },
 }
 
@@ -364,7 +364,7 @@ var deleteNodeCmd = &cobra.Command{
   prompt := &survey.Confirm{
     Message: fmt.Sprintf(
       "Do you want to remove the node %s?",
-      getResponse.GetNode().Name,
+      getResponse.GetNode().GetName(),
     ),
   }
   err = survey.AskOne(prompt, &confirm)
@@ -473,7 +473,7 @@ var moveNodeCmd = &cobra.Command{
     return
   }
 
-  SuccessOutput(moveResponse.Node, "Node moved to another user", output)
+  SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
   },
 }
 
@@ -507,21 +507,21 @@ func nodesToPtables(
 
   for _, node := range nodes {
     var ephemeral bool
-    if node.PreAuthKey != nil && node.PreAuthKey.Ephemeral {
+    if node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() {
       ephemeral = true
     }
 
     var lastSeen time.Time
     var lastSeenTime string
-    if node.LastSeen != nil {
-      lastSeen = node.LastSeen.AsTime()
+    if node.GetLastSeen() != nil {
+      lastSeen = node.GetLastSeen().AsTime()
       lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
     }
 
     var expiry time.Time
     var expiryTime string
-    if node.Expiry != nil {
-      expiry = node.Expiry.AsTime()
+    if node.GetExpiry() != nil {
+      expiry = node.GetExpiry().AsTime()
       expiryTime = expiry.Format("2006-01-02 15:04:05")
     } else {
       expiryTime = "N/A"
@@ -529,7 +529,7 @@ func nodesToPtables(
 
     var machineKey key.MachinePublic
     err := machineKey.UnmarshalText(
-      []byte(node.MachineKey),
+      []byte(node.GetMachineKey()),
     )
     if err != nil {
       machineKey = key.MachinePublic{}
@@ -537,14 +537,14 @@ func nodesToPtables(
 
     var nodeKey key.NodePublic
     err = nodeKey.UnmarshalText(
-      []byte(node.NodeKey),
+      []byte(node.GetNodeKey()),
     )
     if err != nil {
       return nil, err
     }
 
     var online string
-    if node.Online {
+    if node.GetOnline() {
       online = pterm.LightGreen("online")
     } else {
       online = pterm.LightRed("offline")
@@ -558,36 +558,36 @@ func nodesToPtables(
     }
 
     var forcedTags string
-    for _, tag := range node.ForcedTags {
+    for _, tag := range node.GetForcedTags() {
       forcedTags += "," + tag
     }
     forcedTags = strings.TrimLeft(forcedTags, ",")
     var invalidTags string
-    for _, tag := range node.InvalidTags {
-      if !contains(node.ForcedTags, tag) {
+    for _, tag := range node.GetInvalidTags() {
+      if !contains(node.GetForcedTags(), tag) {
        invalidTags += "," + pterm.LightRed(tag)
       }
     }
     invalidTags = strings.TrimLeft(invalidTags, ",")
     var validTags string
-    for _, tag := range node.ValidTags {
-      if !contains(node.ForcedTags, tag) {
+    for _, tag := range node.GetValidTags() {
+      if !contains(node.GetForcedTags(), tag) {
        validTags += "," + pterm.LightGreen(tag)
       }
     }
     validTags = strings.TrimLeft(validTags, ",")
 
     var user string
-    if currentUser == "" || (currentUser == node.User.Name) {
-      user = pterm.LightMagenta(node.User.Name)
+    if currentUser == "" || (currentUser == node.GetUser().GetName()) {
+      user = pterm.LightMagenta(node.GetUser().GetName())
     } else {
       // Shared into this user
-      user = pterm.LightYellow(node.User.Name)
+      user = pterm.LightYellow(node.GetUser().GetName())
     }
 
     var IPV4Address string
     var IPV6Address string
-    for _, addr := range node.IpAddresses {
+    for _, addr := range node.GetIpAddresses() {
       if netip.MustParseAddr(addr).Is4() {
        IPV4Address = addr
       } else {
@@ -596,8 +596,8 @@ func nodesToPtables(
     }
 
     nodeData := []string{
-      strconv.FormatUint(node.Id, util.Base10),
-      node.Name,
+      strconv.FormatUint(node.GetId(), util.Base10),
+      node.GetName(),
       node.GetGivenName(),
       machineKey.ShortString(),
       nodeKey.ShortString(),
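The CLI hunks above (and the pre-auth key, route and user hunks that follow) all make the same mechanical change: direct field access on the gRPC/protobuf response messages is replaced with the protoc-generated GetX() accessors. The main practical difference is nil-safety, illustrated by the sketch below; the PreAuthKey type here is a hand-written stand-in for the generated headscale types, not the real API.

package main

import "fmt"

// PreAuthKey is a hypothetical stand-in for a protobuf-generated message.
// protoc-gen-go emits a GetX() accessor like the one below for every field.
type PreAuthKey struct {
	Ephemeral bool
}

// GetEphemeral is nil-safe: calling it on a nil *PreAuthKey returns the zero
// value instead of panicking, which is why the CLI now prefers the getters.
func (k *PreAuthKey) GetEphemeral() bool {
	if k == nil {
		return false
	}
	return k.Ephemeral
}

func main() {
	var key *PreAuthKey // e.g. a node that was registered without a pre-auth key

	// key.Ephemeral would panic here with a nil pointer dereference;
	// the generated getter simply reports the zero value.
	fmt.Println(key.GetEphemeral()) // prints "false"
}

In other words, the getter form lets the table-rendering code treat "field not set" and "field set to its zero value" uniformly, without sprinkling nil checks around every access.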
@@ -84,7 +84,7 @@ var listPreAuthKeys = &cobra.Command{
   }
 
   if output != "" {
-    SuccessOutput(response.PreAuthKeys, "", output)
+    SuccessOutput(response.GetPreAuthKeys(), "", output)
 
     return
   }
@@ -101,10 +101,10 @@ var listPreAuthKeys = &cobra.Command{
       "Tags",
     },
   }
-  for _, key := range response.PreAuthKeys {
+  for _, key := range response.GetPreAuthKeys() {
     expiration := "-"
     if key.GetExpiration() != nil {
-      expiration = ColourTime(key.Expiration.AsTime())
+      expiration = ColourTime(key.GetExpiration().AsTime())
     }
 
     var reusable string
@@ -116,7 +116,7 @@ var listPreAuthKeys = &cobra.Command{
 
     aclTags := ""
 
-    for _, tag := range key.AclTags {
+    for _, tag := range key.GetAclTags() {
       aclTags += "," + tag
     }
 
@@ -214,7 +214,7 @@ var createPreAuthKeyCmd = &cobra.Command{
     return
   }
 
-  SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
+  SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
   },
 }
 
@@ -87,12 +87,12 @@ var listRoutesCmd = &cobra.Command{
   }
 
   if output != "" {
-    SuccessOutput(response.Routes, "", output)
+    SuccessOutput(response.GetRoutes(), "", output)
 
     return
   }
 
-  routes = response.Routes
+  routes = response.GetRoutes()
   } else {
   response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
     NodeId: machineID,
@@ -108,12 +108,12 @@ var listRoutesCmd = &cobra.Command{
   }
 
   if output != "" {
-    SuccessOutput(response.Routes, "", output)
+    SuccessOutput(response.GetRoutes(), "", output)
 
     return
   }
 
-  routes = response.Routes
+  routes = response.GetRoutes()
   }
 
   tableData := routesToPtables(routes)
@@ -271,25 +271,25 @@ func routesToPtables(routes []*v1.Route) pterm.TableData {
 
   for _, route := range routes {
     var isPrimaryStr string
-    prefix, err := netip.ParsePrefix(route.Prefix)
+    prefix, err := netip.ParsePrefix(route.GetPrefix())
     if err != nil {
-      log.Printf("Error parsing prefix %s: %s", route.Prefix, err)
+      log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)
 
       continue
     }
     if prefix == types.ExitRouteV4 || prefix == types.ExitRouteV6 {
       isPrimaryStr = "-"
     } else {
-      isPrimaryStr = strconv.FormatBool(route.IsPrimary)
+      isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
     }
 
     tableData = append(tableData,
       []string{
-        strconv.FormatUint(route.Id, Base10),
-        route.Node.GivenName,
-        route.Prefix,
-        strconv.FormatBool(route.Advertised),
-        strconv.FormatBool(route.Enabled),
+        strconv.FormatUint(route.GetId(), Base10),
+        route.GetNode().GetGivenName(),
+        route.GetPrefix(),
+        strconv.FormatBool(route.GetAdvertised()),
+        strconv.FormatBool(route.GetEnabled()),
        isPrimaryStr,
       })
   }
@@ -67,7 +67,7 @@ var createUserCmd = &cobra.Command{
     return
   }
 
-  SuccessOutput(response.User, "User created", output)
+  SuccessOutput(response.GetUser(), "User created", output)
   },
 }
 
@@ -169,7 +169,7 @@ var listUsersCmd = &cobra.Command{
   }
 
   if output != "" {
-    SuccessOutput(response.Users, "", output)
+    SuccessOutput(response.GetUsers(), "", output)
 
     return
   }
@@ -236,6 +236,6 @@ var renameUserCmd = &cobra.Command{
     return
   }
 
-  SuccessOutput(response.User, "User renamed", output)
+  SuccessOutput(response.GetUser(), "User renamed", output)
   },
 }
@@ -40,19 +40,12 @@ grpc_listen_addr: 127.0.0.1:50443
 # are doing.
 grpc_allow_insecure: false
 
-# Private key used to encrypt the traffic between headscale
-# and Tailscale clients.
-# The private key file will be autogenerated if it's missing.
-#
-private_key_path: /var/lib/headscale/private.key
-
 # The Noise section includes specific configuration for the
 # TS2021 Noise protocol
 noise:
   # The Noise private key is used to encrypt the
   # traffic between headscale and Tailscale clients when
-  # using the new Noise-based protocol. It must be different
-  # from the legacy private key.
+  # using the new Noise-based protocol.
   private_key_path: /var/lib/headscale/noise_private.key
 
 # List of IP prefixes to allocate tailaddresses from.
@@ -95,6 +88,12 @@ derp:
   # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
   stun_listen_addr: "0.0.0.0:3478"
 
+  # Private key used to encrypt the traffic between headscale DERP
+  # and Tailscale clients.
+  # The private key file will be autogenerated if it's missing.
+  #
+  private_key_path: /var/lib/headscale/derp_server_private.key
+
   # List of externally available DERP maps encoded in JSON
   urls:
     - https://controlplane.tailscale.com/derpmap/default
@@ -26,8 +26,6 @@
   version = headscaleVersion;
   src = pkgs.lib.cleanSource self;
 
-  tags = ["ts2019"];
-
   # Only run unit tests when testing a build
   checkFlags = ["-short"];
 
@@ -129,7 +127,6 @@
   buildInputs = devDeps;
 
   shellHook = ''
-    export GOFLAGS=-tags="ts2019"
     export PATH="$PWD/result/bin:$PATH"
 
     mkdir -p ./ignored
@@ -77,7 +77,6 @@ type Headscale struct {
   dbString string
   dbType string
   dbDebug bool
-  privateKey2019 *key.MachinePrivate
   noisePrivateKey *key.MachinePrivate
 
   DERPMap *tailcfg.DERPMap
@@ -101,21 +100,11 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
     runtime.SetBlockProfileRate(1)
   }
 
-  privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
-  if err != nil {
-    return nil, fmt.Errorf("failed to read or create private key: %w", err)
-  }
-
-  // TS2021 requires to have a different key from the legacy protocol.
   noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
   if err != nil {
     return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
   }
 
-  if privateKey.Equal(*noisePrivateKey) {
-    return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
-  }
-
   var dbString string
   switch cfg.DBtype {
   case db.Postgres:
@@ -156,7 +145,6 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
     cfg: cfg,
     dbType: cfg.DBtype,
     dbString: dbString,
-    privateKey2019: privateKey,
     noisePrivateKey: noisePrivateKey,
     registrationCache: registrationCache,
     pollNetMapStreamWG: sync.WaitGroup{},
@@ -199,10 +187,18 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
   }
 
   if cfg.DERP.ServerEnabled {
-    // TODO(kradalby): replace this key with a dedicated DERP key.
+    derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
+    if err != nil {
+      return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
+    }
+
+    if derpServerKey.Equal(*noisePrivateKey) {
+      return nil, fmt.Errorf("DERP server private key and noise private key are the same: %w", err)
+    }
+
     embeddedDERPServer, err := derpServer.NewDERPServer(
       cfg.ServerURL,
-      key.NodePrivate(*privateKey),
+      key.NodePrivate(*derpServerKey),
       &cfg.DERP,
     )
     if err != nil {
@@ -450,7 +446,6 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
   router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
   router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
   router.HandleFunc("/register/{mkey}", h.RegisterWebAPI).Methods(http.MethodGet)
-  h.addLegacyHandlers(router)
 
   router.HandleFunc("/oidc/register/{mkey}", h.RegisterOIDC).Methods(http.MethodGet)
   router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
@@ -914,12 +909,6 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
 
   var machineKey key.MachinePrivate
   if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil {
-    log.Info().
-      Str("path", path).
-      Msg("This might be due to a legacy (headscale pre-0.12) private key. " +
-        "If the key is in WireGuard format, delete the key and restart headscale. " +
-        "A new key will automatically be generated. All Tailscale clients will have to be restarted")
-
     return nil, fmt.Errorf("failed to parse private key: %w", err)
   }
 
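The net effect of these hunks on key handling: the pre-Noise private.key is no longer loaded at all, and the embedded DERP server switches from reusing that legacy key to a dedicated derp_server_private.key, which must differ from the Noise key. A toy sketch of that invariant, with a stand-in type instead of key.MachinePrivate:

package main

import (
	"errors"
	"fmt"
)

// machinePrivate is a toy stand-in for tailscale.com/types/key.MachinePrivate.
type machinePrivate struct{ raw string }

func (k machinePrivate) Equal(other machinePrivate) bool { return k.raw == other.raw }

// checkDERPKey mirrors the guard added above: when the embedded DERP server
// is enabled, its private key must not be the same key used for Noise.
func checkDERPKey(noiseKey, derpServerKey machinePrivate, derpEnabled bool) error {
	if derpEnabled && derpServerKey.Equal(noiseKey) {
		return errors.New("DERP server private key and noise private key are the same")
	}

	return nil
}

func main() {
	noise := machinePrivate{raw: "noise"}
	derp := machinePrivate{raw: "noise"} // misconfigured: the same key reused

	if err := checkDERPKey(noise, derp, true); err != nil {
		fmt.Println("refusing to start:", err)
	}
}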
@@ -1,13 +1,13 @@
 package hscontrol
 
 import (
+  "encoding/json"
   "errors"
   "fmt"
   "net/http"
   "strings"
   "time"
 
-  "github.com/juanfont/headscale/hscontrol/mapper"
   "github.com/juanfont/headscale/hscontrol/types"
   "github.com/juanfont/headscale/hscontrol/util"
   "github.com/rs/zerolog/log"
@@ -16,22 +16,19 @@ import (
   "tailscale.com/types/key"
 )
 
-// handleRegister is the common logic for registering a client in the legacy and Noise protocols
-//
-// When using Noise, the machineKey is Zero.
+// handleRegister is the logic for registering a client.
 func (h *Headscale) handleRegister(
   writer http.ResponseWriter,
   req *http.Request,
   registerRequest tailcfg.RegisterRequest,
   machineKey key.MachinePublic,
-  isNoise bool,
 ) {
   now := time.Now().UTC()
   node, err := h.db.GetNodeByAnyKey(machineKey, registerRequest.NodeKey, registerRequest.OldNodeKey)
   if errors.Is(err, gorm.ErrRecordNotFound) {
     // If the node has AuthKey set, handle registration via PreAuthKeys
     if registerRequest.Auth.AuthKey != "" {
-      h.handleAuthKey(writer, registerRequest, machineKey, isNoise)
+      h.handleAuthKey(writer, registerRequest, machineKey)
 
       return
     }
@@ -53,14 +50,13 @@ func (h *Headscale) handleRegister(
       Str("node_key", registerRequest.NodeKey.ShortString()).
       Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
       Str("follow_up", registerRequest.Followup).
-      Bool("noise", isNoise).
       Msg("Node is waiting for interactive login")
 
     select {
     case <-req.Context().Done():
       return
     case <-time.After(registrationHoldoff):
-      h.handleNewNode(writer, registerRequest, machineKey, isNoise)
+      h.handleNewNode(writer, registerRequest, machineKey)
 
       return
     }
@@ -74,7 +70,6 @@ func (h *Headscale) handleRegister(
       Str("node_key", registerRequest.NodeKey.ShortString()).
       Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
       Str("follow_up", registerRequest.Followup).
-      Bool("noise", isNoise).
       Msg("New node not yet in the database")
 
     givenName, err := h.db.GenerateGivenName(
@@ -108,7 +103,6 @@ func (h *Headscale) handleRegister(
     if !registerRequest.Expiry.IsZero() {
       log.Trace().
        Caller().
-       Bool("noise", isNoise).
        Str("node", registerRequest.Hostinfo.Hostname).
        Time("expiry", registerRequest.Expiry).
        Msg("Non-zero expiry time requested")
@@ -121,7 +115,7 @@ func (h *Headscale) handleRegister(
       registerCacheExpiration,
     )
 
-    h.handleNewNode(writer, registerRequest, machineKey, isNoise)
+    h.handleNewNode(writer, registerRequest, machineKey)
 
     return
   }
@@ -157,7 +151,7 @@ func (h *Headscale) handleRegister(
     // https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
     if !registerRequest.Expiry.IsZero() &&
       registerRequest.Expiry.UTC().Before(now) {
-      h.handleNodeLogOut(writer, *node, machineKey, isNoise)
+      h.handleNodeLogOut(writer, *node, machineKey)
 
       return
     }
@@ -165,7 +159,7 @@ func (h *Headscale) handleRegister(
     // If node is not expired, and it is register, we have a already accepted this node,
     // let it proceed with a valid registration
     if !node.IsExpired() {
-      h.handleNodeWithValidRegistration(writer, *node, machineKey, isNoise)
+      h.handleNodeWithValidRegistration(writer, *node, machineKey)
 
       return
     }
@@ -179,7 +173,6 @@ func (h *Headscale) handleRegister(
        registerRequest,
        *node,
        machineKey,
-       isNoise,
       )
 
       return
@@ -194,7 +187,7 @@ func (h *Headscale) handleRegister(
     }
 
     // The node has expired or it is logged out
-    h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey, isNoise)
+    h.handleNodeExpiredOrLoggedOut(writer, registerRequest, *node, machineKey)
 
     // TODO(juan): RegisterRequest includes an Expiry time, that we could optionally use
     node.Expiry = &time.Time{}
@@ -215,7 +208,6 @@ func (h *Headscale) handleRegister(
 }
 
 // handleAuthKey contains the logic to manage auth key client registration
-// It is used both by the legacy and the new Noise protocol.
 // When using Noise, the machineKey is Zero.
 //
 // TODO: check if any locks are needed around IP allocation.
@@ -223,12 +215,10 @@ func (h *Headscale) handleAuthKey(
   writer http.ResponseWriter,
   registerRequest tailcfg.RegisterRequest,
   machineKey key.MachinePublic,
-  isNoise bool,
 ) {
   log.Debug().
     Caller().
     Str("node", registerRequest.Hostinfo.Hostname).
-    Bool("noise", isNoise).
     Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
   resp := tailcfg.RegisterResponse{}
 
@@ -236,17 +226,15 @@ func (h *Headscale) handleAuthKey(
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Str("node", registerRequest.Hostinfo.Hostname).
       Err(err).
       Msg("Failed authentication via AuthKey")
     resp.MachineAuthorized = false
 
-    respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
+    respBody, err := json.Marshal(resp)
     if err != nil {
       log.Error().
        Caller().
-       Bool("noise", isNoise).
        Str("node", registerRequest.Hostinfo.Hostname).
        Err(err).
        Msg("Cannot encode message")
@@ -263,14 +251,12 @@ func (h *Headscale) handleAuthKey(
     if err != nil {
       log.Error().
        Caller().
-       Bool("noise", isNoise).
        Err(err).
        Msg("Failed to write response")
     }
 
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Str("node", registerRequest.Hostinfo.Hostname).
       Msg("Failed authentication via AuthKey")
 
@@ -286,7 +272,6 @@ func (h *Headscale) handleAuthKey(
 
   log.Debug().
     Caller().
-    Bool("noise", isNoise).
     Str("node", registerRequest.Hostinfo.Hostname).
     Msg("Authentication key was valid, proceeding to acquire IP addresses")
 
@@ -300,7 +285,6 @@ func (h *Headscale) handleAuthKey(
   if node != nil {
     log.Trace().
       Caller().
-      Bool("noise", isNoise).
       Str("node", node.Hostname).
       Msg("node was already registered before, refreshing with new auth key")
 
@@ -310,7 +294,6 @@ func (h *Headscale) handleAuthKey(
     if err != nil {
       log.Error().
        Caller().
-       Bool("noise", isNoise).
        Str("node", node.Hostname).
        Err(err).
        Msg("Failed to refresh node")
@@ -318,7 +301,7 @@ func (h *Headscale) handleAuthKey(
       return
     }
 
-    aclTags := pak.Proto().AclTags
+    aclTags := pak.Proto().GetAclTags()
     if len(aclTags) > 0 {
       // This conditional preserves the existing behaviour, although SaaS would reset the tags on auth-key login
       err = h.db.SetTags(node, aclTags)
@@ -326,7 +309,6 @@ func (h *Headscale) handleAuthKey(
       if err != nil {
        log.Error().
          Caller().
-         Bool("noise", isNoise).
          Str("node", node.Hostname).
          Strs("aclTags", aclTags).
          Err(err).
@@ -342,7 +324,6 @@ func (h *Headscale) handleAuthKey(
     if err != nil {
       log.Error().
        Caller().
-       Bool("noise", isNoise).
        Str("func", "RegistrationHandler").
        Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
        Err(err).
@@ -361,7 +342,7 @@ func (h *Headscale) handleAuthKey(
       NodeKey: nodeKey,
       LastSeen: &now,
       AuthKeyID: uint(pak.ID),
-      ForcedTags: pak.Proto().AclTags,
+      ForcedTags: pak.Proto().GetAclTags(),
     }
 
     node, err = h.db.RegisterNode(
@@ -370,7 +351,6 @@ func (h *Headscale) handleAuthKey(
     if err != nil {
       log.Error().
        Caller().
-       Bool("noise", isNoise).
        Err(err).
        Msg("could not register node")
       nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
@@ -385,7 +365,6 @@ func (h *Headscale) handleAuthKey(
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Failed to use pre-auth key")
     nodeRegistrations.WithLabelValues("new", util.RegisterMethodAuthKey, "error", pak.User.Name).
@@ -401,11 +380,10 @@ func (h *Headscale) handleAuthKey(
   // Otherwise it will need to exec `tailscale up` twice to fetch the *LoginName*
   resp.Login = *pak.User.TailscaleLogin()
 
-  respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
+  respBody, err := json.Marshal(resp)
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Str("node", registerRequest.Hostinfo.Hostname).
       Err(err).
       Msg("Cannot encode message")
@@ -423,32 +401,29 @@ func (h *Headscale) handleAuthKey(
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Failed to write response")
   }
 
   log.Info().
-    Bool("noise", isNoise).
     Str("node", registerRequest.Hostinfo.Hostname).
     Str("ips", strings.Join(node.IPAddresses.StringSlice(), ", ")).
     Msg("Successfully authenticated via AuthKey")
 }
 
-// handleNewNode exposes for both legacy and Noise the functionality to get a URL
-// for authorizing the node. This url is then showed to the user by the local Tailscale client.
+// handleNewNode returns the authorisation URL to the client based on what type
+// of registration headscale is configured with.
+// This url is then showed to the user by the local Tailscale client.
 func (h *Headscale) handleNewNode(
   writer http.ResponseWriter,
   registerRequest tailcfg.RegisterRequest,
   machineKey key.MachinePublic,
-  isNoise bool,
 ) {
   resp := tailcfg.RegisterResponse{}
 
   // The node registration is new, redirect the client to the registration URL
   log.Debug().
     Caller().
-    Bool("noise", isNoise).
     Str("node", registerRequest.Hostinfo.Hostname).
     Msg("The node seems to be new, sending auth url")
 
@@ -464,11 +439,10 @@ func (h *Headscale) handleNewNode(
       machineKey.String())
   }
 
-  respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
+  respBody, err := json.Marshal(resp)
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Cannot encode message")
     http.Error(writer, "Internal server error", http.StatusInternalServerError)
@@ -481,7 +455,6 @@ func (h *Headscale) handleNewNode(
   _, err = writer.Write(respBody)
   if err != nil {
     log.Error().
-      Bool("noise", isNoise).
       Caller().
       Err(err).
       Msg("Failed to write response")
@@ -489,7 +462,6 @@ func (h *Headscale) handleNewNode(
 
   log.Info().
     Caller().
-    Bool("noise", isNoise).
     Str("AuthURL", resp.AuthURL).
     Str("node", registerRequest.Hostinfo.Hostname).
     Msg("Successfully sent auth url")
@@ -499,12 +471,10 @@ func (h *Headscale) handleNodeLogOut(
   writer http.ResponseWriter,
   node types.Node,
   machineKey key.MachinePublic,
-  isNoise bool,
 ) {
   resp := tailcfg.RegisterResponse{}
 
   log.Info().
-    Bool("noise", isNoise).
     Str("node", node.Hostname).
     Msg("Client requested logout")
 
@@ -513,7 +483,6 @@ func (h *Headscale) handleNodeLogOut(
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Failed to expire node")
     http.Error(writer, "Internal server error", http.StatusInternalServerError)
@@ -525,11 +494,10 @@ func (h *Headscale) handleNodeLogOut(
   resp.MachineAuthorized = false
   resp.NodeKeyExpired = true
   resp.User = *node.User.TailscaleUser()
-  respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
+  respBody, err := json.Marshal(resp)
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Cannot encode message")
     http.Error(writer, "Internal server error", http.StatusInternalServerError)
@@ -542,7 +510,6 @@ func (h *Headscale) handleNodeLogOut(
   _, err = writer.Write(respBody)
   if err != nil {
     log.Error().
-      Bool("noise", isNoise).
       Caller().
       Err(err).
       Msg("Failed to write response")
@@ -564,7 +531,6 @@ func (h *Headscale) handleNodeLogOut(
 
   log.Info().
     Caller().
-    Bool("noise", isNoise).
     Str("node", node.Hostname).
     Msg("Successfully logged out")
 }
@@ -573,14 +539,12 @@ func (h *Headscale) handleNodeWithValidRegistration(
   writer http.ResponseWriter,
   node types.Node,
   machineKey key.MachinePublic,
-  isNoise bool,
 ) {
   resp := tailcfg.RegisterResponse{}
 
   // The node registration is valid, respond with redirect to /map
   log.Debug().
     Caller().
-    Bool("noise", isNoise).
     Str("node", node.Hostname).
     Msg("Client is registered and we have the current NodeKey. All clear to /map")
 
@@ -589,11 +553,10 @@ func (h *Headscale) handleNodeWithValidRegistration(
   resp.User = *node.User.TailscaleUser()
   resp.Login = *node.User.TailscaleLogin()
 
-  respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
+  respBody, err := json.Marshal(resp)
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Cannot encode message")
     nodeRegistrations.WithLabelValues("update", "web", "error", node.User.Name).
@@ -611,14 +574,12 @@ func (h *Headscale) handleNodeWithValidRegistration(
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Failed to write response")
   }
 
   log.Info().
     Caller().
-    Bool("noise", isNoise).
     Str("node", node.Hostname).
     Msg("Node successfully authorized")
 }
@@ -628,13 +589,11 @@ func (h *Headscale) handleNodeKeyRefresh(
   registerRequest tailcfg.RegisterRequest,
   node types.Node,
   machineKey key.MachinePublic,
-  isNoise bool,
 ) {
   resp := tailcfg.RegisterResponse{}
 
   log.Info().
     Caller().
-    Bool("noise", isNoise).
     Str("node", node.Hostname).
     Msg("We have the OldNodeKey in the database. This is a key refresh")
 
@@ -651,11 +610,10 @@ func (h *Headscale) handleNodeKeyRefresh(
 
   resp.AuthURL = ""
   resp.User = *node.User.TailscaleUser()
-  respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
+  respBody, err := json.Marshal(resp)
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Cannot encode message")
     http.Error(writer, "Internal server error", http.StatusInternalServerError)
@@ -669,14 +627,12 @@ func (h *Headscale) handleNodeKeyRefresh(
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Failed to write response")
   }
 
   log.Info().
     Caller().
-    Bool("noise", isNoise).
     Str("node_key", registerRequest.NodeKey.ShortString()).
     Str("old_node_key", registerRequest.OldNodeKey.ShortString()).
     Str("node", node.Hostname).
@@ -688,12 +644,11 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
   registerRequest tailcfg.RegisterRequest,
   node types.Node,
   machineKey key.MachinePublic,
-  isNoise bool,
 ) {
   resp := tailcfg.RegisterResponse{}
 
   if registerRequest.Auth.AuthKey != "" {
-    h.handleAuthKey(writer, registerRequest, machineKey, isNoise)
+    h.handleAuthKey(writer, registerRequest, machineKey)
 
     return
   }
@@ -701,7 +656,6 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
   // The client has registered before, but has expired or logged out
   log.Trace().
     Caller().
-    Bool("noise", isNoise).
     Str("node", node.Hostname).
     Str("machine_key", machineKey.ShortString()).
     Str("node_key", registerRequest.NodeKey.ShortString()).
@@ -718,11 +672,10 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
       machineKey.String())
   }
 
-  respBody, err := mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey)
+  respBody, err := json.Marshal(resp)
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Cannot encode message")
     nodeRegistrations.WithLabelValues("reauth", "web", "error", node.User.Name).
@@ -740,14 +693,12 @@ func (h *Headscale) handleNodeExpiredOrLoggedOut(
   if err != nil {
     log.Error().
       Caller().
-      Bool("noise", isNoise).
       Err(err).
       Msg("Failed to write response")
   }
 
   log.Trace().
     Caller().
-    Bool("noise", isNoise).
     Str("machine_key", machineKey.ShortString()).
     Str("node_key", registerRequest.NodeKey.ShortString()).
     Str("node_key_old", registerRequest.OldNodeKey.ShortString()).
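A pattern repeated throughout the handlers above: mapper.MarshalResponse(resp, isNoise, h.privateKey2019, machineKey) becomes a plain json.Marshal(resp), because there is no longer a pre-Noise client whose response body has to be sealed to its machine key. The sketch below condenses that write path into a self-contained handler; the response struct, header value and status handling are simplified assumptions, not headscale's exact code.

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// registerResponse is a reduced stand-in for tailcfg.RegisterResponse.
type registerResponse struct {
	MachineAuthorized bool   `json:"MachineAuthorized"`
	AuthURL           string `json:"AuthURL,omitempty"`
}

// writeRegisterResponse sketches the Noise-only write path used above:
// marshal to JSON, write the body, log failures. There is no NaCl sealing.
func writeRegisterResponse(writer http.ResponseWriter, resp registerResponse) {
	respBody, err := json.Marshal(resp)
	if err != nil {
		log.Printf("cannot encode message: %v", err)
		http.Error(writer, "Internal server error", http.StatusInternalServerError)

		return
	}

	// Header name and status handling are assumptions for this sketch.
	writer.Header().Set("Content-Type", "application/json; charset=utf-8")
	writer.WriteHeader(http.StatusOK)
	if _, err := writer.Write(respBody); err != nil {
		log.Printf("failed to write response: %v", err)
	}
}

func main() {
	http.HandleFunc("/register", func(w http.ResponseWriter, r *http.Request) {
		writeRegisterResponse(w, registerResponse{MachineAuthorized: true})
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}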
@@ -1,61 +0,0 @@
-//go:build ts2019
-
-package hscontrol
-
-import (
-  "io"
-  "net/http"
-
-  "github.com/gorilla/mux"
-  "github.com/juanfont/headscale/hscontrol/util"
-  "github.com/rs/zerolog/log"
-  "tailscale.com/tailcfg"
-  "tailscale.com/types/key"
-)
-
-// RegistrationHandler handles the actual registration process of a machine
-// Endpoint /machine/:mkey.
-func (h *Headscale) RegistrationHandler(
-  writer http.ResponseWriter,
-  req *http.Request,
-) {
-  vars := mux.Vars(req)
-  machineKeyStr, ok := vars["mkey"]
-  if !ok || machineKeyStr == "" {
-    log.Error().
-      Str("handler", "RegistrationHandler").
-      Msg("No machine ID in request")
-    http.Error(writer, "No machine ID in request", http.StatusBadRequest)
-
-    return
-  }
-
-  body, _ := io.ReadAll(req.Body)
-
-  var machineKey key.MachinePublic
-  err := machineKey.UnmarshalText([]byte("mkey:" + machineKeyStr))
-  if err != nil {
-    log.Error().
-      Caller().
-      Err(err).
-      Msg("Cannot parse machine key")
-    nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
-    http.Error(writer, "Cannot parse machine key", http.StatusBadRequest)
-
-    return
-  }
-  registerRequest := tailcfg.RegisterRequest{}
-  err = util.DecodeAndUnmarshalNaCl(body, &registerRequest, &machineKey, h.privateKey2019)
-  if err != nil {
-    log.Error().
-      Caller().
-      Err(err).
-      Msg("Cannot decode message")
-    nodeRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
-    http.Error(writer, "Cannot decode message", http.StatusBadRequest)
-
-    return
-  }
-
-  h.handleRegister(writer, req, registerRequest, machineKey, false)
-}
@@ -39,7 +39,19 @@ func (ns *noiseServer) NoiseRegistrationHandler(
     return
   }
 
+  // Reject unsupported versions
+  if registerRequest.Version < MinimumCapVersion {
+    log.Info().
+      Caller().
+      Int("min_version", int(MinimumCapVersion)).
+      Int("client_version", int(registerRequest.Version)).
+      Msg("unsupported client connected")
+    http.Error(writer, "Internal error", http.StatusBadRequest)
+
+    return
+  }
+
   ns.nodeKey = registerRequest.NodeKey
 
-  ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer(), true)
+  ns.headscale.handleRegister(writer, req, registerRequest, ns.conn.Peer())
 }
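This hunk is the enforcement point of the whole change: Noise clients that report a capability version below MinimumCapVersion are rejected before any registration work happens. The sketch below reproduces the shape of that gate in a standalone handler; the constant's value, the request type and the route are placeholders for illustration, not the real headscale definitions.

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// CapabilityVersion mirrors tailcfg.CapabilityVersion: an integer the client
// reports to describe which protocol features it understands.
type CapabilityVersion int

// MinimumCapVersion is an assumed placeholder; in headscale the real constant
// marks the cut-off below which pre-1.32 clients fall.
const MinimumCapVersion CapabilityVersion = 36

type registerRequest struct {
	Version CapabilityVersion `json:"Version"`
}

func registrationHandler(writer http.ResponseWriter, req *http.Request) {
	var reg registerRequest
	if err := json.NewDecoder(req.Body).Decode(&reg); err != nil {
		http.Error(writer, "Cannot decode request", http.StatusBadRequest)

		return
	}

	// Reject unsupported (too old) clients before doing any registration work.
	if reg.Version < MinimumCapVersion {
		log.Printf("unsupported client connected: min=%d client=%d",
			MinimumCapVersion, reg.Version)
		http.Error(writer, "Internal error", http.StatusBadRequest)

		return
	}

	// ... the normal registration path would continue here ...
}

func main() {
	http.HandleFunc("/machine/register", registrationHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}

Gating on the reported capability version (rather than a version string) matches how the Tailscale protocol negotiates features, so a too-old client fails fast with a clear log line instead of limping along on removed code paths.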
@@ -198,5 +198,5 @@ func (*Suite) TestPreAuthKeyACLTags(c *check.C) {
 
   listedPaks, err := db.ListPreAuthKeys("test8")
   c.Assert(err, check.IsNil)
-  c.Assert(listedPaks[0].Proto().AclTags, check.DeepEquals, tags)
+  c.Assert(listedPaks[0].Proto().GetAclTags(), check.DeepEquals, tags)
 }
@@ -1,15 +0,0 @@
-//go:build ts2019
-
-package hscontrol
-
-import (
-  "net/http"
-
-  "github.com/gorilla/mux"
-)
-
-func (h *Headscale) addLegacyHandlers(router *mux.Router) {
-  router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).
-    Methods(http.MethodPost)
-  router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
-}
@@ -1,8 +0,0 @@
-//go:build !ts2019
-
-package hscontrol
-
-import "github.com/gorilla/mux"
-
-func (h *Headscale) addLegacyHandlers(router *mux.Router) {
-}
@@ -8,7 +8,6 @@ import (
   "html/template"
   "net/http"
   "strconv"
-  "strings"
   "time"
 
   "github.com/gorilla/mux"
@@ -63,26 +62,6 @@ func (h *Headscale) KeyHandler(
   // New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion
   capVer, err := parseCabailityVersion(req)
   if err != nil {
-    if errors.Is(err, ErrNoCapabilityVersion) {
-      log.Debug().
-        Str("handler", "/key").
-        Msg("New legacy client")
-      // Old clients don't send a 'v' parameter, so we send the legacy public key
-      writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
-      writer.WriteHeader(http.StatusOK)
-      _, err := writer.Write(
-        []byte(strings.TrimPrefix(h.privateKey2019.Public().String(), "mkey:")),
-      )
-      if err != nil {
-        log.Error().
-          Caller().
-          Err(err).
-          Msg("Failed to write response")
-      }
-
-      return
-    }
-
     log.Error().
       Caller().
       Err(err).
@@ -101,7 +80,7 @@ func (h *Headscale) KeyHandler(
 
   log.Debug().
     Str("handler", "/key").
-    Int("v", int(capVer)).
+    Int("cap_ver", int(capVer)).
     Msg("New noise client")
   if err != nil {
     writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -120,7 +99,6 @@ func (h *Headscale) KeyHandler(
   // TS2021 (Tailscale v2 protocol) requires to have a different key
   if capVer >= NoiseCapabilityVersion {
     resp := tailcfg.OverTLSPublicKeyResponse{
-      LegacyPublicKey: h.privateKey2019.Public(),
       PublicKey: h.noisePrivateKey.Public(),
     }
     writer.Header().Set("Content-Type", "application/json")
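After these hunks, /key has no fallback for clients that omit the 'v' capability-version parameter, and the JSON response only needs to carry the Noise public key. A rough, self-contained sketch of how such a handler can look; the status code for legacy clients, the route wiring and the string key type are assumptions for illustration only.

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// overTLSPublicKeyResponse is a reduced stand-in for
// tailcfg.OverTLSPublicKeyResponse, with the key as a plain string.
type overTLSPublicKeyResponse struct {
	PublicKey string `json:"PublicKey"`
}

// keyHandler sketches the trimmed /key endpoint: clients that do not send a
// capability version get an error, everyone else gets the Noise public key.
func keyHandler(noisePublicKey string) http.HandlerFunc {
	return func(writer http.ResponseWriter, req *http.Request) {
		if req.URL.Query().Get("v") == "" {
			// Pre-Noise clients used to receive the bare 2019 machine key
			// here; that path no longer exists.
			http.Error(writer, "Unsupported client", http.StatusBadRequest)

			return
		}

		writer.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(writer).Encode(overTLSPublicKeyResponse{
			PublicKey: noisePublicKey,
		}); err != nil {
			log.Printf("failed to write response: %v", err)
		}
	}
}

func main() {
	http.Handle("/key", keyHandler("mkey:examplenoisepublickey"))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}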
@ -25,7 +25,6 @@ import (
|
|||
"tailscale.com/smallzstd"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -48,10 +47,6 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_
|
|||
// - Create a "minifier" that removes info not needed for the node
|
||||
|
||||
type Mapper struct {
|
||||
privateKey2019 *key.MachinePrivate
|
||||
isNoise bool
|
||||
capVer tailcfg.CapabilityVersion
|
||||
|
||||
// Configuration
|
||||
// TODO(kradalby): figure out if this is the format we want this in
|
||||
derpMap *tailcfg.DERPMap
|
||||
|
@ -73,9 +68,6 @@ type Mapper struct {
|
|||
func NewMapper(
|
||||
node *types.Node,
|
||||
peers types.Nodes,
|
||||
privateKey *key.MachinePrivate,
|
||||
isNoise bool,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
derpMap *tailcfg.DERPMap,
|
||||
baseDomain string,
|
||||
dnsCfg *tailcfg.DNSConfig,
|
||||
|
@ -84,17 +76,12 @@ func NewMapper(
|
|||
) *Mapper {
|
||||
log.Debug().
|
||||
Caller().
|
||||
Bool("noise", isNoise).
|
||||
Str("node", node.Hostname).
|
||||
Msg("creating new mapper")
|
||||
|
||||
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
|
||||
|
||||
return &Mapper{
|
||||
privateKey2019: privateKey,
|
||||
isNoise: isNoise,
|
||||
capVer: capVer,
|
||||
|
||||
derpMap: derpMap,
|
||||
baseDomain: baseDomain,
|
||||
dnsCfg: dnsCfg,
|
||||
|
@ -212,10 +199,11 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
|
|||
func (m *Mapper) fullMapResponse(
|
||||
node *types.Node,
|
||||
pol *policy.ACLPolicy,
|
||||
capVer tailcfg.CapabilityVersion,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
peers := nodeMapToList(m.peers)
|
||||
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol)
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol, capVer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -224,7 +212,7 @@ func (m *Mapper) fullMapResponse(
|
|||
resp,
|
||||
pol,
|
||||
node,
|
||||
m.capVer,
|
||||
capVer,
|
||||
peers,
|
||||
peers,
|
||||
m.baseDomain,
|
||||
|
@ -247,15 +235,11 @@ func (m *Mapper) FullMapResponse(
|
|||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
resp, err := m.fullMapResponse(node, pol)
|
||||
resp, err := m.fullMapResponse(node, pol, mapRequest.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.isNoise {
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
|
@ -267,15 +251,11 @@ func (m *Mapper) LiteMapResponse(
|
|||
node *types.Node,
|
||||
pol *policy.ACLPolicy,
|
||||
) ([]byte, error) {
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol)
|
||||
resp, err := m.baseWithConfigMapResponse(node, pol, mapRequest.Version)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.isNoise {
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
|
||||
}
|
||||
|
||||
|
@ -325,7 +305,7 @@ func (m *Mapper) PeerChangedResponse(
|
|||
&resp,
|
||||
pol,
|
||||
node,
|
||||
m.capVer,
|
||||
mapRequest.Version,
|
||||
nodeMapToList(m.peers),
|
||||
changed,
|
||||
m.baseDomain,
|
||||
|
@ -414,16 +394,9 @@ func (m *Mapper) marshalMapResponse(
|
|||
var respBody []byte
|
||||
if compression == util.ZstdCompression {
|
||||
respBody = zstdEncode(jsonBody)
|
||||
if !m.isNoise { // if legacy protocol
|
||||
respBody = m.privateKey2019.SealTo(node.MachineKey, respBody)
|
||||
}
|
||||
} else {
|
||||
if !m.isNoise { // if legacy protocol
|
||||
respBody = m.privateKey2019.SealTo(node.MachineKey, jsonBody)
|
||||
} else {
|
||||
respBody = jsonBody
|
||||
}
|
||||
}
|
||||
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
|
@@ -432,32 +405,6 @@ func (m *Mapper) marshalMapResponse(
return data, nil
}

// MarshalResponse takes an Tailscale Response, marhsal it to JSON.
// If isNoise is set, then the JSON body will be returned
// If !isNoise and privateKey2019 is set, the JSON body will be sealed in a Nacl box.
func MarshalResponse(
resp interface{},
isNoise bool,
privateKey2019 *key.MachinePrivate,
machineKey key.MachinePublic,
) ([]byte, error) {
jsonBody, err := json.Marshal(resp)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Cannot marshal response")

return nil, err
}

if !isNoise && privateKey2019 != nil {
return privateKey2019.SealTo(machineKey, jsonBody), nil
}

return jsonBody, nil
}

func zstdEncode(in []byte) []byte {
encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
if !ok {
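The zstdEncode helper that remains draws its encoder from a sync.Pool so concurrent map responses do not each allocate one. A self-contained sketch of that pattern using github.com/klauspost/compress/zstd, which is what the zstd.Encoder type here appears to come from; the pool setup and encoder options below are illustrative assumptions:

    package main

    import (
        "fmt"
        "sync"

        "github.com/klauspost/compress/zstd"
    )

    // Pool of reusable encoders; zstd.Encoder is safe to reuse for EncodeAll calls.
    var zstdEncoderPool = &sync.Pool{
        New: func() any {
            enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedFastest))
            if err != nil {
                panic(err) // options are static, so this should not happen
            }
            return enc
        },
    }

    func zstdEncode(in []byte) []byte {
        encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
        if !ok {
            panic("invalid type in sync pool")
        }
        out := encoder.EncodeAll(in, nil) // one-shot, stateless compression
        zstdEncoderPool.Put(encoder)

        return out
    }

    func main() {
        compressed := zstdEncode([]byte("example payload"))
        fmt.Println(len(compressed) > 0)
    }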
@@ -503,10 +450,11 @@ func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
func (m *Mapper) baseWithConfigMapResponse(
node *types.Node,
pol *policy.ACLPolicy,
capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
resp := m.baseMapResponse()

tailnode, err := tailNode(node, m.capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
if err != nil {
return nil, err
}
@@ -472,9 +472,6 @@ func Test_fullMapResponse(t *testing.T) {
mappy := NewMapper(
tt.node,
tt.peers,
nil,
false,
0,
tt.derpMap,
tt.baseDomain,
tt.dnsConfig,

@@ -485,6 +482,7 @@ func Test_fullMapResponse(t *testing.T) {
got, err := mappy.fullMapResponse(
tt.node,
tt.pol,
0,
)

if (err != nil) != tt.wantErr {
@@ -25,12 +25,10 @@ type UpdateNode func()
func logPollFunc(
mapRequest tailcfg.MapRequest,
node *types.Node,
isNoise bool,
) (func(string), func(error, string)) {
return func(msg string) {
log.Info().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).

@@ -41,7 +39,6 @@ func logPollFunc(
func(err error, msg string) {
log.Error().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
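logPollFunc returns a pair of closures with the request's fields pre-bound, so every log line in the poll loop carries the same context; with only Noise clients left, the "noise" field is dropped. A self-contained sketch of that closure pattern with zerolog, using toy fields rather than the exact headscale set:

    package main

    import (
        "errors"

        "github.com/rs/zerolog/log"
    )

    // logFuncsFor returns info/error loggers with request context pre-bound,
    // mirroring the shape of logPollFunc above (minus the removed "noise" field).
    func logFuncsFor(hostname string, stream bool) (func(string), func(error, string)) {
        logInfo := func(msg string) {
            log.Info().
                Caller().
                Bool("stream", stream).
                Str("node", hostname).
                Msg(msg)
        }
        logErr := func(err error, msg string) {
            log.Error().
                Caller().
                Bool("stream", stream).
                Str("node", hostname).
                Err(err).
                Msg(msg)
        }

        return logInfo, logErr
    }

    func main() {
        logInfo, logErr := logFuncsFor("example-node", true)
        logInfo("client connected")
        logErr(errors.New("boom"), "poll loop ended")
    }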
@@ -52,8 +49,8 @@ func logPollFunc(
}
}

// handlePoll is the common code for the legacy and Noise protocols to
// managed the poll loop.
// handlePoll ensures the node gets the appropriate updates from either
// polling or immediate responses.
//
//nolint:gocyclo
func (h *Headscale) handlePoll(

@@ -61,10 +58,8 @@ func (h *Headscale) handlePoll(
ctx context.Context,
node *types.Node,
mapRequest tailcfg.MapRequest,
isNoise bool,
capVer tailcfg.CapabilityVersion,
) {
logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
logInfo, logErr := logPollFunc(mapRequest, node)

// This is the mechanism where the node gives us inforamtion about its
// current configuration.
@@ -77,12 +72,12 @@ func (h *Headscale) handlePoll(
if mapRequest.OmitPeers && !mapRequest.Stream && !mapRequest.ReadOnly {
log.Info().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
Str("node_key", node.NodeKey.ShortString()).
Str("node", node.Hostname).
Int("cap_ver", int(mapRequest.Version)).
Msg("Received endpoint update")

now := time.Now().UTC()

@@ -129,7 +124,7 @@ func (h *Headscale) handlePoll(
// The intended use is for clients to discover the DERP map at
// start-up before their first real endpoint update.
} else if mapRequest.OmitPeers && !mapRequest.Stream && mapRequest.ReadOnly {
h.handleLiteRequest(writer, node, mapRequest, isNoise, capVer)
h.handleLiteRequest(writer, node, mapRequest)

return
} else if mapRequest.OmitPeers && mapRequest.Stream {
@@ -160,9 +155,6 @@ func (h *Headscale) handlePoll(
mapp := mapper.NewMapper(
node,
peers,
h.privateKey2019,
isNoise,
capVer,
h.DERPMap,
h.cfg.BaseDomain,
h.cfg.DNSConfig,

@@ -337,7 +329,6 @@ func (h *Headscale) handlePoll(

log.Info().
Caller().
Bool("noise", isNoise).
Bool("readOnly", mapRequest.ReadOnly).
Bool("omitPeers", mapRequest.OmitPeers).
Bool("stream", mapRequest.Stream).
@@ -382,19 +373,14 @@ func (h *Headscale) handleLiteRequest(
writer http.ResponseWriter,
node *types.Node,
mapRequest tailcfg.MapRequest,
isNoise bool,
capVer tailcfg.CapabilityVersion,
) {
logInfo, logErr := logPollFunc(mapRequest, node, isNoise)
logInfo, logErr := logPollFunc(mapRequest, node)

mapp := mapper.NewMapper(
node,
// TODO(kradalby): It might not be acceptable to send
// an empty peer list here.
types.Nodes{},
h.privateKey2019,
isNoise,
capVer,
h.DERPMap,
h.cfg.BaseDomain,
h.cfg.DNSConfig,
@@ -1,108 +0,0 @@
//go:build ts2019

package hscontrol

import (
"errors"
"io"
"net/http"

"github.com/gorilla/mux"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)

// PollNetMapHandler takes care of /machine/:id/map
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST stuff like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At this moment the updates are sent in a quite horrendous way, but they kinda work.
func (h *Headscale) PollNetMapHandler(
writer http.ResponseWriter,
req *http.Request,
) {
vars := mux.Vars(req)
machineKeyStr, ok := vars["mkey"]
if !ok || machineKeyStr == "" {
log.Error().
Str("handler", "PollNetMap").
Msg("No machine key in request")
http.Error(writer, "No machine key in request", http.StatusBadRequest)

return
}
log.Trace().
Str("handler", "PollNetMap").
Str("id", machineKeyStr).
Msg("PollNetMapHandler called")
body, _ := io.ReadAll(req.Body)

var machineKey key.MachinePublic
err := machineKey.UnmarshalText([]byte("mkey:" + machineKeyStr))
if err != nil {
log.Error().
Str("handler", "PollNetMap").
Err(err).
Msg("Cannot parse client key")

http.Error(writer, "Cannot parse client key", http.StatusBadRequest)

return
}
mapRequest := tailcfg.MapRequest{}
err = util.DecodeAndUnmarshalNaCl(body, &mapRequest, &machineKey, h.privateKey2019)
if err != nil {
log.Error().
Str("handler", "PollNetMap").
Err(err).
Msg("Cannot decode message")
http.Error(writer, "Cannot decode message", http.StatusBadRequest)

return
}

node, err := h.db.GetNodeByMachineKey(machineKey)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
log.Warn().
Str("handler", "PollNetMap").
Msgf("Ignoring request, cannot find node with key %s", machineKey.String())

http.Error(writer, "", http.StatusUnauthorized)

return
}
log.Error().
Str("handler", "PollNetMap").
Msgf("Failed to fetch node from the database with Machine key: %s", machineKey.String())
http.Error(writer, "", http.StatusInternalServerError)

return
}

log.Trace().
Str("handler", "PollNetMap").
Str("id", machineKeyStr).
Str("node", node.Hostname).
Msg("A node is sending a MapRequest via legacy protocol")

capVer, err := parseCabailityVersion(req)
if err != nil && !errors.Is(err, ErrNoCapabilityVersion) {
log.Error().
Caller().
Err(err).
Msg("failed to parse capVer")
http.Error(writer, "Internal error", http.StatusInternalServerError)

return
}

h.handlePoll(writer, req.Context(), node, mapRequest, false, capVer)
}
@@ -12,6 +12,10 @@ import (
"tailscale.com/types/key"
)

const (
MinimumCapVersion tailcfg.CapabilityVersion = 36
)

// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
@@ -47,6 +51,18 @@ func (ns *noiseServer) NoisePollNetMapHandler(
return
}

// Reject unsupported versions
if mapRequest.Version < MinimumCapVersion {
log.Info().
Caller().
Int("min_version", int(MinimumCapVersion)).
Int("client_version", int(mapRequest.Version)).
Msg("unsupported client connected")
http.Error(writer, "Internal error", http.StatusBadRequest)

return
}

ns.nodeKey = mapRequest.NodeKey

node, err := ns.headscale.db.GetNodeByAnyKey(
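The new gate rejects any MapRequest whose capability version is below MinimumCapVersion before any database work happens. A self-contained sketch of the same check in an ordinary HTTP handler; the handler and the query-parameter transport are invented for illustration, since headscale reads the version from the decoded MapRequest itself:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
        "strconv"
    )

    // MinimumCapVersion mirrors the constant introduced in the hunk above.
    const MinimumCapVersion = 36

    // handler rejects clients that advertise a capability version that is too old.
    func handler(w http.ResponseWriter, r *http.Request) {
        ver, err := strconv.Atoi(r.URL.Query().Get("v"))
        if err != nil || ver < MinimumCapVersion {
            http.Error(w, "unsupported client version", http.StatusBadRequest)
            return
        }
        fmt.Fprintln(w, "ok")
    }

    func main() {
        srv := httptest.NewServer(http.HandlerFunc(handler))
        defer srv.Close()

        for _, v := range []int{28, 36, 74} {
            resp, _ := http.Get(fmt.Sprintf("%s/?v=%d", srv.URL, v))
            fmt.Println(v, resp.StatusCode) // 28 is rejected, 36 and newer pass
            resp.Body.Close()
        }
    }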
@@ -73,20 +89,8 @@ func (ns *noiseServer) NoisePollNetMapHandler(
log.Debug().
Str("handler", "NoisePollNetMap").
Str("node", node.Hostname).
Int("cap_ver", int(mapRequest.Version)).
Msg("A node sending a MapRequest with Noise protocol")

capVer, err := parseCabailityVersion(req)
if err != nil && !errors.Is(err, ErrNoCapabilityVersion) {
log.Error().
Caller().
Err(err).
Msg("failed to parse capVer")
http.Error(writer, "Internal error", http.StatusInternalServerError)

return
}

// TODO(kradalby): since we are now passing capVer, we could arguably stop passing
// isNoise, and rather have a isNoise function that takes capVer
ns.headscale.handlePoll(writer, req.Context(), node, mapRequest, true, capVer)
ns.headscale.handlePoll(writer, req.Context(), node, mapRequest)
}
@@ -40,7 +40,6 @@ func (s *Suite) ResetDB(c *check.C) {
c.Fatal(err)
}
cfg := types.Config{
PrivateKeyPath: tmpDir + "/private.key",
NoisePrivateKeyPath: tmpDir + "/noise_private.key",
DBtype: "sqlite3",
DBpath: tmpDir + "/headscale_test.db",
@@ -41,7 +41,6 @@ type Config struct {
EphemeralNodeInactivityTimeout time.Duration
NodeUpdateCheckInterval time.Duration
IPPrefixes []netip.Prefix
PrivateKeyPath string
NoisePrivateKeyPath string
BaseDomain string
Log LogConfig

@@ -112,6 +111,7 @@ type DERPConfig struct {
ServerRegionID int
ServerRegionCode string
ServerRegionName string
ServerPrivateKeyPath string
STUNAddr string
URLs []url.URL
Paths []string

@@ -286,6 +286,7 @@ func GetDERPConfig() DERPConfig {
serverRegionCode := viper.GetString("derp.server.region_code")
serverRegionName := viper.GetString("derp.server.region_name")
stunAddr := viper.GetString("derp.server.stun_listen_addr")
privateKeyPath := util.AbsolutePathFromConfigPath(viper.GetString("derp.server.private_key_path"))

if serverEnabled && stunAddr == "" {
log.Fatal().

@@ -317,6 +318,7 @@ func GetDERPConfig() DERPConfig {
ServerRegionID: serverRegionID,
ServerRegionCode: serverRegionCode,
ServerRegionName: serverRegionName,
ServerPrivateKeyPath: privateKeyPath,
STUNAddr: stunAddr,
URLs: urls,
Paths: paths,

@@ -582,9 +584,6 @@ func GetHeadscaleConfig() (*Config, error) {
DisableUpdateCheck: viper.GetBool("disable_check_updates"),

IPPrefixes: prefixes,
PrivateKeyPath: util.AbsolutePathFromConfigPath(
viper.GetString("private_key_path"),
),
NoisePrivateKeyPath: util.AbsolutePathFromConfigPath(
viper.GetString("noise.private_key_path"),
),
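The config hunks add a derp.server.private_key_path lookup for the embedded DERP server while the legacy private_key_path entry drops out of the main config struct. A self-contained sketch of reading those viper keys; the YAML snippet and paths below are illustrative and only cover the keys visible in the hunks, not the full headscale configuration:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/spf13/viper"
    )

    func main() {
        cfg := []byte(`
    noise:
      private_key_path: /var/lib/headscale/noise_private.key
    derp:
      server:
        region_code: headscale
        region_name: Headscale Embedded DERP
        stun_listen_addr: "0.0.0.0:3478"
        private_key_path: /var/lib/headscale/derp_server_private.key
    `)

        viper.SetConfigType("yaml")
        if err := viper.ReadConfig(bytes.NewBuffer(cfg)); err != nil {
            panic(err)
        }

        // The keys below mirror the viper lookups in the hunks above.
        fmt.Println(viper.GetString("noise.private_key_path"))
        fmt.Println(viper.GetString("derp.server.region_code"))
        fmt.Println(viper.GetString("derp.server.stun_listen_addr"))
        fmt.Println(viper.GetString("derp.server.private_key_path"))
    }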
@@ -60,7 +60,7 @@ func TestUserCommand(t *testing.T) {
)
assertNoErr(t, err)

result := []string{listUsers[0].Name, listUsers[1].Name}
result := []string{listUsers[0].GetName(), listUsers[1].GetName()}
sort.Strings(result)

assert.Equal(

@@ -95,7 +95,7 @@ func TestUserCommand(t *testing.T) {
)
assertNoErr(t, err)

result = []string{listAfterRenameUsers[0].Name, listAfterRenameUsers[1].Name}
result = []string{listAfterRenameUsers[0].GetName(), listAfterRenameUsers[1].GetName()}
sort.Strings(result)

assert.Equal(
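Throughout the integration tests, direct field access on the generated gRPC types (listUsers[0].Name, keys[0].Id, route.Enabled, and so on) is replaced with the generated getters (GetName(), GetId(), GetEnabled()). protoc-gen-go getters are nil-safe, which makes the assertions robust when a message is missing. A self-contained toy illustration of that getter pattern, using a hand-written type rather than the generated v1 types:

    package main

    import "fmt"

    // User mimics the getter convention of protoc-gen-go generated structs.
    type User struct {
        Name string
    }

    // GetName returns the zero value when the receiver is nil, exactly like a
    // generated protobuf getter.
    func (u *User) GetName() string {
        if u == nil {
            return ""
        }
        return u.Name
    }

    func main() {
        var u *User              // nil, e.g. a missing message in an RPC response
        fmt.Println(u.GetName()) // prints "" instead of panicking
        // fmt.Println(u.Name)   // would panic with a nil pointer dereference
    }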
@@ -177,29 +177,29 @@ func TestPreAuthKeyCommand(t *testing.T) {

assert.Equal(
t,
[]string{keys[0].Id, keys[1].Id, keys[2].Id},
[]string{listedPreAuthKeys[1].Id, listedPreAuthKeys[2].Id, listedPreAuthKeys[3].Id},
[]string{keys[0].GetId(), keys[1].GetId(), keys[2].GetId()},
[]string{listedPreAuthKeys[1].GetId(), listedPreAuthKeys[2].GetId(), listedPreAuthKeys[3].GetId()},
)

assert.NotEmpty(t, listedPreAuthKeys[1].Key)
assert.NotEmpty(t, listedPreAuthKeys[2].Key)
assert.NotEmpty(t, listedPreAuthKeys[3].Key)
assert.NotEmpty(t, listedPreAuthKeys[1].GetKey())
assert.NotEmpty(t, listedPreAuthKeys[2].GetKey())
assert.NotEmpty(t, listedPreAuthKeys[3].GetKey())

assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[2].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[3].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[1].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[2].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[3].GetExpiration().AsTime().After(time.Now()))

assert.True(
t,
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedPreAuthKeys[2].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedPreAuthKeys[2].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedPreAuthKeys[3].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedPreAuthKeys[3].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)

for index := range listedPreAuthKeys {

@@ -207,7 +207,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
continue
}

assert.Equal(t, listedPreAuthKeys[index].AclTags, []string{"tag:test1", "tag:test2"})
assert.Equal(t, listedPreAuthKeys[index].GetAclTags(), []string{"tag:test1", "tag:test2"})
}

// Test key expiry

@@ -218,7 +218,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
"--user",
user,
"expire",
listedPreAuthKeys[1].Key,
listedPreAuthKeys[1].GetKey(),
},
)
assertNoErr(t, err)

@@ -239,9 +239,9 @@ func TestPreAuthKeyCommand(t *testing.T) {
)
assertNoErr(t, err)

assert.True(t, listedPreAuthKeysAfterExpire[1].Expiration.AsTime().Before(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[2].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[3].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[1].GetExpiration().AsTime().Before(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[2].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[3].GetExpiration().AsTime().After(time.Now()))
}

func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {

@@ -300,10 +300,10 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
// There is one key created by "scenario.CreateHeadscaleEnv"
assert.Len(t, listedPreAuthKeys, 2)

assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[1].GetExpiration().AsTime().After(time.Now()))
assert.True(
t,
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Minute*70)),
listedPreAuthKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Minute*70)),
)
}
@@ -442,9 +442,9 @@ func TestEnablingRoutes(t *testing.T) {
assert.Len(t, routes, 3)

for _, route := range routes {
assert.Equal(t, route.Advertised, true)
assert.Equal(t, route.Enabled, false)
assert.Equal(t, route.IsPrimary, false)
assert.Equal(t, route.GetAdvertised(), true)
assert.Equal(t, route.GetEnabled(), false)
assert.Equal(t, route.GetIsPrimary(), false)
}

for _, route := range routes {

@@ -454,7 +454,7 @@ func TestEnablingRoutes(t *testing.T) {
"routes",
"enable",
"--route",
strconv.Itoa(int(route.Id)),
strconv.Itoa(int(route.GetId())),
})
assertNoErr(t, err)
}

@@ -475,12 +475,12 @@ func TestEnablingRoutes(t *testing.T) {
assert.Len(t, enablingRoutes, 3)

for _, route := range enablingRoutes {
assert.Equal(t, route.Advertised, true)
assert.Equal(t, route.Enabled, true)
assert.Equal(t, route.IsPrimary, true)
assert.Equal(t, route.GetAdvertised(), true)
assert.Equal(t, route.GetEnabled(), true)
assert.Equal(t, route.GetIsPrimary(), true)
}

routeIDToBeDisabled := enablingRoutes[0].Id
routeIDToBeDisabled := enablingRoutes[0].GetId()

_, err = headscale.Execute(
[]string{

@@ -507,14 +507,14 @@ func TestEnablingRoutes(t *testing.T) {
assertNoErr(t, err)

for _, route := range disablingRoutes {
assert.Equal(t, true, route.Advertised)
assert.Equal(t, true, route.GetAdvertised())

if route.Id == routeIDToBeDisabled {
assert.Equal(t, route.Enabled, false)
assert.Equal(t, route.IsPrimary, false)
if route.GetId() == routeIDToBeDisabled {
assert.Equal(t, route.GetEnabled(), false)
assert.Equal(t, route.GetIsPrimary(), false)
} else {
assert.Equal(t, route.Enabled, true)
assert.Equal(t, route.IsPrimary, true)
assert.Equal(t, route.GetEnabled(), true)
assert.Equal(t, route.GetIsPrimary(), true)
}
}
}
@@ -577,43 +577,43 @@ func TestApiKeyCommand(t *testing.T) {

assert.Len(t, listedAPIKeys, 5)

assert.Equal(t, uint64(1), listedAPIKeys[0].Id)
assert.Equal(t, uint64(2), listedAPIKeys[1].Id)
assert.Equal(t, uint64(3), listedAPIKeys[2].Id)
assert.Equal(t, uint64(4), listedAPIKeys[3].Id)
assert.Equal(t, uint64(5), listedAPIKeys[4].Id)
assert.Equal(t, uint64(1), listedAPIKeys[0].GetId())
assert.Equal(t, uint64(2), listedAPIKeys[1].GetId())
assert.Equal(t, uint64(3), listedAPIKeys[2].GetId())
assert.Equal(t, uint64(4), listedAPIKeys[3].GetId())
assert.Equal(t, uint64(5), listedAPIKeys[4].GetId())

assert.NotEmpty(t, listedAPIKeys[0].Prefix)
assert.NotEmpty(t, listedAPIKeys[1].Prefix)
assert.NotEmpty(t, listedAPIKeys[2].Prefix)
assert.NotEmpty(t, listedAPIKeys[3].Prefix)
assert.NotEmpty(t, listedAPIKeys[4].Prefix)
assert.NotEmpty(t, listedAPIKeys[0].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[1].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[2].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[3].GetPrefix())
assert.NotEmpty(t, listedAPIKeys[4].GetPrefix())

assert.True(t, listedAPIKeys[0].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[1].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[2].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[3].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[4].Expiration.AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[0].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[1].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[2].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[3].GetExpiration().AsTime().After(time.Now()))
assert.True(t, listedAPIKeys[4].GetExpiration().AsTime().After(time.Now()))

assert.True(
t,
listedAPIKeys[0].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedAPIKeys[0].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedAPIKeys[1].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[2].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedAPIKeys[2].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[3].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedAPIKeys[3].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedAPIKeys[4].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
listedAPIKeys[4].GetExpiration().AsTime().Before(time.Now().Add(time.Hour*26)),
)

expiredPrefixes := make(map[string]bool)

@@ -626,12 +626,12 @@ func TestApiKeyCommand(t *testing.T) {
"apikeys",
"expire",
"--prefix",
listedAPIKeys[idx].Prefix,
listedAPIKeys[idx].GetPrefix(),
},
)
assert.Nil(t, err)

expiredPrefixes[listedAPIKeys[idx].Prefix] = true
expiredPrefixes[listedAPIKeys[idx].GetPrefix()] = true
}

var listedAfterExpireAPIKeys []v1.ApiKey

@@ -648,17 +648,17 @@ func TestApiKeyCommand(t *testing.T) {
assert.Nil(t, err)

for index := range listedAfterExpireAPIKeys {
if _, ok := expiredPrefixes[listedAfterExpireAPIKeys[index].Prefix]; ok {
if _, ok := expiredPrefixes[listedAfterExpireAPIKeys[index].GetPrefix()]; ok {
// Expired
assert.True(
t,
listedAfterExpireAPIKeys[index].Expiration.AsTime().Before(time.Now()),
listedAfterExpireAPIKeys[index].GetExpiration().AsTime().Before(time.Now()),
)
} else {
// Not expired
assert.False(
t,
listedAfterExpireAPIKeys[index].Expiration.AsTime().Before(time.Now()),
listedAfterExpireAPIKeys[index].GetExpiration().AsTime().Before(time.Now()),
)
}
}
|
|||
)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, []string{"tag:test"}, node.ForcedTags)
|
||||
assert.Equal(t, []string{"tag:test"}, node.GetForcedTags())
|
||||
|
||||
// try to set a wrong tag and retrieve the error
|
||||
type errOutput struct {
|
||||
|
@ -781,8 +781,8 @@ func TestNodeTagCommand(t *testing.T) {
|
|||
assert.Nil(t, err)
|
||||
found := false
|
||||
for _, node := range resultMachines {
|
||||
if node.ForcedTags != nil {
|
||||
for _, tag := range node.ForcedTags {
|
||||
if node.GetForcedTags() != nil {
|
||||
for _, tag := range node.GetForcedTags() {
|
||||
if tag == "tag:test" {
|
||||
found = true
|
||||
}
|
||||
|
@ -885,17 +885,17 @@ func TestNodeCommand(t *testing.T) {
|
|||
|
||||
assert.Len(t, listAll, 5)
|
||||
|
||||
assert.Equal(t, uint64(1), listAll[0].Id)
|
||||
assert.Equal(t, uint64(2), listAll[1].Id)
|
||||
assert.Equal(t, uint64(3), listAll[2].Id)
|
||||
assert.Equal(t, uint64(4), listAll[3].Id)
|
||||
assert.Equal(t, uint64(5), listAll[4].Id)
|
||||
assert.Equal(t, uint64(1), listAll[0].GetId())
|
||||
assert.Equal(t, uint64(2), listAll[1].GetId())
|
||||
assert.Equal(t, uint64(3), listAll[2].GetId())
|
||||
assert.Equal(t, uint64(4), listAll[3].GetId())
|
||||
assert.Equal(t, uint64(5), listAll[4].GetId())
|
||||
|
||||
assert.Equal(t, "node-1", listAll[0].Name)
|
||||
assert.Equal(t, "node-2", listAll[1].Name)
|
||||
assert.Equal(t, "node-3", listAll[2].Name)
|
||||
assert.Equal(t, "node-4", listAll[3].Name)
|
||||
assert.Equal(t, "node-5", listAll[4].Name)
|
||||
assert.Equal(t, "node-1", listAll[0].GetName())
|
||||
assert.Equal(t, "node-2", listAll[1].GetName())
|
||||
assert.Equal(t, "node-3", listAll[2].GetName())
|
||||
assert.Equal(t, "node-4", listAll[3].GetName())
|
||||
assert.Equal(t, "node-5", listAll[4].GetName())
|
||||
|
||||
otherUserMachineKeys := []string{
|
||||
"mkey:b5b444774186d4217adcec407563a1223929465ee2c68a4da13af0d0185b4f8e",
|
||||
|
@ -963,11 +963,11 @@ func TestNodeCommand(t *testing.T) {
|
|||
// All nodes, nodes + otherUser
|
||||
assert.Len(t, listAllWithotherUser, 7)
|
||||
|
||||
assert.Equal(t, uint64(6), listAllWithotherUser[5].Id)
|
||||
assert.Equal(t, uint64(7), listAllWithotherUser[6].Id)
|
||||
assert.Equal(t, uint64(6), listAllWithotherUser[5].GetId())
|
||||
assert.Equal(t, uint64(7), listAllWithotherUser[6].GetId())
|
||||
|
||||
assert.Equal(t, "otherUser-node-1", listAllWithotherUser[5].Name)
|
||||
assert.Equal(t, "otherUser-node-2", listAllWithotherUser[6].Name)
|
||||
assert.Equal(t, "otherUser-node-1", listAllWithotherUser[5].GetName())
|
||||
assert.Equal(t, "otherUser-node-2", listAllWithotherUser[6].GetName())
|
||||
|
||||
// Test list all nodes after added otherUser
|
||||
var listOnlyotherUserMachineUser []v1.Node
|
||||
|
@ -988,18 +988,18 @@ func TestNodeCommand(t *testing.T) {
|
|||
|
||||
assert.Len(t, listOnlyotherUserMachineUser, 2)
|
||||
|
||||
assert.Equal(t, uint64(6), listOnlyotherUserMachineUser[0].Id)
|
||||
assert.Equal(t, uint64(7), listOnlyotherUserMachineUser[1].Id)
|
||||
assert.Equal(t, uint64(6), listOnlyotherUserMachineUser[0].GetId())
|
||||
assert.Equal(t, uint64(7), listOnlyotherUserMachineUser[1].GetId())
|
||||
|
||||
assert.Equal(
|
||||
t,
|
||||
"otherUser-node-1",
|
||||
listOnlyotherUserMachineUser[0].Name,
|
||||
listOnlyotherUserMachineUser[0].GetName(),
|
||||
)
|
||||
assert.Equal(
|
||||
t,
|
||||
"otherUser-node-2",
|
||||
listOnlyotherUserMachineUser[1].Name,
|
||||
listOnlyotherUserMachineUser[1].GetName(),
|
||||
)
|
||||
|
||||
// Delete a nodes
|
||||
|
@@ -1123,11 +1123,11 @@ func TestNodeExpireCommand(t *testing.T) {

assert.Len(t, listAll, 5)

assert.True(t, listAll[0].Expiry.AsTime().IsZero())
assert.True(t, listAll[1].Expiry.AsTime().IsZero())
assert.True(t, listAll[2].Expiry.AsTime().IsZero())
assert.True(t, listAll[3].Expiry.AsTime().IsZero())
assert.True(t, listAll[4].Expiry.AsTime().IsZero())
assert.True(t, listAll[0].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[1].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[2].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[3].GetExpiry().AsTime().IsZero())
assert.True(t, listAll[4].GetExpiry().AsTime().IsZero())

for idx := 0; idx < 3; idx++ {
_, err := headscale.Execute(

@@ -1136,7 +1136,7 @@ func TestNodeExpireCommand(t *testing.T) {
"nodes",
"expire",
"--identifier",
fmt.Sprintf("%d", listAll[idx].Id),
fmt.Sprintf("%d", listAll[idx].GetId()),
},
)
assert.Nil(t, err)

@@ -1158,11 +1158,11 @@ func TestNodeExpireCommand(t *testing.T) {

assert.Len(t, listAllAfterExpiry, 5)

assert.True(t, listAllAfterExpiry[0].Expiry.AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[1].Expiry.AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[2].Expiry.AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[3].Expiry.AsTime().IsZero())
assert.True(t, listAllAfterExpiry[4].Expiry.AsTime().IsZero())
assert.True(t, listAllAfterExpiry[0].GetExpiry().AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[1].GetExpiry().AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[2].GetExpiry().AsTime().Before(time.Now()))
assert.True(t, listAllAfterExpiry[3].GetExpiry().AsTime().IsZero())
assert.True(t, listAllAfterExpiry[4].GetExpiry().AsTime().IsZero())
}

func TestNodeRenameCommand(t *testing.T) {
@@ -1264,7 +1264,7 @@ func TestNodeRenameCommand(t *testing.T) {
"nodes",
"rename",
"--identifier",
fmt.Sprintf("%d", listAll[idx].Id),
fmt.Sprintf("%d", listAll[idx].GetId()),
fmt.Sprintf("newnode-%d", idx+1),
},
)

@@ -1300,7 +1300,7 @@ func TestNodeRenameCommand(t *testing.T) {
"nodes",
"rename",
"--identifier",
fmt.Sprintf("%d", listAll[4].Id),
fmt.Sprintf("%d", listAll[4].GetId()),
"testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine12345678901234567890",
},
)
|
|||
)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, uint64(1), node.Id)
|
||||
assert.Equal(t, "nomad-node", node.Name)
|
||||
assert.Equal(t, node.User.Name, "old-user")
|
||||
assert.Equal(t, uint64(1), node.GetId())
|
||||
assert.Equal(t, "nomad-node", node.GetName())
|
||||
assert.Equal(t, node.GetUser().GetName(), "old-user")
|
||||
|
||||
nodeID := fmt.Sprintf("%d", node.Id)
|
||||
nodeID := fmt.Sprintf("%d", node.GetId())
|
||||
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
|
@ -1410,7 +1410,7 @@ func TestNodeMoveCommand(t *testing.T) {
|
|||
)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, node.User.Name, "new-user")
|
||||
assert.Equal(t, node.GetUser().GetName(), "new-user")
|
||||
|
||||
var allNodes []v1.Node
|
||||
err = executeAndUnmarshal(
|
||||
|
@ -1428,9 +1428,9 @@ func TestNodeMoveCommand(t *testing.T) {
|
|||
|
||||
assert.Len(t, allNodes, 1)
|
||||
|
||||
assert.Equal(t, allNodes[0].Id, node.Id)
|
||||
assert.Equal(t, allNodes[0].User, node.User)
|
||||
assert.Equal(t, allNodes[0].User.Name, "new-user")
|
||||
assert.Equal(t, allNodes[0].GetId(), node.GetId())
|
||||
assert.Equal(t, allNodes[0].GetUser(), node.GetUser())
|
||||
assert.Equal(t, allNodes[0].GetUser().GetName(), "new-user")
|
||||
|
||||
moveToNonExistingNSResult, err := headscale.Execute(
|
||||
[]string{
|
||||
|
@ -1452,7 +1452,7 @@ func TestNodeMoveCommand(t *testing.T) {
|
|||
moveToNonExistingNSResult,
|
||||
"user not found",
|
||||
)
|
||||
assert.Equal(t, node.User.Name, "new-user")
|
||||
assert.Equal(t, node.GetUser().GetName(), "new-user")
|
||||
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
|
@ -1471,7 +1471,7 @@ func TestNodeMoveCommand(t *testing.T) {
|
|||
)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, node.User.Name, "old-user")
|
||||
assert.Equal(t, node.GetUser().GetName(), "old-user")
|
||||
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
|
@ -1490,5 +1490,5 @@ func TestNodeMoveCommand(t *testing.T) {
|
|||
)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, node.User.Name, "old-user")
|
||||
assert.Equal(t, node.GetUser().GetName(), "old-user")
|
||||
}
|
||||
|
|
|
@@ -43,6 +43,7 @@ func TestDERPServerScenario(t *testing.T) {
headscaleConfig["HEADSCALE_DERP_SERVER_REGION_CODE"] = "headscale"
headscaleConfig["HEADSCALE_DERP_SERVER_REGION_NAME"] = "Headscale Embedded DERP"
headscaleConfig["HEADSCALE_DERP_SERVER_STUN_LISTEN_ADDR"] = "0.0.0.0:3478"
headscaleConfig["HEADSCALE_DERP_SERVER_PRIVATE_KEY_PATH"] = "/tmp/derp.key"

err = scenario.CreateHeadscaleEnv(
spec,
@@ -530,10 +530,10 @@ func TestExpireNode(t *testing.T) {

peerPublicKey := strings.TrimPrefix(peerStatus.PublicKey.String(), "nodekey:")

assert.NotEqual(t, node.NodeKey, peerPublicKey)
assert.NotEqual(t, node.GetNodeKey(), peerPublicKey)
}

if client.Hostname() != node.Name {
if client.Hostname() != node.GetName() {
// Assert that we have the original count - self - expired node
assert.Len(t, status.Peers(), len(MustTestVersions)-2)
}
@@ -26,7 +26,6 @@ run_tests() {
--volume "$PWD"/control_logs:/tmp/control \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
@@ -22,6 +22,17 @@ const (
scenarioHashLength = 6
)

func enabledVersions(vs map[string]bool) []string {
var ret []string
for version, enabled := range vs {
if enabled {
ret = append(ret, version)
}
}

return ret
}

var (
errNoHeadscaleAvailable = errors.New("no headscale available")
errNoUserAvailable = errors.New("no user available")

@@ -29,29 +40,30 @@ var (

// Tailscale started adding TS2021 support in CapabilityVersion>=28 (v1.24.0), but
// proper support in Headscale was only added for CapabilityVersion>=39 clients (v1.30.0).
tailscaleVersions2021 = []string{
"head",
"unstable",
"1.50",
"1.48",
"1.46",
"1.44",
"1.42",
"1.40",
"1.38",
"1.36",
"1.34",
"1.32",
"1.30",
tailscaleVersions2021 = map[string]bool{
"head": true,
"unstable": true,
"1.52": true, // CapVer:
"1.50": true, // CapVer: 74
"1.48": true, // CapVer: 68
"1.46": true, // CapVer: 65
"1.44": true, // CapVer: 63
"1.42": true, // CapVer: 61
"1.40": true, // CapVer: 61
"1.38": true, // CapVer: 58
"1.36": true, // CapVer: 56
"1.34": true, // CapVer: 51
"1.32": true, // Oldest supported version, CapVer: 46
"1.30": false,
}

tailscaleVersions2019 = []string{
"1.28",
"1.26",
"1.24", // Tailscale SSH
"1.22",
"1.20",
"1.18",
tailscaleVersions2019 = map[string]bool{
"1.28": false,
"1.26": false,
"1.24": false, // Tailscale SSH
"1.22": false,
"1.20": false,
"1.18": false,
}

// tailscaleVersionsUnavailable = []string{

@@ -72,8 +84,8 @@ var (
// The rest of the version represents Tailscale versions that can be
// found in Tailscale's apt repository.
AllVersions = append(
tailscaleVersions2021,
tailscaleVersions2019...,
enabledVersions(tailscaleVersions2021),
enabledVersions(tailscaleVersions2019)...,
)

// MustTestVersions is the minimum set of versions we should test.

@@ -83,8 +95,8 @@ var (
// - Two latest versions
// - Two oldest versions.
MustTestVersions = append(
tailscaleVersions2021[0:4],
tailscaleVersions2019[len(tailscaleVersions2019)-2:]...,
AllVersions[0:4],
AllVersions[len(AllVersions)-2:]...,
)
)
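The version lists become maps of version to an enabled flag, and enabledVersions filters them so AllVersions and MustTestVersions only contain clients the server still supports. A small self-contained usage example of the same helper shape (the sample versions are illustrative):

    package main

    import (
        "fmt"
        "sort"
    )

    // Same shape as the enabledVersions helper in the hunk above: keep only the
    // versions whose value is true.
    func enabledVersions(vs map[string]bool) []string {
        var ret []string
        for version, enabled := range vs {
            if enabled {
                ret = append(ret, version)
            }
        }

        return ret
    }

    func main() {
        vs := map[string]bool{"head": true, "1.50": true, "1.32": true, "1.30": false}
        got := enabledVersions(vs)
        sort.Strings(got) // map iteration order is random, so sort for stable output
        fmt.Println(got)  // [1.32 1.50 head] — 1.30 is filtered out
    }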