package mapper

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io/fs"
	"net/url"
	"os"
	"path"
	"slices"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set/v2"
	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/klauspost/compress/zstd"
	"github.com/rs/zerolog/log"
	"golang.org/x/exp/maps"
	"tailscale.com/envknob"
	"tailscale.com/smallzstd"
	"tailscale.com/tailcfg"
	"tailscale.com/types/dnstype"
)

const (
	nextDNSDoHPrefix           = "https://dns.nextdns.io"
	reservedResponseHeaderSize = 4
	mapperIDLength             = 8
	debugMapResponsePerm       = 0o755
)
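
// debugDumpMapResponsePath, when set via the environment, enables dumping
// of every generated MapResponse to disk. For example, running headscale
// with
//
//	HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH=/tmp/mapresponses headscale serve
//
// makes marshalMapResponse write each request/response pair to a
// timestamped JSON file under that directory, one subdirectory per node.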
var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH")

// TODO: Optimise
// As this work continues, the idea is that there will be one Mapper instance
// per node, attached to the open stream between the control server and the client.
// This means that it can hold per-node state, which we can use to
// improve the mapresponses sent.
// We could:
// - Keep information about the previous mapresponse so we can send a diff
// - Store hashes
// - Create a "minifier" that removes info not needed for the node
// - Do some sort of batching, e.g. wait for 5 or 60 seconds before sending

type Mapper struct {
	// Configuration
	// TODO(kradalby): figure out if this is the format we want this in
	derpMap          *tailcfg.DERPMap
	baseDomain       string
	dnsCfg           *tailcfg.DNSConfig
	logtail          bool
	randomClientPort bool

	uid     string
	created time.Time
	seq     uint64

	// The internal maps aren't concurrency safe, so we need to ensure
	// that only one function is accessing them at a time.
	mu      sync.Mutex
	peers   map[uint64]*types.Node
	patches map[uint64][]patch
}
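
// patch holds a tailcfg.PeerChange that arrived for a node this mapper
// does not know about yet; it is applied, and then discarded, the next
// time that node shows up in a PeerChangedResponse.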
type patch struct {
	timestamp time.Time
	change    *tailcfg.PeerChange
}
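
// NewMapper creates a Mapper for a node with the given set of current peers.
// The node itself is only used for logging; the peers are copied into the
// mapper's internal map, which is then kept up to date by the *Response
// methods as changes arrive.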
func NewMapper(
	node *types.Node,
	peers types.Nodes,
	derpMap *tailcfg.DERPMap,
	baseDomain string,
	dnsCfg *tailcfg.DNSConfig,
	logtail bool,
	randomClientPort bool,
) *Mapper {
	log.Debug().
		Caller().
		Str("node", node.Hostname).
		Msg("creating new mapper")

	uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)

	return &Mapper{
		derpMap:          derpMap,
		baseDomain:       baseDomain,
		dnsCfg:           dnsCfg,
		logtail:          logtail,
		randomClientPort: randomClientPort,

		uid:     uid,
		created: time.Now(),
		seq:     0,

		// TODO: populate
		peers:   peers.IDMap(),
		patches: make(map[uint64][]patch),
	}
}

func (m *Mapper) String() string {
	return fmt.Sprintf("Mapper: { seq: %d, uid: %s, created: %s }", m.seq, m.uid, m.created)
}

func generateUserProfiles(
	node *types.Node,
	peers types.Nodes,
	baseDomain string,
) []tailcfg.UserProfile {
	userMap := make(map[string]types.User)
	userMap[node.User.Name] = node.User
	for _, peer := range peers {
		userMap[peer.User.Name] = peer.User // not worth checking if it is already there
	}

	profiles := []tailcfg.UserProfile{}
	for _, user := range userMap {
		displayName := user.Name
		if baseDomain != "" {
			displayName = fmt.Sprintf("%s@%s", user.Name, baseDomain)
		}

		profiles = append(profiles,
			tailcfg.UserProfile{
				ID:          tailcfg.UserID(user.ID),
				LoginName:   user.Name,
				DisplayName: displayName,
			})
	}

	return profiles
}

func generateDNSConfig(
	base *tailcfg.DNSConfig,
	baseDomain string,
	node *types.Node,
	peers types.Nodes,
) *tailcfg.DNSConfig {
	dnsConfig := base.Clone()

	// If MagicDNS is enabled
	if base != nil && base.Proxied {
		// Only inject the search domain of the current user;
		// shared nodes should use their full FQDN.
		dnsConfig.Domains = append(
			dnsConfig.Domains,
			fmt.Sprintf(
				"%s.%s",
				node.User.Name,
				baseDomain,
			),
		)

		userSet := mapset.NewSet[types.User]()
		userSet.Add(node.User)
		for _, p := range peers {
			userSet.Add(p.User)
		}
		for _, user := range userSet.ToSlice() {
			dnsRoute := fmt.Sprintf("%v.%v", user.Name, baseDomain)
			dnsConfig.Routes[dnsRoute] = nil
		}
	} else {
		dnsConfig = base
	}

	// Guard against a nil base config; accessing dnsConfig.Resolvers
	// would otherwise panic when no DNS config is set.
	if dnsConfig != nil {
		addNextDNSMetadata(dnsConfig.Resolvers, node)
	}

	return dnsConfig
}

// addNextDNSMetadata adds per-device query parameters to any NextDNS DoH
// resolvers present in the list of resolvers. It takes metadata from the
// node and instructs Tailscale to attach it to DNS requests, which makes
// it possible to identify in the NextDNS dashboard which device the
// requests come from.
//
// This will produce a resolver like:
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
	for _, resolver := range resolvers {
		if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
			attrs := url.Values{
				"device_name":  []string{node.Hostname},
				"device_model": []string{node.Hostinfo.OS},
			}

			if len(node.IPAddresses) > 0 {
				attrs.Add("device_ip", node.IPAddresses[0].String())
			}

			resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
		}
	}
}

// fullMapResponse creates a complete MapResponse for a node.
// It is a separate function to make testing easier.
func (m *Mapper) fullMapResponse(
	node *types.Node,
	pol *policy.ACLPolicy,
	capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
	peers := nodeMapToList(m.peers)

	resp, err := m.baseWithConfigMapResponse(node, pol, capVer)
	if err != nil {
		return nil, err
	}

	err = appendPeerChanges(
		resp,
		pol,
		node,
		capVer,
		peers,
		peers,
		m.baseDomain,
		m.dnsCfg,
		m.randomClientPort,
	)
	if err != nil {
		return nil, err
	}

	return resp, nil
}

// FullMapResponse returns a MapResponse for the given node.
func (m *Mapper) FullMapResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	pol *policy.ACLPolicy,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	peers := maps.Keys(m.peers)
	peersWithPatches := maps.Keys(m.patches)
	slices.Sort(peers)
	slices.Sort(peersWithPatches)

	if len(peersWithPatches) > 0 {
		log.Debug().
			Str("node", node.Hostname).
			Uints64("peers", peers).
			Uints64("pending_patches", peersWithPatches).
			Msgf("node requested full map response, but has pending patches")
	}

	resp, err := m.fullMapResponse(node, pol, mapRequest.Version)
	if err != nil {
		return nil, err
	}

	return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}

// LiteMapResponse returns a MapResponse for the given node.
// Lite means that the peers have been omitted; it is intended
// to be used to answer MapRequests with OmitPeers set to true.
func (m *Mapper) LiteMapResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	pol *policy.ACLPolicy,
) ([]byte, error) {
	resp, err := m.baseWithConfigMapResponse(node, pol, mapRequest.Version)
	if err != nil {
		return nil, err
	}

	return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}
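
// KeepAliveResponse returns an otherwise empty MapResponse with KeepAlive
// set to true, used to keep the long-polling map stream alive.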
func (m *Mapper) KeepAliveResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
) ([]byte, error) {
	resp := m.baseMapResponse()
	resp.KeepAlive = true

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
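
// DERPMapResponse returns a MapResponse carrying only an updated DERP map,
// and records the new map on the Mapper for use in later responses.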
func (m *Mapper) DERPMapResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	derpMap *tailcfg.DERPMap,
) ([]byte, error) {
	m.derpMap = derpMap

	resp := m.baseMapResponse()
	resp.DERPMap = derpMap

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
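
// PeerChangedResponse returns a MapResponse with the given set of changed
// nodes, first applying (and then discarding) any patches queued for them.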
func (m *Mapper) PeerChangedResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	changed types.Nodes,
	pol *policy.ACLPolicy,
	messages ...string,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Update our internal map.
	for _, node := range changed {
		if patches, ok := m.patches[node.ID]; ok {
			// Preserve online status in case the patch has an outdated one.
			online := node.IsOnline

			for _, p := range patches {
				// TODO(kradalby): Figure out if this needs to be sorted by timestamp.
				node.ApplyPeerChange(p.change)
			}

			// Ensure the patches are not applied again later.
			delete(m.patches, node.ID)

			node.IsOnline = online
		}

		m.peers[node.ID] = node
	}

	resp := m.baseMapResponse()
	err := appendPeerChanges(
		&resp,
		pol,
		node,
		mapRequest.Version,
		nodeMapToList(m.peers),
		changed,
		m.baseDomain,
		m.dnsCfg,
		m.randomClientPort,
	)
	if err != nil {
		return nil, err
	}

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...)
}

// PeerChangedPatchResponse creates a patch MapResponse with
// incoming updates from a state change.
func (m *Mapper) PeerChangedPatchResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	changed []*tailcfg.PeerChange,
	pol *policy.ACLPolicy,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	sendUpdate := false
	// Patch the internal map.
	for _, change := range changed {
		if peer, ok := m.peers[uint64(change.NodeID)]; ok {
			peer.ApplyPeerChange(change)
			sendUpdate = true
		} else {
			log.Trace().
				Str("node", node.Hostname).
				Msgf("Node with ID %d is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname)

			p := patch{
				timestamp: time.Now(),
				change:    change,
			}

			// append handles a nil slice, so there is no need to check
			// whether patches already exist for this node.
			m.patches[uint64(change.NodeID)] = append(m.patches[uint64(change.NodeID)], p)
		}
	}

	if !sendUpdate {
		return nil, nil
	}

	resp := m.baseMapResponse()
	resp.PeersChangedPatch = changed

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}

// PeerRemovedResponse creates a MapResponse telling the node that the
// given peers have been removed.
// TODO(kradalby): We need some integration tests for this.
func (m *Mapper) PeerRemovedResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	removed []tailcfg.NodeID,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Some nodes might have been removed already,
	// so we don't want to ask the downstream client to remove
	// them twice; that can cause a panic in tailscaled.
	notYetRemoved := []tailcfg.NodeID{}

	// Remove the nodes from our internal map.
	for _, id := range removed {
		if _, ok := m.peers[uint64(id)]; ok {
			notYetRemoved = append(notYetRemoved, id)
		}

		delete(m.peers, uint64(id))
		delete(m.patches, uint64(id))
	}

	resp := m.baseMapResponse()
	resp.PeersRemoved = notYetRemoved

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}
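
// marshalMapResponse serialises resp as JSON, optionally zstd-compresses
// it, and prefixes the body with a 4-byte little-endian length header,
// which is the framing the client expects on the map stream. When
// debugDumpMapResponsePath is set, it also writes the request/response
// pair to disk for debugging.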
func (m *Mapper) marshalMapResponse(
	mapRequest tailcfg.MapRequest,
	resp *tailcfg.MapResponse,
	node *types.Node,
	compression string,
	messages ...string,
) ([]byte, error) {
	atomic.AddUint64(&m.seq, 1)

	jsonBody, err := json.Marshal(resp)
	if err != nil {
		log.Error().
			Caller().
			Err(err).
			Msg("Cannot marshal map response")
	}

	if debugDumpMapResponsePath != "" {
		data := map[string]interface{}{
			"Messages":    messages,
			"MapRequest":  mapRequest,
			"MapResponse": resp,
		}

		responseType := "keepalive"

		switch {
		case len(resp.Peers) > 0:
			responseType = "full"
		case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
			responseType = "lite"
		case len(resp.PeersChanged) > 0:
			responseType = "changed"
		case len(resp.PeersChangedPatch) > 0:
			responseType = "patch"
		case len(resp.PeersRemoved) > 0:
			responseType = "removed"
		}

		body, err := json.MarshalIndent(data, "", " ")
		if err != nil {
			log.Error().
				Caller().
				Err(err).
				Msg("Cannot marshal map response for debug dump")
		}

		perms := fs.FileMode(debugMapResponsePerm)
		mPath := path.Join(debugDumpMapResponsePath, node.Hostname)
		err = os.MkdirAll(mPath, perms)
		if err != nil {
			panic(err)
		}

		now := time.Now().UnixNano()

		mapResponsePath := path.Join(
			mPath,
			fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType),
		)

		log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
		err = os.WriteFile(mapResponsePath, body, perms)
		if err != nil {
			panic(err)
		}
	}

	var respBody []byte
	if compression == util.ZstdCompression {
		respBody = zstdEncode(jsonBody)
	} else {
		respBody = jsonBody
	}

	// Prefix the body with its length as a 4-byte little-endian header.
	data := make([]byte, reservedResponseHeaderSize)
	binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
	data = append(data, respBody...)

	return data, nil
}
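
// zstdEncode compresses in with an encoder borrowed from zstdEncoderPool
// and returns the compressed bytes.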
func zstdEncode(in []byte) []byte {
	encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
	if !ok {
		panic("invalid type in sync pool")
	}

	out := encoder.EncodeAll(in, nil)
	_ = encoder.Close()
	zstdEncoderPool.Put(encoder)

	return out
}
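
// zstdEncoderPool reuses zstd encoders across map responses, avoiding the
// cost of constructing a new encoder for every response.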
var zstdEncoderPool = &sync.Pool{
	New: func() any {
		encoder, err := smallzstd.NewEncoder(
			nil,
			zstd.WithEncoderLevel(zstd.SpeedFastest))
		if err != nil {
			panic(err)
		}

		return encoder
	},
}

// baseMapResponse returns a tailcfg.MapResponse with
// KeepAlive false and ControlTime set to now.
func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
	now := time.Now()

	resp := tailcfg.MapResponse{
		KeepAlive:   false,
		ControlTime: &now,
		// TODO(kradalby): Implement PingRequest?
	}

	return resp
}

// baseWithConfigMapResponse returns a tailcfg.MapResponse struct
// with the basic configuration from headscale set.
// It is used for bigger updates, such as full and lite, not for
// incremental ones.
func (m *Mapper) baseWithConfigMapResponse(
	node *types.Node,
	pol *policy.ACLPolicy,
	capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
	resp := m.baseMapResponse()

	tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
	if err != nil {
		return nil, err
	}
	resp.Node = tailnode

	resp.DERPMap = m.derpMap
	resp.Domain = m.baseDomain

	// Do not instruct clients to collect services, since we do not
	// support them or do anything with them.
	resp.CollectServices = "false"

	resp.KeepAlive = false

	resp.Debug = &tailcfg.Debug{
		DisableLogTail: !m.logtail,
	}

	return &resp, nil
}

func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
	ret := make(types.Nodes, 0)

	for _, node := range nodes {
		ret = append(ret, node)
	}

	return ret
}

// appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed.
func appendPeerChanges(
	resp *tailcfg.MapResponse,
	pol *policy.ACLPolicy,
	node *types.Node,
	capVer tailcfg.CapabilityVersion,
	peers types.Nodes,
	changed types.Nodes,
	baseDomain string,
	dnsCfg *tailcfg.DNSConfig,
	randomClientPort bool,
) error {
	fullChange := len(peers) == len(changed)

	rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
		pol,
		node,
		peers,
	)
	if err != nil {
		return err
	}

	// If there are filter rules present, see if there are any nodes that
	// cannot access each other at all and remove them from the peers.
	if len(rules) > 0 {
		changed = policy.FilterNodesByACL(node, changed, rules)
	}

	profiles := generateUserProfiles(node, changed, baseDomain)

	dnsConfig := generateDNSConfig(
		dnsCfg,
		baseDomain,
		node,
		peers,
	)

	tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort)
	if err != nil {
		return err
	}

	// Peers is always returned sorted by Node.ID.
	sort.SliceStable(tailPeers, func(x, y int) bool {
		return tailPeers[x].ID < tailPeers[y].ID
	})

	if fullChange {
		resp.Peers = tailPeers
	} else {
		resp.PeersChanged = tailPeers
	}
	resp.DNSConfig = dnsConfig
	resp.PacketFilter = policy.ReduceFilterRules(node, rules)
	resp.UserProfiles = profiles
	resp.SSHPolicy = sshPolicy

	return nil
}