package mapper

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io/fs"
	"net/url"
	"os"
	"path"
	"slices"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	mapset "github.com/deckarep/golang-set/v2"
	"github.com/juanfont/headscale/hscontrol/policy"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/klauspost/compress/zstd"
	"github.com/rs/zerolog/log"
	"golang.org/x/exp/maps"
	"tailscale.com/envknob"
	"tailscale.com/smallzstd"
	"tailscale.com/tailcfg"
	"tailscale.com/types/dnstype"
)

const (
	nextDNSDoHPrefix           = "https://dns.nextdns.io"
	reservedResponseHeaderSize = 4
	mapperIDLength             = 8
	debugMapResponsePerm       = 0o755
)

var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_PATH")

// TODO: Optimise
// As this work continues, the idea is that there will be one Mapper instance
// per node, attached to the open stream between the control server and client.
// This means that it can hold per-node state, which we can use to
// improve the map responses sent.
// We could:
// - Keep information about the previous map response so we can send a diff
// - Store hashes
// - Create a "minifier" that removes info not needed for the node
// - Some sort of batching: wait for 5 or 60 seconds before sending

type Mapper struct {
	// Configuration
	// TODO(kradalby): figure out if this is the format we want this in
	derpMap          *tailcfg.DERPMap
	baseDomain       string
	dnsCfg           *tailcfg.DNSConfig
	logtail          bool
	randomClientPort bool

	uid     string
	created time.Time
	seq     uint64

	// The map isn't concurrency safe, so we need to ensure
	// that only one function accesses it at a time.
	mu      sync.Mutex
	peers   map[uint64]*types.Node
	patches map[uint64][]patch
}

type patch struct {
	timestamp time.Time
	change    *tailcfg.PeerChange
}

func NewMapper(
	node *types.Node,
	peers types.Nodes,
	derpMap *tailcfg.DERPMap,
	baseDomain string,
	dnsCfg *tailcfg.DNSConfig,
	logtail bool,
	randomClientPort bool,
) *Mapper {
	log.Debug().
		Caller().
		Str("node", node.Hostname).
		Msg("creating new mapper")

	uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)

	return &Mapper{
		derpMap:          derpMap,
		baseDomain:       baseDomain,
		dnsCfg:           dnsCfg,
		logtail:          logtail,
		randomClientPort: randomClientPort,

		uid:     uid,
		created: time.Now(),
		seq:     0,

		// TODO: populate
		peers:   peers.IDMap(),
		patches: make(map[uint64][]patch),
	}
}

func (m *Mapper) String() string {
	return fmt.Sprintf("Mapper: { seq: %d, uid: %s, created: %s }", m.seq, m.uid, m.created)
}

func generateUserProfiles(
	node *types.Node,
	peers types.Nodes,
	baseDomain string,
) []tailcfg.UserProfile {
	userMap := make(map[string]types.User)
	userMap[node.User.Name] = node.User
	for _, peer := range peers {
		userMap[peer.User.Name] = peer.User // not worth checking if it is already there
	}

	profiles := []tailcfg.UserProfile{}
	for _, user := range userMap {
		displayName := user.Name
		if baseDomain != "" {
			displayName = fmt.Sprintf("%s@%s", user.Name, baseDomain)
		}

		profiles = append(profiles,
			tailcfg.UserProfile{
				ID:          tailcfg.UserID(user.ID),
				LoginName:   user.Name,
				DisplayName: displayName,
			})
	}

	return profiles
}

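// Illustrative example (values assumed, not from the original source):
// with baseDomain "example.com", a user named "alice" with ID 1 yields
//
//	tailcfg.UserProfile{
//		ID:          tailcfg.UserID(1),
//		LoginName:   "alice",
//		DisplayName: "alice@example.com",
//	}
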
func generateDNSConfig(
	base *tailcfg.DNSConfig,
	baseDomain string,
	node *types.Node,
	peers types.Nodes,
) *tailcfg.DNSConfig {
	dnsConfig := base.Clone()

	// If MagicDNS is enabled.
	if base != nil && base.Proxied {
		// Only inject the search domain of the current user;
		// shared nodes should use their full FQDN.
		dnsConfig.Domains = append(
			dnsConfig.Domains,
			fmt.Sprintf(
				"%s.%s",
				node.User.Name,
				baseDomain,
			),
		)

		userSet := mapset.NewSet[types.User]()
		userSet.Add(node.User)
		for _, p := range peers {
			userSet.Add(p.User)
		}
		for _, user := range userSet.ToSlice() {
			dnsRoute := fmt.Sprintf("%v.%v", user.Name, baseDomain)
			dnsConfig.Routes[dnsRoute] = nil
		}
	} else {
		dnsConfig = base
	}

	addNextDNSMetadata(dnsConfig.Resolvers, node)

	return dnsConfig
}

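// For illustration (assumed values, not from the original source): with
// MagicDNS enabled (base.Proxied == true), baseDomain "example.com", a node
// owned by user "alice", and a peer owned by "bob", the function produces
//
//	dnsConfig.Domains = append(base.Domains, "alice.example.com")
//	dnsConfig.Routes["alice.example.com"] = nil
//	dnsConfig.Routes["bob.example.com"] = nil
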
// addNextDNSMetadata takes metadata from the node and, for any NextDNS DoH
// resolvers present in the list of resolvers, instructs Tailscale to add it
// to the requests. This makes it possible to identify from which device the
// requests come in the NextDNS dashboard.
//
// This will produce a resolver like:
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node *types.Node) {
	for _, resolver := range resolvers {
		if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
			attrs := url.Values{
				"device_name":  []string{node.Hostname},
				"device_model": []string{node.Hostinfo.OS},
			}

			if len(node.IPAddresses) > 0 {
				attrs.Add("device_ip", node.IPAddresses[0].String())
			}

			resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
		}
	}
}

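// Sketch of the rewrite above (assumed values, not from the original
// source): a node named "mynode" running Linux with Tailscale IP
// 100.64.0.1 turns a resolver address of
//
//	https://dns.nextdns.io/abc123
//
// into
//
//	https://dns.nextdns.io/abc123?device_ip=100.64.0.1&device_model=linux&device_name=mynode
//
// (url.Values.Encode sorts the query parameters alphabetically.)
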
// fullMapResponse creates a complete MapResponse for a node.
// It is a separate function to make testing easier.
func (m *Mapper) fullMapResponse(
	node *types.Node,
	pol *policy.ACLPolicy,
	capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
	peers := nodeMapToList(m.peers)

	resp, err := m.baseWithConfigMapResponse(node, pol, capVer)
	if err != nil {
		return nil, err
	}

	err = appendPeerChanges(
		resp,
		pol,
		node,
		capVer,
		peers,
		peers,
		m.baseDomain,
		m.dnsCfg,
		m.randomClientPort,
	)
	if err != nil {
		return nil, err
	}

	return resp, nil
}

// FullMapResponse returns a MapResponse for the given node.
func (m *Mapper) FullMapResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	pol *policy.ACLPolicy,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	peers := maps.Keys(m.peers)
	peersWithPatches := maps.Keys(m.patches)
	slices.Sort(peers)
	slices.Sort(peersWithPatches)

	if len(peersWithPatches) > 0 {
		log.Debug().
			Str("node", node.Hostname).
			Uints64("peers", peers).
			Uints64("pending_patches", peersWithPatches).
			Msgf("node requested full map response, but has pending patches")
	}

	resp, err := m.fullMapResponse(node, pol, mapRequest.Version)
	if err != nil {
		return nil, err
	}

	return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}

// LiteMapResponse returns a MapResponse for the given node.
// Lite means that the peers have been omitted; it is intended
// to be used to answer MapRequests with OmitPeers set to true.
func (m *Mapper) LiteMapResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	pol *policy.ACLPolicy,
) ([]byte, error) {
	resp, err := m.baseWithConfigMapResponse(node, pol, mapRequest.Version)
	if err != nil {
		return nil, err
	}

	rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
		pol,
		node,
		nodeMapToList(m.peers),
	)
	if err != nil {
		return nil, err
	}

	resp.PacketFilter = policy.ReduceFilterRules(node, rules)
	resp.SSHPolicy = sshPolicy

	return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}

func (m *Mapper) KeepAliveResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
) ([]byte, error) {
	resp := m.baseMapResponse()
	resp.KeepAlive = true

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}

func (m *Mapper) DERPMapResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	derpMap *tailcfg.DERPMap,
) ([]byte, error) {
	m.derpMap = derpMap

	resp := m.baseMapResponse()
	resp.DERPMap = derpMap

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}

func (m *Mapper) PeerChangedResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	changed types.Nodes,
	pol *policy.ACLPolicy,
	messages ...string,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Update our internal map.
	for _, node := range changed {
		if patches, ok := m.patches[node.ID]; ok {
			// Preserve online status in case the patch has an outdated one.
			online := node.IsOnline

			for _, p := range patches {
				// TODO(kradalby): Figure out if this needs to be sorted by timestamp.
				node.ApplyPeerChange(p.change)
			}

			// Ensure the patches are not applied again later.
			delete(m.patches, node.ID)

			node.IsOnline = online
		}

		m.peers[node.ID] = node
	}

	resp := m.baseMapResponse()
	err := appendPeerChanges(
		&resp,
		pol,
		node,
		mapRequest.Version,
		nodeMapToList(m.peers),
		changed,
		m.baseDomain,
		m.dnsCfg,
		m.randomClientPort,
	)
	if err != nil {
		return nil, err
	}

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress, messages...)
}

// PeerChangedPatchResponse creates a patch MapResponse with
// an incoming update from a state change.
func (m *Mapper) PeerChangedPatchResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	changed []*tailcfg.PeerChange,
	pol *policy.ACLPolicy,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	sendUpdate := false

	// Patch the internal map.
	for _, change := range changed {
		if peer, ok := m.peers[uint64(change.NodeID)]; ok {
			peer.ApplyPeerChange(change)
			sendUpdate = true
		} else {
			log.Trace().
				Str("node", node.Hostname).
				Msgf("Node with ID %d is missing from mapper for Node %s, saving patch for when node is available", change.NodeID, node.Hostname)

			p := patch{
				timestamp: time.Now(),
				change:    change,
			}

			// append works on a nil slice, so this covers both the first
			// patch for a node and subsequent ones.
			m.patches[uint64(change.NodeID)] = append(m.patches[uint64(change.NodeID)], p)
		}
	}

	if !sendUpdate {
		return nil, nil
	}

	resp := m.baseMapResponse()
	resp.PeersChangedPatch = changed

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}

// TODO(kradalby): We need some integration tests for this.
func (m *Mapper) PeerRemovedResponse(
	mapRequest tailcfg.MapRequest,
	node *types.Node,
	removed []tailcfg.NodeID,
) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	// Some nodes might have been removed already,
	// so we don't want to ask downstream to remove them
	// twice; that can cause a panic in tailscaled.
	notYetRemoved := []tailcfg.NodeID{}

	// Remove from our internal map.
	for _, id := range removed {
		if _, ok := m.peers[uint64(id)]; ok {
			notYetRemoved = append(notYetRemoved, id)
		}

		delete(m.peers, uint64(id))
		delete(m.patches, uint64(id))
	}

	resp := m.baseMapResponse()
	resp.PeersRemoved = notYetRemoved

	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
}

func (m *Mapper) marshalMapResponse(
	mapRequest tailcfg.MapRequest,
	resp *tailcfg.MapResponse,
	node *types.Node,
	compression string,
	messages ...string,
) ([]byte, error) {
	atomic.AddUint64(&m.seq, 1)

	jsonBody, err := json.Marshal(resp)
	if err != nil {
		log.Error().
			Caller().
			Err(err).
			Msg("Cannot marshal map response")
	}

	if debugDumpMapResponsePath != "" {
		data := map[string]interface{}{
			"Messages":    messages,
			"MapRequest":  mapRequest,
			"MapResponse": resp,
		}

		responseType := "keepalive"

		switch {
		case resp.Peers != nil && len(resp.Peers) > 0:
			responseType = "full"
		case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
			responseType = "lite"
		case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
			responseType = "changed"
		case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
			responseType = "patch"
		case resp.PeersRemoved != nil && len(resp.PeersRemoved) > 0:
			responseType = "removed"
		}

		body, err := json.MarshalIndent(data, "", " ")
		if err != nil {
			log.Error().
				Caller().
				Err(err).
				Msg("Cannot marshal map response")
		}

		perms := fs.FileMode(debugMapResponsePerm)
		mPath := path.Join(debugDumpMapResponsePath, node.Hostname)
		err = os.MkdirAll(mPath, perms)
		if err != nil {
			panic(err)
		}

		now := time.Now().UnixNano()

		mapResponsePath := path.Join(
			mPath,
			fmt.Sprintf("%d-%s-%d-%s.json", now, m.uid, atomic.LoadUint64(&m.seq), responseType),
		)

		log.Trace().Msgf("Writing MapResponse to %s", mapResponsePath)
		err = os.WriteFile(mapResponsePath, body, perms)
		if err != nil {
			panic(err)
		}
	}

	var respBody []byte
	if compression == util.ZstdCompression {
		respBody = zstdEncode(jsonBody)
	} else {
		respBody = jsonBody
	}

	data := make([]byte, reservedResponseHeaderSize)
	binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
	data = append(data, respBody...)

	return data, nil
}

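// decodeMapResponseFrame is a minimal sketch of the reader side of the
// framing above; it is not part of the original code. It assumes a complete
// frame: a 4-byte little-endian length header followed by the (possibly
// zstd-compressed) JSON body.
func decodeMapResponseFrame(frame []byte) ([]byte, error) {
	if len(frame) < reservedResponseHeaderSize {
		return nil, fmt.Errorf("frame too short: %d bytes", len(frame))
	}

	// The header stores the body length, not counting the header itself.
	bodyLen := binary.LittleEndian.Uint32(frame[:reservedResponseHeaderSize])
	body := frame[reservedResponseHeaderSize:]
	if int(bodyLen) != len(body) {
		return nil, fmt.Errorf("header says %d bytes, got %d", bodyLen, len(body))
	}

	return body, nil
}
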
func zstdEncode(in []byte) []byte {
	encoder, ok := zstdEncoderPool.Get().(*zstd.Encoder)
	if !ok {
		panic("invalid type in sync pool")
	}

	out := encoder.EncodeAll(in, nil)
	_ = encoder.Close()
	zstdEncoderPool.Put(encoder)

	return out
}

var zstdEncoderPool = &sync.Pool{
	New: func() any {
		encoder, err := smallzstd.NewEncoder(
			nil,
			zstd.WithEncoderLevel(zstd.SpeedFastest))
		if err != nil {
			panic(err)
		}

		return encoder
	},
}

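// zstdDecode is an illustrative counterpart to zstdEncode and is not part
// of the original code; a consumer would decompress the body like this
// before unmarshalling the MapResponse. It uses the stateless DecodeAll
// API from github.com/klauspost/compress/zstd.
func zstdDecode(in []byte) ([]byte, error) {
	decoder, err := zstd.NewReader(nil)
	if err != nil {
		return nil, err
	}
	defer decoder.Close()

	return decoder.DecodeAll(in, nil)
}
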
// baseMapResponse returns a tailcfg.MapResponse with
// KeepAlive false and ControlTime set to now.
func (m *Mapper) baseMapResponse() tailcfg.MapResponse {
	now := time.Now()

	resp := tailcfg.MapResponse{
		KeepAlive:   false,
		ControlTime: &now,
		// TODO(kradalby): Implement PingRequest?
	}

	return resp
}

// baseWithConfigMapResponse returns a tailcfg.MapResponse struct
// with the basic configuration from headscale set.
// It is used for bigger updates, such as full and lite, not
// incremental ones.
func (m *Mapper) baseWithConfigMapResponse(
	node *types.Node,
	pol *policy.ACLPolicy,
	capVer tailcfg.CapabilityVersion,
) (*tailcfg.MapResponse, error) {
	resp := m.baseMapResponse()

	tailnode, err := tailNode(node, capVer, pol, m.dnsCfg, m.baseDomain, m.randomClientPort)
	if err != nil {
		return nil, err
	}
	resp.Node = tailnode

	resp.DERPMap = m.derpMap
	resp.Domain = m.baseDomain

	// Do not instruct clients to collect services we do not
	// support or do anything with them.
	resp.CollectServices = "false"
	resp.KeepAlive = false

	resp.Debug = &tailcfg.Debug{
		DisableLogTail: !m.logtail,
	}

	return &resp, nil
}

func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
	ret := make(types.Nodes, 0)

	for _, node := range nodes {
		ret = append(ret, node)
	}

	return ret
}

// appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed.
func appendPeerChanges(
	resp *tailcfg.MapResponse,

	pol *policy.ACLPolicy,
	node *types.Node,
	capVer tailcfg.CapabilityVersion,
	peers types.Nodes,
	changed types.Nodes,
	baseDomain string,
	dnsCfg *tailcfg.DNSConfig,
	randomClientPort bool,
) error {
	fullChange := len(peers) == len(changed)

	rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
		pol,
		node,
		peers,
	)
	if err != nil {
		return err
	}

	// If there are filter rules present, see if there are any nodes that cannot
	// access each other at all and remove them from the peers.
	if len(rules) > 0 {
		changed = policy.FilterNodesByACL(node, changed, rules)
	}

	profiles := generateUserProfiles(node, changed, baseDomain)

	dnsConfig := generateDNSConfig(
		dnsCfg,
		baseDomain,
		node,
		peers,
	)

	tailPeers, err := tailNodes(changed, capVer, pol, dnsCfg, baseDomain, randomClientPort)
	if err != nil {
		return err
	}

	// Peers is always returned sorted by Node.ID.
	sort.SliceStable(tailPeers, func(x, y int) bool {
		return tailPeers[x].ID < tailPeers[y].ID
	})

	if fullChange {
		resp.Peers = tailPeers
	} else {
		resp.PeersChanged = tailPeers
	}
	resp.DNSConfig = dnsConfig
	resp.PacketFilter = policy.ReduceFilterRules(node, rules)
	resp.UserProfiles = profiles
	resp.SSHPolicy = sshPolicy

	return nil
}