Consistently use Machine pointers

This commit rewrites a large part of the code to always use *Machine,
instead of a mix of Machine values and pointers, and a mix of
tailcfg.Node and Machine.

Now we use *Machine throughout, and when a tailcfg.Node is needed, it is
converted just before it is used.
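
In outline the change looks like the sketch below: internal helpers take *Machine, and the conversion to the wire type happens only at the boundary, just before it is needed. The types and signatures here are simplified stand-ins for illustration, not headscale's real API; in the actual code the conversion is Machine.toNode, visible in the poll.go hunk further down.

```go
package main

import "fmt"

// Node stands in for tailcfg.Node; illustrative only.
type Node struct {
	Name string
}

// Machine stands in for headscale's Machine database model.
type Machine struct {
	Name string
}

// toNode converts the database model to its wire representation.
// The real method takes arguments and can fail; this sketch cannot.
func (m *Machine) toNode() (*Node, error) {
	return &Node{Name: m.Name}, nil
}

// notifyChangesToPeers is an internal helper: it takes *Machine, so every
// caller shares the same object and no accidental copies are made.
func notifyChangesToPeers(m *Machine) {
	fmt.Printf("notifying peers of %s\n", m.Name)
}

func main() {
	m := &Machine{Name: "testmachine"}

	notifyChangesToPeers(m) // the pointer flows through unchanged

	// Convert to the wire type only at the boundary, just before use.
	n, err := m.toNode()
	if err != nil {
		panic(err)
	}
	fmt.Println(n.Name)
}
```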
Kristoffer Dalby 2021-10-02 22:00:09 +01:00
parent 0475eb6ef7
commit 0d4a006536
5 changed files with 58 additions and 61 deletions

api.go

@@ -213,7 +213,7 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
c.Data(200, "application/json; charset=utf-8", respBody)
}
-func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
+func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m *Machine) (*[]byte, error) {
log.Trace().
Str("func", "getMapResponse").
Str("machine", req.Hostinfo.Hostname).
@@ -286,7 +286,7 @@ func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Mac
return &data, nil
}
-func (h *Headscale) getMapKeepAliveResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
+func (h *Headscale) getMapKeepAliveResponse(mKey wgkey.Key, req tailcfg.MapRequest, m *Machine) (*[]byte, error) {
resp := tailcfg.MapResponse{
KeepAlive: true,
}

machine.go

@@ -296,7 +296,7 @@ func (m *Machine) GetHostInfo() (*tailcfg.Hostinfo, error) {
}
func (h *Headscale) notifyChangesToPeers(m *Machine) {
-peers, err := h.getPeers(*m)
+peers, err := h.getPeers(m)
if err != nil {
log.Error().
Str("func", "notifyChangesToPeers").
@@ -363,7 +363,7 @@ func (h *Headscale) closeUpdateChannel(m *Machine) {
h.clientsUpdateChannels.Delete(m.ID)
}
-func (h *Headscale) sendRequestOnUpdateChannel(m *tailcfg.Node) error {
+func (h *Headscale) sendRequestOnUpdateChannel(m *Machine) error {
h.clientsUpdateChannelMutex.Lock()
defer h.clientsUpdateChannelMutex.Unlock()

machine_test.go

@@ -16,7 +16,7 @@ func (s *Suite) TestGetMachine(c *check.C) {
_, err = h.GetMachine("test", "testmachine")
c.Assert(err, check.NotNil)
-m := Machine{
+m := &Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
@@ -27,7 +27,7 @@ func (s *Suite) TestGetMachine(c *check.C) {
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
}
-h.db.Save(&m)
+h.db.Save(m)
m1, err := h.GetMachine("test", "testmachine")
c.Assert(err, check.IsNil)

poll.go

@@ -140,7 +140,7 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
Str("id", c.Param("id")).
Str("machine", m.Name).
Msg("Loading or creating update channel")
-updateChan := h.getOrOpenUpdateChannel(&m)
+updateChan := h.getOrOpenUpdateChannel(m)
pollDataChan := make(chan []byte)
// defer close(pollData)
@@ -159,7 +159,7 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
// It sounds like we should update the nodes when we have received a endpoint update
// even tho the comments in the tailscale code dont explicitly say so.
-go h.notifyChangesToPeers(&m)
+go h.notifyChangesToPeers(m)
return
} else if req.OmitPeers && req.Stream {
log.Warn().
@@ -184,7 +184,7 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
Str("handler", "PollNetMap").
Str("machine", m.Name).
Msg("Notifying peers")
-go h.notifyChangesToPeers(&m)
+go h.notifyChangesToPeers(m)
h.PollNetMapStream(c, m, req, mKey, pollDataChan, keepAliveChan, updateChan, cancelKeepAlive)
log.Trace().
@@ -199,7 +199,7 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
// to the connected clients.
func (h *Headscale) PollNetMapStream(
c *gin.Context,
-m Machine,
+m *Machine,
req tailcfg.MapRequest,
mKey wgkey.Key,
pollDataChan chan []byte,
@@ -246,7 +246,7 @@ func (h *Headscale) PollNetMapStream(
// TODO: Abstract away all the database calls, this can cause race conditions
// when an outdated machine object is kept alive, e.g. db is update from
// command line, but then overwritten.
-err = h.UpdateMachine(&m)
+err = h.UpdateMachine(m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
@@ -292,7 +292,7 @@ func (h *Headscale) PollNetMapStream(
// TODO: Abstract away all the database calls, this can cause race conditions
// when an outdated machine object is kept alive, e.g. db is update from
// command line, but then overwritten.
-err = h.UpdateMachine(&m)
+err = h.UpdateMachine(m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
@@ -318,7 +318,7 @@ func (h *Headscale) PollNetMapStream(
Str("machine", m.Name).
Str("channel", "update").
Msg("Received a request for update")
-if h.isOutdated(&m) {
+if h.isOutdated(m) {
log.Debug().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
@@ -356,7 +356,7 @@ func (h *Headscale) PollNetMapStream(
// TODO: Abstract away all the database calls, this can cause race conditions
// when an outdated machine object is kept alive, e.g. db is update from
// command line, but then overwritten.
-err = h.UpdateMachine(&m)
+err = h.UpdateMachine(m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
@@ -386,7 +386,7 @@ func (h *Headscale) PollNetMapStream(
// TODO: Abstract away all the database calls, this can cause race conditions
// when an outdated machine object is kept alive, e.g. db is update from
// command line, but then overwritten.
-err := h.UpdateMachine(&m)
+err := h.UpdateMachine(m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
@@ -401,7 +401,7 @@ func (h *Headscale) PollNetMapStream(
cancelKeepAlive <- struct{}{}
-h.closeUpdateChannel(&m)
+h.closeUpdateChannel(m)
close(pollDataChan)
@@ -417,7 +417,7 @@ func (h *Headscale) scheduledPollWorker(
keepAliveChan chan<- []byte,
mKey wgkey.Key,
req tailcfg.MapRequest,
-m Machine,
+m *Machine,
) {
keepAliveTicker := time.NewTicker(60 * time.Second)
updateCheckerTicker := time.NewTicker(30 * time.Second)
@@ -446,8 +446,7 @@ func (h *Headscale) scheduledPollWorker(
case <-updateCheckerTicker.C:
// Send an update request regardless of outdated or not, if data is sent
// to the node is determined in the updateChan consumer block
-n, _ := m.toNode(true)
-err := h.sendRequestOnUpdateChannel(n)
+err := h.sendRequestOnUpdateChannel(m)
if err != nil {
log.Error().
Str("func", "keepAlive").

sharing_test.go

@@ -2,7 +2,6 @@ package headscale
import (
"gopkg.in/check.v1"
-"tailscale.com/tailcfg"
)
func (s *Suite) TestBasicSharedNodesInNamespace(c *check.C) {
@@ -21,7 +20,7 @@ func (s *Suite) TestBasicSharedNodesInNamespace(c *check.C) {
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
-m1 := Machine{
+m1 := &Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
@@ -33,12 +32,12 @@ func (s *Suite) TestBasicSharedNodesInNamespace(c *check.C) {
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
-h.db.Save(&m1)
+h.db.Save(m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
-m2 := Machine{
+m2 := &Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
@@ -50,22 +49,22 @@ func (s *Suite) TestBasicSharedNodesInNamespace(c *check.C) {
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
-h.db.Save(&m2)
+h.db.Save(m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1s), check.Equals, 0)
+c.Assert(len(p1s), check.Equals, 0)
-err = h.AddSharedMachineToNamespace(&m2, n1)
+err = h.AddSharedMachineToNamespace(m2, n1)
c.Assert(err, check.IsNil)
p1sAfter, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1sAfter), check.Equals, 1)
-c.Assert((*p1sAfter)[0].ID, check.Equals, tailcfg.NodeID(m2.ID))
+c.Assert(len(p1sAfter), check.Equals, 1)
+c.Assert(p1sAfter[0].ID, check.Equals, m2.ID)
}
func (s *Suite) TestSameNamespace(c *check.C) {
@@ -84,7 +83,7 @@ func (s *Suite) TestSameNamespace(c *check.C) {
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
-m1 := Machine{
+m1 := &Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
@@ -96,12 +95,12 @@ func (s *Suite) TestSameNamespace(c *check.C) {
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
-h.db.Save(&m1)
+h.db.Save(m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
-m2 := Machine{
+m2 := &Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
@@ -113,16 +112,16 @@ func (s *Suite) TestSameNamespace(c *check.C) {
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
-h.db.Save(&m2)
+h.db.Save(m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1s), check.Equals, 0)
+c.Assert(len(p1s), check.Equals, 0)
-err = h.AddSharedMachineToNamespace(&m1, n1)
+err = h.AddSharedMachineToNamespace(m1, n1)
c.Assert(err, check.Equals, errorSameNamespace)
}
@@ -142,7 +141,7 @@ func (s *Suite) TestAlreadyShared(c *check.C) {
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
-m1 := Machine{
+m1 := &Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
@@ -154,12 +153,12 @@ func (s *Suite) TestAlreadyShared(c *check.C) {
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
-h.db.Save(&m1)
+h.db.Save(m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
-m2 := Machine{
+m2 := &Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
@@ -171,18 +170,18 @@ func (s *Suite) TestAlreadyShared(c *check.C) {
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
-h.db.Save(&m2)
+h.db.Save(m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1s), check.Equals, 0)
+c.Assert(len(p1s), check.Equals, 0)
-err = h.AddSharedMachineToNamespace(&m2, n1)
+err = h.AddSharedMachineToNamespace(m2, n1)
c.Assert(err, check.IsNil)
-err = h.AddSharedMachineToNamespace(&m2, n1)
+err = h.AddSharedMachineToNamespace(m2, n1)
c.Assert(err, check.Equals, errorMachineAlreadyShared)
}
@@ -202,7 +201,7 @@ func (s *Suite) TestDoNotIncludeRoutesOnShared(c *check.C) {
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
-m1 := Machine{
+m1 := &Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
@@ -214,12 +213,12 @@ func (s *Suite) TestDoNotIncludeRoutesOnShared(c *check.C) {
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
-h.db.Save(&m1)
+h.db.Save(m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
-m2 := Machine{
+m2 := &Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
@@ -231,22 +230,21 @@ func (s *Suite) TestDoNotIncludeRoutesOnShared(c *check.C) {
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
-h.db.Save(&m2)
+h.db.Save(m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1s), check.Equals, 0)
+c.Assert(len(p1s), check.Equals, 0)
-err = h.AddSharedMachineToNamespace(&m2, n1)
+err = h.AddSharedMachineToNamespace(m2, n1)
c.Assert(err, check.IsNil)
p1sAfter, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1sAfter), check.Equals, 1)
-c.Assert(len((*p1sAfter)[0].AllowedIPs), check.Equals, 1)
+c.Assert(len(p1sAfter), check.Equals, 1)
}
func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
@@ -274,7 +272,7 @@ func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
-m1 := Machine{
+m1 := &Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
@@ -286,12 +284,12 @@ func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
-h.db.Save(&m1)
+h.db.Save(m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
-m2 := Machine{
+m2 := &Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
@@ -303,12 +301,12 @@ func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
-h.db.Save(&m2)
+h.db.Save(m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
-m3 := Machine{
+m3 := &Machine{
ID: 2,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
@@ -320,12 +318,12 @@ func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
IPAddress: "100.64.0.3",
AuthKeyID: uint(pak3.ID),
}
-h.db.Save(&m3)
+h.db.Save(m3)
_, err = h.GetMachine(n3.Name, m3.Name)
c.Assert(err, check.IsNil)
-m4 := Machine{
+m4 := &Machine{
ID: 3,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
@@ -337,23 +335,23 @@ func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
IPAddress: "100.64.0.4",
AuthKeyID: uint(pak4.ID),
}
-h.db.Save(&m4)
+h.db.Save(m4)
_, err = h.GetMachine(n1.Name, m4.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1s), check.Equals, 1) // nodes 1 and 4
+c.Assert(len(p1s), check.Equals, 1) // nodes 1 and 4
-err = h.AddSharedMachineToNamespace(&m2, n1)
+err = h.AddSharedMachineToNamespace(m2, n1)
c.Assert(err, check.IsNil)
p1sAfter, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
-c.Assert(len(*p1sAfter), check.Equals, 2) // nodes 1, 2, 4
+c.Assert(len(p1sAfter), check.Equals, 2) // nodes 1, 2, 4
pAlone, err := h.getPeers(m3)
c.Assert(err, check.IsNil)
-c.Assert(len(*pAlone), check.Equals, 0) // node 3 is alone
+c.Assert(len(pAlone), check.Equals, 0) // node 3 is alone
}