Improved tailnode startup handling

Juan Font Alonso 2021-05-24 21:59:03 +02:00
parent d1be440c89
commit 064e448d22

api.go
@@ -215,25 +215,40 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
 		return
 	}
-	log.Printf("[%s] sending initial map", m.Name)
-	pollData <- *data
 
 	// We update our peers if the client is not sending ReadOnly in the MapRequest
 	// so we don't distribute its initial request (it comes with
 	// empty endpoints to peers)
-	if !req.ReadOnly {
-		peers, _ := h.getPeers(m)
-		h.pollMu.Lock()
-		for _, p := range *peers {
-			log.Printf("[%s] notifying peer %s (%s)", m.Name, p.Name, p.Addresses[0])
-			if pUp, ok := h.clientsPolling[uint64(p.ID)]; ok {
-				pUp <- []byte{}
-			} else {
-				log.Printf("[%s] Peer %s does not appear to be polling", m.Name, p.Name)
-			}
-		}
-		h.pollMu.Unlock()
-	}
+	// Details on the protocol can be found in https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L696
+	log.Printf("[%s] ReadOnly=%t OmitPeers=%t Stream=%t", m.Name, req.ReadOnly, req.OmitPeers, req.Stream)
+
+	if req.ReadOnly {
+		log.Printf("[%s] Client is starting up. Asking for DERP map", m.Name)
+		c.Data(200, "application/json; charset=utf-8", *data)
+		return
+	}
+
+	if req.OmitPeers {
+		log.Printf("[%s] Client is starting up. Ready to receive the peers", m.Name)
+		c.Data(200, "application/json; charset=utf-8", *data)
+		return
+	}
+
+	log.Printf("[%s] Client is ready to access the tailnet", m.Name)
+	log.Printf("[%s] Sending initial map", m.Name)
+	pollData <- *data
+
+	log.Printf("[%s] Notifying peers", m.Name)
+	peers, _ := h.getPeers(m)
+	h.pollMu.Lock()
+	for _, p := range *peers {
+		log.Printf("[%s] Notifying peer %s (%s)", m.Name, p.Name, p.Addresses[0])
+		if pUp, ok := h.clientsPolling[uint64(p.ID)]; ok {
+			pUp <- []byte{}
+		} else {
+			log.Printf("[%s] Peer %s does not appear to be polling", m.Name, p.Name)
+		}
+	}
+	h.pollMu.Unlock()
 
 	go h.keepAlive(cancelKeepAlive, pollData, mKey, req, m)
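
For context, a minimal self-contained sketch of the three-way dispatch the hunk above introduces. The mapRequest type here is a hypothetical stand-in for the three tailcfg.MapRequest flags the new code logs (ReadOnly, OmitPeers, Stream); it is not the commit's code.

package main

import "fmt"

// mapRequest mirrors the three tailcfg.MapRequest flags logged by the handler.
type mapRequest struct {
	ReadOnly  bool // client only wants configuration (e.g. the DERP map)
	OmitPeers bool // client wants a one-off map response, no long-poll yet
	Stream    bool // client wants a stream of map updates
}

// classify reproduces the branching order of the new handler code:
// ReadOnly is checked first, then OmitPeers; everything else is treated
// as a client that is ready for the initial map and peer notifications.
func classify(req mapRequest) string {
	switch {
	case req.ReadOnly:
		return "starting up: answer once with the map, do not notify peers"
	case req.OmitPeers:
		return "starting up: answer once, no stream registered"
	default:
		return "ready: send initial map, notify peers, start keepalives"
	}
}

func main() {
	fmt.Println(classify(mapRequest{ReadOnly: true}))
	fmt.Println(classify(mapRequest{Stream: true}))
}

Checking ReadOnly and OmitPeers before registering the client matters because the early startup requests arrive with empty endpoints; answering them inline (c.Data and return) keeps them out of the peer-notification path.
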
@@ -290,6 +305,7 @@ func (h *Headscale) keepAlive(cancel chan []byte, pollData chan []byte, mKey wgc
 			log.Printf("Error generating the keep alive msg: %s", err)
 			return
 		}
+		log.Printf("[%s] Sending keepalive", m.Name)
 		pollData <- *data
 		h.pollMu.Unlock()
 		time.Sleep(60 * time.Second)
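
The hunk above only adds a log line inside h.keepAlive, which pushes a keepalive message every 60 seconds. As context, a minimal self-contained sketch of that pattern; the names and channel shapes are assumptions, not the commit's code.

package main

import (
	"log"
	"time"
)

// keepAliveLoop pushes a keepalive payload onto the client's poll channel
// at a fixed interval until cancel is closed; the handler's writer side
// drains pollData and flushes each message to the open HTTP response.
func keepAliveLoop(cancel <-chan struct{}, pollData chan<- []byte, name string, every time.Duration) {
	for {
		select {
		case <-cancel:
			return
		case <-time.After(every):
			log.Printf("[%s] Sending keepalive", name)
			pollData <- []byte{} // the real code sends a serialized keep-alive map response
		}
	}
}

func main() {
	cancel := make(chan struct{})
	pollData := make(chan []byte)
	go keepAliveLoop(cancel, pollData, "node-1", 10*time.Millisecond)
	<-pollData // receive one keepalive, then shut the loop down
	close(cancel)
}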