mirror of
https://github.com/tailscale/tailscale.git
synced 2026-04-04 06:36:01 -04:00
The new version of app connector (conn25) needs to read DNS responses for domains it is interested in and store and swap out IP addresses. Add a hook to dns manager to enable this. Give the conn25 updated netmaps so that it knows when to assign connecting addresses and from what pool. Assign an address when we see a DNS response for a domain we are interested in, but don't do anything with the address yet. Updates tailscale/corp#34252 Signed-off-by: Fran Bull <fran@tailscale.com>
73 lines
2.3 KiB
Go
// Copyright (c) Tailscale Inc & contributors
|
|
// SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
package appc
|
|
|
|
import (
|
|
"cmp"
|
|
"slices"
|
|
|
|
"tailscale.com/tailcfg"
|
|
"tailscale.com/types/appctype"
|
|
"tailscale.com/util/mak"
|
|
"tailscale.com/util/set"
|
|
)
|
|
// AppConnectorsExperimentalAttrName is the tailcfg.NodeCapability name under
// which the control plane sends experimental app-connector configuration
// (appctype.AppConnectorAttr values) in a node's CapMap.
const AppConnectorsExperimentalAttrName = "tailscale.com/app-connectors-experimental"
|
|
|
|
// PickSplitDNSPeers looks at the netmap peers capabilities and finds which peers
|
|
// want to be connectors for which domains.
|
|
func PickSplitDNSPeers(hasCap func(c tailcfg.NodeCapability) bool, self tailcfg.NodeView, peers map[tailcfg.NodeID]tailcfg.NodeView) map[string][]tailcfg.NodeView {
|
|
var m map[string][]tailcfg.NodeView
|
|
if !hasCap(AppConnectorsExperimentalAttrName) {
|
|
return m
|
|
}
|
|
apps, err := tailcfg.UnmarshalNodeCapViewJSON[appctype.AppConnectorAttr](self.CapMap(), AppConnectorsExperimentalAttrName)
|
|
if err != nil {
|
|
return m
|
|
}
|
|
tagToDomain := make(map[string][]string)
|
|
for _, app := range apps {
|
|
for _, tag := range app.Connectors {
|
|
tagToDomain[tag] = append(tagToDomain[tag], app.Domains...)
|
|
}
|
|
}
|
|
// NodeIDs are Comparable, and we have a map of NodeID to NodeView anyway, so
|
|
// use a Set of NodeIDs to deduplicate, and populate into a []NodeView later.
|
|
var work map[string]set.Set[tailcfg.NodeID]
|
|
for _, peer := range peers {
|
|
if !peer.Valid() || !peer.Hostinfo().Valid() {
|
|
continue
|
|
}
|
|
if isConn, _ := peer.Hostinfo().AppConnector().Get(); !isConn {
|
|
continue
|
|
}
|
|
for _, t := range peer.Tags().All() {
|
|
domains := tagToDomain[t]
|
|
for _, domain := range domains {
|
|
if work[domain] == nil {
|
|
mak.Set(&work, domain, set.Set[tailcfg.NodeID]{})
|
|
}
|
|
work[domain].Add(peer.ID())
|
|
}
|
|
}
|
|
}
|
|
|
|
// Populate m. Make a []tailcfg.NodeView from []tailcfg.NodeID using the peers map.
|
|
// And sort it to our preference.
|
|
for domain, ids := range work {
|
|
nodes := make([]tailcfg.NodeView, 0, ids.Len())
|
|
for id := range ids {
|
|
nodes = append(nodes, peers[id])
|
|
}
|
|
// The ordering of the nodes in the map vals is semantic (dnsConfigForNetmap uses the first node it can
|
|
// get a peer api url for as its split dns target). We can think of it as a preference order, except that
|
|
// we don't (currently 2026-01-14) have any preference over which node is chosen.
|
|
slices.SortFunc(nodes, func(a, b tailcfg.NodeView) int {
|
|
return cmp.Compare(a.ID(), b.ID())
|
|
})
|
|
mak.Set(&m, domain, nodes)
|
|
}
|
|
return m
|
|
}
|