mirror of
https://github.com/tailscale/tailscale.git
synced 2026-04-04 22:53:38 -04:00
kube/authkey,cmd/containerboot: extract shared auth key reissue package
Move auth key reissue logic (set marker, wait for new key, clear marker, read config) into a shared kube/authkey package and update containerboot to use it. No behaviour change.
This commit is contained in:
@@ -21,6 +21,7 @@
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"tailscale.com/client/local"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/kube/authkey"
|
||||
"tailscale.com/kube/egressservices"
|
||||
"tailscale.com/kube/ingressservices"
|
||||
"tailscale.com/kube/kubeapi"
|
||||
@@ -32,7 +33,6 @@
|
||||
)
|
||||
|
||||
const fieldManager = "tailscale-container"
|
||||
const kubeletMountedConfigLn = "..data"
|
||||
|
||||
// kubeClient is a wrapper around Tailscale's internal kube client that knows how to talk to the kube API server. We use
|
||||
// this rather than any of the upstream Kubernetes client libraries to avoid extra imports.
|
||||
@@ -139,12 +139,7 @@ func (kc *kubeClient) resetContainerbootState(ctx context.Context, podUID string
|
||||
|
||||
s := &kubeapi.Secret{
|
||||
Data: map[string][]byte{
|
||||
kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion),
|
||||
|
||||
// TODO(tomhjp): Perhaps shouldn't clear device ID and use a different signal, as this could leak tailnet devices.
|
||||
kubetypes.KeyDeviceID: nil,
|
||||
kubetypes.KeyDeviceFQDN: nil,
|
||||
kubetypes.KeyDeviceIPs: nil,
|
||||
kubetypes.KeyCapVer: fmt.Appendf(nil, "%d", tailcfg.CurrentCapabilityVersion),
|
||||
kubetypes.KeyHTTPSEndpoint: nil,
|
||||
egressservices.KeyEgressServices: nil,
|
||||
ingressservices.IngressConfigKey: nil,
|
||||
@@ -169,47 +164,18 @@ func (kc *kubeClient) setAndWaitForAuthKeyReissue(ctx context.Context, client *l
|
||||
return fmt.Errorf("error disconnecting from control: %w", err)
|
||||
}
|
||||
|
||||
err = kc.setReissueAuthKey(ctx, tailscaledConfigAuthKey)
|
||||
err = authkey.SetReissueAuthKey(ctx, kc.Client, kc.stateSecret, tailscaledConfigAuthKey, authkey.TailscaleContainerFieldManager)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set reissue_authkey in Kubernetes Secret: %w", err)
|
||||
}
|
||||
|
||||
err = kc.waitForAuthKeyReissue(ctx, cfg.TailscaledConfigFilePath, tailscaledConfigAuthKey, 10*time.Minute)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to receive new auth key: %w", err)
|
||||
clearFn := func(ctx context.Context) error {
|
||||
return authkey.ClearReissueAuthKey(ctx, kc.Client, kc.stateSecret, authkey.TailscaleContainerFieldManager)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kc *kubeClient) setReissueAuthKey(ctx context.Context, authKey string) error {
|
||||
s := &kubeapi.Secret{
|
||||
Data: map[string][]byte{
|
||||
kubetypes.KeyReissueAuthkey: []byte(authKey),
|
||||
},
|
||||
}
|
||||
|
||||
log.Printf("Requesting a new auth key from operator")
|
||||
return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager)
|
||||
}
|
||||
|
||||
func (kc *kubeClient) waitForAuthKeyReissue(ctx context.Context, configPath string, oldAuthKey string, maxWait time.Duration) error {
|
||||
log.Printf("Waiting for operator to provide new auth key (max wait: %v)", maxWait)
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, maxWait)
|
||||
defer cancel()
|
||||
|
||||
tailscaledCfgDir := filepath.Dir(configPath)
|
||||
toWatch := filepath.Join(tailscaledCfgDir, kubeletMountedConfigLn)
|
||||
|
||||
var (
|
||||
pollTicker <-chan time.Time
|
||||
eventChan <-chan fsnotify.Event
|
||||
)
|
||||
|
||||
pollInterval := 5 * time.Second
|
||||
|
||||
// Try to use fsnotify for faster notification
|
||||
getAuthKey := func() string { return authkey.AuthKeyFromConfig(cfg.TailscaledConfigFilePath) }
|
||||
tailscaledCfgDir := filepath.Dir(cfg.TailscaledConfigFilePath)
|
||||
var notify <-chan struct{}
|
||||
if w, err := fsnotify.NewWatcher(); err != nil {
|
||||
log.Printf("auth key reissue: fsnotify unavailable, using polling: %v", err)
|
||||
} else if err := w.Add(tailscaledCfgDir); err != nil {
|
||||
@@ -217,54 +183,27 @@ func (kc *kubeClient) waitForAuthKeyReissue(ctx context.Context, configPath stri
|
||||
log.Printf("auth key reissue: fsnotify watch failed, using polling: %v", err)
|
||||
} else {
|
||||
defer w.Close()
|
||||
log.Printf("auth key reissue: watching for config changes via fsnotify")
|
||||
eventChan = w.Events
|
||||
}
|
||||
|
||||
// still keep polling if using fsnotify, for logging and in case fsnotify fails
|
||||
pt := time.NewTicker(pollInterval)
|
||||
defer pt.Stop()
|
||||
pollTicker = pt.C
|
||||
|
||||
start := time.Now()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("timeout waiting for auth key reissue after %v", maxWait)
|
||||
case <-pollTicker: // Waits for polling tick, continues when received
|
||||
case event := <-eventChan:
|
||||
if event.Name != toWatch {
|
||||
continue
|
||||
ch := make(chan struct{}, 1)
|
||||
toWatch := filepath.Join(tailscaledCfgDir, "..data")
|
||||
go func() {
|
||||
for ev := range w.Events {
|
||||
if ev.Name == toWatch {
|
||||
select {
|
||||
case ch <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newAuthKey := authkeyFromTailscaledConfig(configPath)
|
||||
if newAuthKey != "" && newAuthKey != oldAuthKey {
|
||||
log.Printf("New auth key received from operator after %v", time.Since(start).Round(time.Second))
|
||||
|
||||
if err := kc.clearReissueAuthKeyRequest(ctx); err != nil {
|
||||
log.Printf("Warning: failed to clear reissue request: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if eventChan == nil && pollTicker != nil {
|
||||
log.Printf("Waiting for new auth key from operator (%v elapsed)", time.Since(start).Round(time.Second))
|
||||
}
|
||||
}()
|
||||
notify = ch
|
||||
}
|
||||
}
|
||||
|
||||
// clearReissueAuthKeyRequest removes the reissue_authkey marker from the Secret
|
||||
// to signal to the operator that we've successfully received the new key.
|
||||
func (kc *kubeClient) clearReissueAuthKeyRequest(ctx context.Context) error {
|
||||
s := &kubeapi.Secret{
|
||||
Data: map[string][]byte{
|
||||
kubetypes.KeyReissueAuthkey: nil,
|
||||
},
|
||||
err = authkey.WaitForAuthKeyReissue(ctx, tailscaledConfigAuthKey, 10*time.Minute, getAuthKey, clearFn, notify)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to receive new auth key: %w", err)
|
||||
}
|
||||
return kc.StrategicMergePatchSecret(ctx, kc.stateSecret, s, fieldManager)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForConsistentState waits for tailscaled to finish writing state if it
|
||||
|
||||
@@ -139,8 +139,8 @@
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/health"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/conffile"
|
||||
kubeutils "tailscale.com/k8s-operator"
|
||||
"tailscale.com/kube/authkey"
|
||||
healthz "tailscale.com/kube/health"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
klc "tailscale.com/kube/localclient"
|
||||
@@ -210,7 +210,7 @@ func run() error {
|
||||
|
||||
var tailscaledConfigAuthkey string
|
||||
if isOneStepConfig(cfg) {
|
||||
tailscaledConfigAuthkey = authkeyFromTailscaledConfig(cfg.TailscaledConfigFilePath)
|
||||
tailscaledConfigAuthkey = authkey.AuthKeyFromConfig(cfg.TailscaledConfigFilePath)
|
||||
}
|
||||
|
||||
var kc *kubeClient
|
||||
@@ -1025,11 +1025,3 @@ func serviceIPsFromNetMap(nm *netmap.NetworkMap, fqdn dnsname.FQDN) []netip.Pref
|
||||
|
||||
return prefixes
|
||||
}
|
||||
|
||||
func authkeyFromTailscaledConfig(path string) string {
|
||||
if cfg, err := conffile.Load(path); err == nil && cfg.Parsed.AuthKey != nil {
|
||||
return *cfg.Parsed.AuthKey
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
106
kube/authkey/authkey.go
Normal file
106
kube/authkey/authkey.go
Normal file
@@ -0,0 +1,106 @@
|
||||
// Copyright (c) Tailscale Inc & contributors
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
// Package authkey provides shared logic for handling auth key reissue
|
||||
// requests between tailnet clients (containerboot, k8s-proxy) and the
|
||||
// operator.
|
||||
//
|
||||
// When a client fails to authenticate (expired key, single-use key already
|
||||
// used), it signals the operator by setting a marker in its state Secret.
|
||||
// The operator responds by deleting the old device and issuing a new auth
|
||||
// key. The client watches for the new key and restarts to apply it.
|
||||
package authkey
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"tailscale.com/ipn/conffile"
|
||||
"tailscale.com/kube/kubeapi"
|
||||
"tailscale.com/kube/kubeclient"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
)
|
||||
|
||||
const (
|
||||
TailscaleContainerFieldManager = "tailscale-container"
|
||||
)
|
||||
|
||||
// SetReissueAuthKey sets the reissue_authkey marker in the state Secret to
|
||||
// signal to the operator that a new auth key is needed. The marker value is
|
||||
// the auth key that failed to authenticate.
|
||||
func SetReissueAuthKey(ctx context.Context, kc kubeclient.Client, stateSecretName string, authKey string, fieldManager string) error {
|
||||
s := &kubeapi.Secret{
|
||||
Data: map[string][]byte{
|
||||
kubetypes.KeyReissueAuthkey: []byte(authKey),
|
||||
},
|
||||
}
|
||||
|
||||
log.Printf("Requesting a new auth key from operator")
|
||||
return kc.StrategicMergePatchSecret(ctx, stateSecretName, s, fieldManager)
|
||||
}
|
||||
|
||||
// ClearReissueAuthKey removes the reissue_authkey marker from the state Secret
|
||||
// to signal to the operator that we've successfully received the new key.
|
||||
func ClearReissueAuthKey(ctx context.Context, kc kubeclient.Client, stateSecretName string, fieldManager string) error {
|
||||
s := &kubeapi.Secret{
|
||||
Data: map[string][]byte{
|
||||
kubetypes.KeyReissueAuthkey: nil,
|
||||
},
|
||||
}
|
||||
return kc.StrategicMergePatchSecret(ctx, stateSecretName, s, fieldManager)
|
||||
}
|
||||
|
||||
// WaitForAuthKeyReissue polls getAuthKey for a new auth key different from
|
||||
// oldAuthKey, returning when one is found or maxWait expires. If notify is
|
||||
// non-nil, it is used to wake the loop on config changes; otherwise it falls
|
||||
// back to periodic polling. The clearFn callback is called when a new key is
|
||||
// detected, to clear the reissue marker from the state Secret.
|
||||
func WaitForAuthKeyReissue(ctx context.Context, oldAuthKey string, maxWait time.Duration, getAuthKey func() string, clearFn func(context.Context) error,
|
||||
notify <-chan struct{}) error {
|
||||
log.Printf("Waiting for operator to provide new auth key (max wait: %v)", maxWait)
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, maxWait)
|
||||
defer cancel()
|
||||
|
||||
pollInterval := 5 * time.Second
|
||||
pt := time.NewTicker(pollInterval)
|
||||
defer pt.Stop()
|
||||
|
||||
start := time.Now()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("timeout waiting for auth key reissue after %v", maxWait)
|
||||
case <-pt.C:
|
||||
case <-notify:
|
||||
}
|
||||
|
||||
newAuthKey := getAuthKey()
|
||||
if newAuthKey != "" && newAuthKey != oldAuthKey {
|
||||
log.Printf("New auth key received from operator after %v", time.Since(start).Round(time.Second))
|
||||
if err := clearFn(ctx); err != nil {
|
||||
log.Printf("Warning: failed to clear reissue request: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if notify == nil {
|
||||
log.Printf("Waiting for new auth key from operator (%v elapsed)", time.Since(start).Round(time.Second))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AuthKeyFromConfig extracts the auth key from a tailscaled config file.
|
||||
// Returns empty string if the file cannot be read or contains no auth key.
|
||||
func AuthKeyFromConfig(path string) string {
|
||||
if cfg, err := conffile.Load(path); err == nil && cfg.Parsed.AuthKey != nil {
|
||||
return *cfg.Parsed.AuthKey
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
107
kube/authkey/authkey_test.go
Normal file
107
kube/authkey/authkey_test.go
Normal file
@@ -0,0 +1,107 @@
|
||||
// Copyright (c) Tailscale Inc & contributors
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !plan9
|
||||
|
||||
package authkey
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"tailscale.com/kube/kubeapi"
|
||||
"tailscale.com/kube/kubeclient"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
)
|
||||
|
||||
// TestSetReissueAuthKey verifies that SetReissueAuthKey patches the state
// Secret so that the reissue_authkey key carries the failed auth key.
func TestSetReissueAuthKey(t *testing.T) {
	// Capture the Secret data handed to the fake client's patch call.
	var patched map[string][]byte
	kc := &kubeclient.FakeClient{
		StrategicMergePatchSecretImpl: func(ctx context.Context, name string, secret *kubeapi.Secret, _ string) error {
			patched = secret.Data
			return nil
		},
	}

	err := SetReissueAuthKey(context.Background(), kc, "test-secret", "old-auth-key", TailscaleContainerFieldManager)
	if err != nil {
		t.Fatalf("SetReissueAuthKey() error = %v", err)
	}

	// The marker value must be the auth key that failed to authenticate,
	// so the operator can identify which key to replace.
	want := map[string][]byte{
		kubetypes.KeyReissueAuthkey: []byte("old-auth-key"),
	}
	if diff := cmp.Diff(want, patched); diff != "" {
		t.Errorf("SetReissueAuthKey() mismatch (-want +got):\n%s", diff)
	}
}
|
||||
|
||||
// TestClearReissueAuthKey verifies that ClearReissueAuthKey patches the state
// Secret with a nil reissue_authkey value, which removes the marker.
func TestClearReissueAuthKey(t *testing.T) {
	// Capture the Secret data handed to the fake client's patch call.
	var patched map[string][]byte
	kc := &kubeclient.FakeClient{
		StrategicMergePatchSecretImpl: func(ctx context.Context, name string, secret *kubeapi.Secret, _ string) error {
			patched = secret.Data
			return nil
		},
	}

	err := ClearReissueAuthKey(context.Background(), kc, "test-secret", TailscaleContainerFieldManager)
	if err != nil {
		t.Fatalf("ClearReissueAuthKey() error = %v", err)
	}

	// A nil value in a strategic-merge patch deletes the key from the
	// Secret, signalling to the operator that the new key was received.
	want := map[string][]byte{
		kubetypes.KeyReissueAuthkey: nil,
	}
	if diff := cmp.Diff(want, patched); diff != "" {
		t.Errorf("ClearReissueAuthKey() mismatch (-want +got):\n%s", diff)
	}
}
|
||||
|
||||
func TestAuthKeyFromConfig(t *testing.T) {
|
||||
for name, tc := range map[string]struct {
|
||||
configContent string
|
||||
want string
|
||||
}{
|
||||
"valid_config_with_authkey": {
|
||||
configContent: `{"Version":"alpha0","AuthKey":"test-auth-key"}`,
|
||||
want: "test-auth-key",
|
||||
},
|
||||
"valid_config_without_authkey": {
|
||||
configContent: `{"Version":"alpha0"}`,
|
||||
want: "",
|
||||
},
|
||||
"invalid_config": {
|
||||
configContent: `not valid json`,
|
||||
want: "",
|
||||
},
|
||||
"empty_config": {
|
||||
configContent: ``,
|
||||
want: "",
|
||||
},
|
||||
} {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
configPath := filepath.Join(tmpDir, "config.json")
|
||||
|
||||
if err := os.WriteFile(configPath, []byte(tc.configContent), 0600); err != nil {
|
||||
t.Fatalf("failed to write config file: %v", err)
|
||||
}
|
||||
|
||||
got := AuthKeyFromConfig(configPath)
|
||||
if got != tc.want {
|
||||
t.Errorf("AuthKeyFromConfig() = %q, want %q", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("nonexistent_file", func(t *testing.T) {
|
||||
got := AuthKeyFromConfig("/nonexistent/path/config.json")
|
||||
if got != "" {
|
||||
t.Errorf("AuthKeyFromConfig() = %q, want empty string for nonexistent file", got)
|
||||
}
|
||||
})
|
||||
}
|
||||
Reference in New Issue
Block a user