mirror of
https://github.com/tailscale/tailscale.git
synced 2026-04-04 22:53:38 -04:00
Try to get rid of tstest.Clock
Signed-off-by: Percy Wegmann <percy@tailscale.com>
This commit is contained in:
@@ -11,13 +11,13 @@
|
||||
"slices"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"golang.org/x/net/dns/dnsmessage"
|
||||
"tailscale.com/appc/appctest"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/types/appctype"
|
||||
"tailscale.com/util/clientmetric"
|
||||
"tailscale.com/util/eventbus/eventbustest"
|
||||
@@ -689,50 +689,51 @@ func TestRoutesWithout(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRateLogger(t *testing.T) {
|
||||
clock := tstest.Clock{}
|
||||
wasCalled := false
|
||||
rl := newRateLogger(func() time.Time { return clock.Now() }, 1*time.Second, func(count int64, _ time.Time, _ int64) {
|
||||
if count != 3 {
|
||||
t.Fatalf("count for prev period: got %d, want 3", count)
|
||||
}
|
||||
wasCalled = true
|
||||
})
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
wasCalled := false
|
||||
rl := newRateLogger(time.Now, 1*time.Second, func(count int64, _ time.Time, _ int64) {
|
||||
if count != 3 {
|
||||
t.Fatalf("count for prev period: got %d, want 3", count)
|
||||
}
|
||||
wasCalled = true
|
||||
})
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
clock.Advance(1 * time.Millisecond)
|
||||
for i := 0; i < 3; i++ {
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
rl.update(0)
|
||||
if wasCalled {
|
||||
t.Fatalf("wasCalled: got true, want false")
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
rl.update(0)
|
||||
if wasCalled {
|
||||
t.Fatalf("wasCalled: got true, want false")
|
||||
if !wasCalled {
|
||||
t.Fatalf("wasCalled: got false, want true")
|
||||
}
|
||||
}
|
||||
|
||||
clock.Advance(1 * time.Second)
|
||||
rl.update(0)
|
||||
if !wasCalled {
|
||||
t.Fatalf("wasCalled: got false, want true")
|
||||
}
|
||||
wasCalled = false
|
||||
rl = newRateLogger(time.Now, 1*time.Hour, func(count int64, _ time.Time, _ int64) {
|
||||
if count != 3 {
|
||||
t.Fatalf("count for prev period: got %d, want 3", count)
|
||||
}
|
||||
wasCalled = true
|
||||
})
|
||||
|
||||
wasCalled = false
|
||||
rl = newRateLogger(func() time.Time { return clock.Now() }, 1*time.Hour, func(count int64, _ time.Time, _ int64) {
|
||||
if count != 3 {
|
||||
t.Fatalf("count for prev period: got %d, want 3", count)
|
||||
for i := 0; i < 3; i++ {
|
||||
time.Sleep(1 * time.Minute)
|
||||
rl.update(0)
|
||||
if wasCalled {
|
||||
t.Fatalf("wasCalled: got true, want false")
|
||||
}
|
||||
}
|
||||
wasCalled = true
|
||||
})
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
clock.Advance(1 * time.Minute)
|
||||
time.Sleep(1 * time.Hour)
|
||||
rl.update(0)
|
||||
if wasCalled {
|
||||
t.Fatalf("wasCalled: got true, want false")
|
||||
if !wasCalled {
|
||||
t.Fatalf("wasCalled: got false, want true")
|
||||
}
|
||||
}
|
||||
|
||||
clock.Advance(1 * time.Hour)
|
||||
rl.update(0)
|
||||
if !wasCalled {
|
||||
t.Fatalf("wasCalled: got false, want true")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestRouteStoreMetrics(t *testing.T) {
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"tailscale.com/kube/k8s-proxy/conf"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
@@ -124,7 +124,7 @@ func TestAPIServerProxyReconciler(t *testing.T) {
|
||||
logger: zap.Must(zap.NewDevelopment()).Sugar(),
|
||||
recorder: record.NewFakeRecorder(10),
|
||||
lc: lc,
|
||||
clock: tstest.NewClock(tstest.ClockOpts{}),
|
||||
clock: tstime.StdClock{},
|
||||
operatorID: "self-id",
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
@@ -57,7 +57,7 @@ func TestConnector(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
cr := &ConnectorReconciler{
|
||||
Client: fc,
|
||||
recorder: record.NewFakeRecorder(10),
|
||||
@@ -247,7 +247,7 @@ func TestConnectorWithProxyClass(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
cr := &ConnectorReconciler{
|
||||
Client: fc,
|
||||
clock: cl,
|
||||
@@ -340,7 +340,7 @@ func TestConnectorWithAppConnector(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
fr := record.NewFakeRecorder(1)
|
||||
cr := &ConnectorReconciler{
|
||||
Client: fc,
|
||||
@@ -440,7 +440,7 @@ func TestConnectorWithMultipleReplicas(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
fr := record.NewFakeRecorder(1)
|
||||
cr := &ConnectorReconciler{
|
||||
Client: fc,
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
operatorutils "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
@@ -65,7 +65,7 @@ func TestDNSRecordsReconciler(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
// Set the ready condition of the DNSConfig
|
||||
mustUpdateStatus(t, fc, "", "test", func(c *tsapi.DNSConfig) {
|
||||
operatorutils.SetDNSConfigCondition(c, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated, 0, cl, zl.Sugar())
|
||||
|
||||
@@ -21,12 +21,12 @@
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/egressservices"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
func TestTailscaleEgressEndpointSlices(t *testing.T) {
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
svc := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
@@ -35,7 +35,7 @@ func TestEgressPodReadiness(t *testing.T) {
|
||||
WithStatusSubresource(&corev1.Pod{}).
|
||||
Build()
|
||||
zl, _ := zap.NewDevelopment()
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
rec := &egressPodsReconciler{
|
||||
tsNamespace: "operator-ns",
|
||||
Client: fc,
|
||||
@@ -470,7 +470,7 @@ func newSvc(name string, port int32) (*corev1.Service, string) {
|
||||
return svc, fmt.Sprintf("http://%s.operator-ns.svc.cluster.local:%d/healthz", name, port)
|
||||
}
|
||||
|
||||
func podSetReady(pod *corev1.Pod, cl *tstest.Clock) {
|
||||
func podSetReady(pod *corev1.Pod, cl tstime.Clock) {
|
||||
pod.Status.Conditions = append(pod.Status.Conditions, corev1.PodCondition{
|
||||
Type: tsEgressReadinessGate,
|
||||
Status: corev1.ConditionTrue,
|
||||
|
||||
@@ -18,7 +18,6 @@
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
)
|
||||
|
||||
@@ -30,7 +29,7 @@ func TestEgressServiceReadiness(t *testing.T) {
|
||||
WithStatusSubresource(&tsapi.ProxyGroup{}).
|
||||
Build()
|
||||
zl, _ := zap.NewDevelopment()
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
rec := &egressSvcsReadinessReconciler{
|
||||
tsNamespace: "operator-ns",
|
||||
Client: fc,
|
||||
|
||||
@@ -23,7 +23,6 @@
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/egressservices"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
)
|
||||
|
||||
@@ -54,7 +53,7 @@ func TestTailscaleEgressServices(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
|
||||
esr := &egressSvcsReconciler{
|
||||
Client: fc,
|
||||
@@ -130,7 +129,7 @@ func TestTailscaleEgressServices(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReconciler, svc *corev1.Service, clock *tstest.Clock, zl *zap.Logger, cm *corev1.ConfigMap) {
|
||||
func validateReadyService(t *testing.T, fc client.WithWatch, esr *egressSvcsReconciler, svc *corev1.Service, clock tstime.Clock, zl *zap.Logger, cm *corev1.ConfigMap) {
|
||||
expectReconciled(t, esr, "default", "test")
|
||||
// Verify that a ClusterIP Service has been created.
|
||||
name := findGenNameForEgressSvcResources(t, fc, svc)
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
"tailscale.com/ipn"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
@@ -439,7 +439,7 @@ func TestTailscaleIngressWithServiceMonitor(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIngressProxyClassAnnotation(t *testing.T) {
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
zl := zap.Must(zap.NewDevelopment())
|
||||
|
||||
pcLEStaging, pcLEStagingFalse, _ := proxyClassesForLEStagingTest()
|
||||
@@ -547,7 +547,7 @@ func TestIngressProxyClassAnnotation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIngressLetsEncryptStaging(t *testing.T) {
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
zl := zap.Must(zap.NewDevelopment())
|
||||
|
||||
pcLEStaging, pcLEStagingFalse, pcOther := proxyClassesForLEStagingTest()
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
|
||||
operatorutils "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
@@ -68,7 +68,7 @@ func TestNameserverReconciler(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
reconciler := &NameserverReconciler{
|
||||
Client: fc,
|
||||
clock: clock,
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
)
|
||||
|
||||
func TestGetServicesNodePortRangeFromErr(t *testing.T) {
|
||||
@@ -193,7 +193,7 @@ func TestValidateNodePortRanges(t *testing.T) {
|
||||
|
||||
// as part of this test, we want to create an adjacent ProxyClass in order to ensure that if it clashes with the one created in this test
|
||||
// that we get an error
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
opc := &tsapi.ProxyClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "other-pc",
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
@@ -28,7 +29,6 @@
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/net/dns/resolvconffile"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/util/dnsname"
|
||||
@@ -36,185 +36,187 @@
|
||||
)
|
||||
|
||||
func TestLoadBalancerClass(t *testing.T) {
|
||||
fc := fake.NewFakeClient()
|
||||
ft := &fakeTSClient{}
|
||||
zl, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
Client: fc,
|
||||
tsClient: ft,
|
||||
defaultTags: []string{"tag:k8s"},
|
||||
operatorNamespace: "operator-ns",
|
||||
proxyImage: "tailscale/tailscale",
|
||||
},
|
||||
logger: zl.Sugar(),
|
||||
clock: clock,
|
||||
recorder: record.NewFakeRecorder(100),
|
||||
}
|
||||
|
||||
// Create a service that we should manage, but start with a miconfiguration
|
||||
// in the annotations.
|
||||
mustCreate(t, fc, &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
// The apiserver is supposed to set the UID, but the fake client
|
||||
// doesn't. So, set it explicitly because other code later depends
|
||||
// on it being set.
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
AnnotationTailnetTargetFQDN: "invalid.example.com",
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
fc := fake.NewFakeClient()
|
||||
ft := &fakeTSClient{}
|
||||
zl, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
Client: fc,
|
||||
tsClient: ft,
|
||||
defaultTags: []string{"tag:k8s"},
|
||||
operatorNamespace: "operator-ns",
|
||||
proxyImage: "tailscale/tailscale",
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeLoadBalancer,
|
||||
LoadBalancerClass: ptr.To("tailscale"),
|
||||
},
|
||||
})
|
||||
logger: zl.Sugar(),
|
||||
clock: clock,
|
||||
recorder: record.NewFakeRecorder(100),
|
||||
}
|
||||
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
|
||||
// The expected value of .status.conditions[0].LastTransitionTime until the
|
||||
// proxy becomes ready.
|
||||
t0 := conditionTime(clock)
|
||||
|
||||
// Should have an error about invalid config.
|
||||
want := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
AnnotationTailnetTargetFQDN: "invalid.example.com",
|
||||
// Create a service that we should manage, but start with a miconfiguration
|
||||
// in the annotations.
|
||||
mustCreate(t, fc, &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
// The apiserver is supposed to set the UID, but the fake client
|
||||
// doesn't. So, set it explicitly because other code later depends
|
||||
// on it being set.
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
AnnotationTailnetTargetFQDN: "invalid.example.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeLoadBalancer,
|
||||
LoadBalancerClass: ptr.To("tailscale"),
|
||||
},
|
||||
Status: corev1.ServiceStatus{
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeLoadBalancer,
|
||||
LoadBalancerClass: ptr.To("tailscale"),
|
||||
},
|
||||
})
|
||||
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
|
||||
// The expected value of .status.conditions[0].LastTransitionTime until the
|
||||
// proxy becomes ready.
|
||||
t0 := conditionTime(clock)
|
||||
|
||||
// Should have an error about invalid config.
|
||||
want := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
AnnotationTailnetTargetFQDN: "invalid.example.com",
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeLoadBalancer,
|
||||
LoadBalancerClass: ptr.To("tailscale"),
|
||||
},
|
||||
Status: corev1.ServiceStatus{
|
||||
Conditions: []metav1.Condition{{
|
||||
Type: string(tsapi.ProxyReady),
|
||||
Status: metav1.ConditionFalse,
|
||||
LastTransitionTime: t0,
|
||||
Reason: reasonProxyInvalid,
|
||||
Message: `unable to provision proxy resources: invalid Service: invalid value of annotation tailscale.com/tailnet-fqdn: "invalid.example.com" does not appear to be a valid MagicDNS name`,
|
||||
}},
|
||||
},
|
||||
}
|
||||
expectEqual(t, fc, want)
|
||||
|
||||
// Delete the misconfiguration so the proxy starts getting created on the
|
||||
// next reconcile.
|
||||
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
s.ObjectMeta.Annotations = nil
|
||||
})
|
||||
|
||||
time.Sleep(time.Second)
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
opts := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
parentType: "svc",
|
||||
hostname: "default-test",
|
||||
clusterTargetIP: "10.20.30.40",
|
||||
app: kubetypes.AppIngressProxy,
|
||||
}
|
||||
|
||||
expectEqual(t, fc, expectedSecret(t, fc, opts))
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName, "svc"))
|
||||
expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs)
|
||||
|
||||
want.Annotations = nil
|
||||
want.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"}
|
||||
want.Status = corev1.ServiceStatus{
|
||||
Conditions: []metav1.Condition{{
|
||||
Type: string(tsapi.ProxyReady),
|
||||
Status: metav1.ConditionFalse,
|
||||
LastTransitionTime: t0,
|
||||
Reason: reasonProxyInvalid,
|
||||
Message: `unable to provision proxy resources: invalid Service: invalid value of annotation tailscale.com/tailnet-fqdn: "invalid.example.com" does not appear to be a valid MagicDNS name`,
|
||||
LastTransitionTime: t0, // Status is still false, no update to transition time
|
||||
Reason: reasonProxyPending,
|
||||
Message: "no Tailscale hostname known yet, waiting for proxy pod to finish auth",
|
||||
}},
|
||||
},
|
||||
}
|
||||
expectEqual(t, fc, want)
|
||||
|
||||
// Delete the misconfiguration so the proxy starts getting created on the
|
||||
// next reconcile.
|
||||
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
s.ObjectMeta.Annotations = nil
|
||||
})
|
||||
|
||||
clock.Advance(time.Second)
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test", "svc")
|
||||
opts := configOpts{
|
||||
replicas: ptr.To[int32](1),
|
||||
stsName: shortName,
|
||||
secretName: fullName,
|
||||
namespace: "default",
|
||||
parentType: "svc",
|
||||
hostname: "default-test",
|
||||
clusterTargetIP: "10.20.30.40",
|
||||
app: kubetypes.AppIngressProxy,
|
||||
}
|
||||
|
||||
expectEqual(t, fc, expectedSecret(t, fc, opts))
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName, "svc"))
|
||||
expectEqual(t, fc, expectedSTS(t, fc, opts), removeResourceReqs)
|
||||
|
||||
want.Annotations = nil
|
||||
want.ObjectMeta.Finalizers = []string{"tailscale.com/finalizer"}
|
||||
want.Status = corev1.ServiceStatus{
|
||||
Conditions: []metav1.Condition{{
|
||||
Type: string(tsapi.ProxyReady),
|
||||
Status: metav1.ConditionFalse,
|
||||
LastTransitionTime: t0, // Status is still false, no update to transition time
|
||||
Reason: reasonProxyPending,
|
||||
Message: "no Tailscale hostname known yet, waiting for proxy pod to finish auth",
|
||||
}},
|
||||
}
|
||||
expectEqual(t, fc, want)
|
||||
|
||||
// Normally the Tailscale proxy pod would come up here and write its info
|
||||
// into the secret. Simulate that, then verify reconcile again and verify
|
||||
// that we get to the end.
|
||||
mustUpdate(t, fc, "operator-ns", fullName, func(s *corev1.Secret) {
|
||||
if s.Data == nil {
|
||||
s.Data = map[string][]byte{}
|
||||
}
|
||||
s.Data["device_id"] = []byte("ts-id-1234")
|
||||
s.Data["device_fqdn"] = []byte("tailscale.device.name.")
|
||||
s.Data["device_ips"] = []byte(`["100.99.98.97", "2c0a:8083:94d4:2012:3165:34a5:3616:5fdf"]`)
|
||||
})
|
||||
clock.Advance(time.Second)
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
want.Status.Conditions = proxyCreatedCondition(clock)
|
||||
want.Status.LoadBalancer = corev1.LoadBalancerStatus{
|
||||
Ingress: []corev1.LoadBalancerIngress{
|
||||
{
|
||||
Hostname: "tailscale.device.name",
|
||||
expectEqual(t, fc, want)
|
||||
|
||||
// Normally the Tailscale proxy pod would come up here and write its info
|
||||
// into the secret. Simulate that, then verify reconcile again and verify
|
||||
// that we get to the end.
|
||||
mustUpdate(t, fc, "operator-ns", fullName, func(s *corev1.Secret) {
|
||||
if s.Data == nil {
|
||||
s.Data = map[string][]byte{}
|
||||
}
|
||||
s.Data["device_id"] = []byte("ts-id-1234")
|
||||
s.Data["device_fqdn"] = []byte("tailscale.device.name.")
|
||||
s.Data["device_ips"] = []byte(`["100.99.98.97", "2c0a:8083:94d4:2012:3165:34a5:3616:5fdf"]`)
|
||||
})
|
||||
time.Sleep(time.Second)
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
want.Status.Conditions = proxyCreatedCondition(clock)
|
||||
want.Status.LoadBalancer = corev1.LoadBalancerStatus{
|
||||
Ingress: []corev1.LoadBalancerIngress{
|
||||
{
|
||||
Hostname: "tailscale.device.name",
|
||||
},
|
||||
{
|
||||
IP: "100.99.98.97",
|
||||
},
|
||||
},
|
||||
{
|
||||
IP: "100.99.98.97",
|
||||
}
|
||||
|
||||
// Perform an additional reconciliation loop here to ensure resources don't change through side effects. Mainly
|
||||
// to prevent infinite reconciliation
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectEqual(t, fc, want)
|
||||
|
||||
// Turn the service back into a ClusterIP service, which should make the
|
||||
// operator clean up.
|
||||
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
s.Spec.Type = corev1.ServiceTypeClusterIP
|
||||
s.Spec.LoadBalancerClass = nil
|
||||
})
|
||||
mustUpdateStatus(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
// Fake client doesn't automatically delete the LoadBalancer status when
|
||||
// changing away from the LoadBalancer type, we have to do
|
||||
// controller-manager's work by hand.
|
||||
s.Status = corev1.ServiceStatus{}
|
||||
})
|
||||
// synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet
|
||||
// didn't create any child resources since this is all faked, so the
|
||||
// deletion goes through immediately.
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
|
||||
// The deletion triggers another reconcile, to finish the cleanup.
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
|
||||
expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
|
||||
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
|
||||
|
||||
// Note that the Tailscale-specific condition status should be gone now.
|
||||
want = &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
UID: types.UID("1234-UID"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Perform an additional reconciliation loop here to ensure resources don't change through side effects. Mainly
|
||||
// to prevent infinite reconciliation
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectEqual(t, fc, want)
|
||||
|
||||
// Turn the service back into a ClusterIP service, which should make the
|
||||
// operator clean up.
|
||||
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
s.Spec.Type = corev1.ServiceTypeClusterIP
|
||||
s.Spec.LoadBalancerClass = nil
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
},
|
||||
}
|
||||
expectEqual(t, fc, want)
|
||||
})
|
||||
mustUpdateStatus(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
// Fake client doesn't automatically delete the LoadBalancer status when
|
||||
// changing away from the LoadBalancer type, we have to do
|
||||
// controller-manager's work by hand.
|
||||
s.Status = corev1.ServiceStatus{}
|
||||
})
|
||||
// synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet
|
||||
// didn't create any child resources since this is all faked, so the
|
||||
// deletion goes through immediately.
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
|
||||
// The deletion triggers another reconcile, to finish the cleanup.
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
|
||||
expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
|
||||
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
|
||||
|
||||
// Note that the Tailscale-specific condition status should be gone now.
|
||||
want = &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
UID: types.UID("1234-UID"),
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
},
|
||||
}
|
||||
expectEqual(t, fc, want)
|
||||
}
|
||||
|
||||
func TestTailnetTargetFQDNAnnotation(t *testing.T) {
|
||||
@@ -225,7 +227,7 @@ func TestTailnetTargetFQDNAnnotation(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tailnetTargetFQDN := "foo.bar.ts.net."
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -338,7 +340,7 @@ func TestTailnetTargetIPAnnotation(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tailnetTargetIP := "100.66.66.66"
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -450,7 +452,7 @@ func TestTailnetTargetIPAnnotation_IPCouldNotBeParsed(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -521,7 +523,7 @@ func TestTailnetTargetIPAnnotation_InvalidIP(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -592,7 +594,7 @@ func TestAnnotations(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -699,7 +701,7 @@ func TestAnnotationIntoLB(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -832,7 +834,7 @@ func TestLBIntoAnnotation(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -970,7 +972,7 @@ func TestCustomHostname(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -1082,7 +1084,7 @@ func TestCustomPriorityClassName(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -1137,7 +1139,7 @@ func TestCustomPriorityClassName(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestServiceProxyClassAnnotation(t *testing.T) {
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
zl := zap.Must(zap.NewDevelopment())
|
||||
|
||||
pcIfNotPresent := &tsapi.ProxyClass{
|
||||
@@ -1337,7 +1339,7 @@ func TestProxyClassForService(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -1429,7 +1431,7 @@ func TestDefaultLoadBalancer(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -1486,7 +1488,7 @@ func TestProxyFirewallMode(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -1813,7 +1815,7 @@ func Test_authKeyRemoval(t *testing.T) {
|
||||
|
||||
// 1. A new Service that should be exposed via Tailscale gets created, a Secret with a config that contains auth
|
||||
// key is generated.
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -1881,7 +1883,7 @@ func Test_externalNameService(t *testing.T) {
|
||||
|
||||
// 1. A External name Service that should be exposed via Tailscale gets
|
||||
// created.
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -1978,7 +1980,7 @@ func Test_metricsResourceCreation(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
@@ -2052,7 +2054,7 @@ func TestIgnorePGService(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
ssr: &tailscaleSTSReconciler{
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
)
|
||||
|
||||
func TestProxyClass(t *testing.T) {
|
||||
@@ -60,7 +60,7 @@ func TestProxyClass(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fr := record.NewFakeRecorder(3) // bump this if you expect a test case to throw more events
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
pcr := &ProxyClassReconciler{
|
||||
Client: fc,
|
||||
logger: zl.Sugar(),
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
"tailscale.com/kube/k8s-proxy/conf"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
@@ -592,7 +592,7 @@ type reconcile struct {
|
||||
tsClient := &fakeTSClient{}
|
||||
zl, _ := zap.NewDevelopment()
|
||||
fr := record.NewFakeRecorder(10)
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
|
||||
pc := &tsapi.ProxyClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -834,7 +834,7 @@ func TestProxyGroup(t *testing.T) {
|
||||
tsClient := &fakeTSClient{}
|
||||
zl, _ := zap.NewDevelopment()
|
||||
fr := record.NewFakeRecorder(1)
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
reconciler := &ProxyGroupReconciler{
|
||||
tsNamespace: tsNamespace,
|
||||
tsProxyImage: testProxyImage,
|
||||
@@ -1051,7 +1051,7 @@ func TestProxyGroupTypes(t *testing.T) {
|
||||
Client: fc,
|
||||
log: zl.Sugar(),
|
||||
tsClient: &fakeTSClient{},
|
||||
clock: tstest.NewClock(tstest.ClockOpts{}),
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
|
||||
t.Run("egress_type", func(t *testing.T) {
|
||||
@@ -1291,7 +1291,7 @@ func TestKubeAPIServerStatusConditionFlow(t *testing.T) {
|
||||
Client: fc,
|
||||
log: zap.Must(zap.NewDevelopment()).Sugar(),
|
||||
tsClient: &fakeTSClient{},
|
||||
clock: tstest.NewClock(tstest.ClockOpts{}),
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
|
||||
expectReconciled(t, r, "", pg.Name)
|
||||
@@ -1344,7 +1344,7 @@ func TestKubeAPIServerType_DoesNotOverwriteServicesConfig(t *testing.T) {
|
||||
Client: fc,
|
||||
log: zap.Must(zap.NewDevelopment()).Sugar(),
|
||||
tsClient: &fakeTSClient{},
|
||||
clock: tstest.NewClock(tstest.ClockOpts{}),
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
|
||||
pg := &tsapi.ProxyGroup{
|
||||
@@ -1429,7 +1429,7 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) {
|
||||
Client: fc,
|
||||
log: zap.Must(zap.NewDevelopment()).Sugar(),
|
||||
tsClient: &fakeTSClient{},
|
||||
clock: tstest.NewClock(tstest.ClockOpts{}),
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
|
||||
existingServices := []string{"svc1", "svc2"}
|
||||
@@ -1686,7 +1686,7 @@ func proxyClassesForLEStagingTest() (*tsapi.ProxyClass, *tsapi.ProxyClass, *tsap
|
||||
return pcLEStaging, pcLEStagingFalse, pcOther
|
||||
}
|
||||
|
||||
func setProxyClassReady(t *testing.T, fc client.Client, cl *tstest.Clock, name string) *tsapi.ProxyClass {
|
||||
func setProxyClassReady(t *testing.T, fc client.Client, cl tstime.Clock, name string) *tsapi.ProxyClass {
|
||||
t.Helper()
|
||||
pc := &tsapi.ProxyClass{}
|
||||
if err := fc.Get(t.Context(), client.ObjectKey{Name: name}, pc); err != nil {
|
||||
@@ -1838,7 +1838,7 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG
|
||||
}
|
||||
|
||||
func TestProxyGroupLetsEncryptStaging(t *testing.T) {
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
zl := zap.Must(zap.NewDevelopment())
|
||||
|
||||
// Set up test cases- most are shared with non-HA Ingress.
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/kube/ingressservices"
|
||||
"tailscale.com/kube/kubetypes"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
"tailscale.com/util/mak"
|
||||
|
||||
@@ -112,7 +112,7 @@ func TestServicePGReconciler_UpdateHostname(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, client.Client, *fakeTSClient, *tstest.Clock) {
|
||||
func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, client.Client, *fakeTSClient, tstime.Clock) {
|
||||
// Pre-create the ProxyGroup
|
||||
pg := &tsapi.ProxyGroup{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -203,7 +203,7 @@ func setupServiceTest(t *testing.T) (*HAServiceReconciler, *corev1.Secret, clien
|
||||
},
|
||||
}
|
||||
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
svcPGR := &HAServiceReconciler{
|
||||
Client: fc,
|
||||
tsClient: ft,
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
|
||||
tsoperator "tailscale.com/k8s-operator"
|
||||
tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/ptr"
|
||||
)
|
||||
|
||||
@@ -52,7 +52,7 @@ func TestRecorder(t *testing.T) {
|
||||
tsClient := &fakeTSClient{}
|
||||
zl, _ := zap.NewDevelopment()
|
||||
fr := record.NewFakeRecorder(2)
|
||||
cl := tstest.NewClock(tstest.ClockOpts{})
|
||||
cl := tstime.StdClock{}
|
||||
reconciler := &RecorderReconciler{
|
||||
tsNamespace: tsNamespace,
|
||||
Client: fc,
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
@@ -79,7 +79,7 @@ func TestClientRecv(t *testing.T) {
|
||||
nc: dummyNetConn{},
|
||||
br: bufio.NewReader(bytes.NewReader(tt.input)),
|
||||
logf: t.Logf,
|
||||
clock: &tstest.Clock{},
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
got, err := c.Recv()
|
||||
if err != nil {
|
||||
@@ -186,7 +186,7 @@ func TestClientSendRateLimiting(t *testing.T) {
|
||||
cw := new(countWriter)
|
||||
c := &Client{
|
||||
bw: bufio.NewWriter(cw),
|
||||
clock: &tstest.Clock{},
|
||||
clock: tstime.StdClock{},
|
||||
}
|
||||
c.setSendRateLimiter(ServerInfoMessage{})
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/tailscale/xnet/webdav"
|
||||
"tailscale.com/drive/driveimpl/shared"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
)
|
||||
|
||||
func TestStat(t *testing.T) {
|
||||
@@ -276,7 +276,7 @@ func TestRename(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func createFileSystem(t *testing.T) (webdav.FileSystem, string, string, *tstest.Clock) {
|
||||
func createFileSystem(t *testing.T) (webdav.FileSystem, string, string, tstime.Clock) {
|
||||
s1, dir1 := startRemote(t)
|
||||
s2, dir2 := startRemote(t)
|
||||
|
||||
@@ -301,7 +301,7 @@ func createFileSystem(t *testing.T) (webdav.FileSystem, string, string, *tstest.
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
clock := tstest.NewClock(tstest.ClockOpts{Start: time.Now()})
|
||||
clock := tstime.StdClock{}
|
||||
fs := &FS{
|
||||
Clock: clock,
|
||||
StaticRoot: "domain",
|
||||
|
||||
@@ -501,7 +501,7 @@ func(t *testing.T, env *peerAPITestEnv) {
|
||||
ext := &fakeExtension{
|
||||
logf: e.logBuf.Logf,
|
||||
capFileSharing: tt.capSharing,
|
||||
clock: &tstest.Clock{},
|
||||
clock: tstime.StdClock{},
|
||||
taildrop: e.taildrop,
|
||||
}
|
||||
e.ph = &peerAPIHandler{
|
||||
@@ -557,7 +557,7 @@ func TestFileDeleteRace(t *testing.T) {
|
||||
fakeLB := &fakeExtension{
|
||||
logf: t.Logf,
|
||||
capFileSharing: true,
|
||||
clock: &tstest.Clock{},
|
||||
clock: tstime.StdClock{},
|
||||
taildrop: taildropMgr,
|
||||
}
|
||||
buf := make([]byte, 2<<20)
|
||||
|
||||
@@ -984,7 +984,7 @@ func TestCurrentStateETagWarnable(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("no_change", func(t *testing.T) {
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
ht1 := newTracker(clock)
|
||||
|
||||
ht1.SetUnhealthy(testWarnable, Args{})
|
||||
|
||||
@@ -8,11 +8,11 @@
|
||||
"reflect"
|
||||
"slices"
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
|
||||
"tailscale.com/drive"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
@@ -78,21 +78,17 @@ func TestIsNotableNotify(t *testing.T) {
|
||||
}
|
||||
|
||||
type rateLimitingBusSenderTester struct {
|
||||
tb testing.TB
|
||||
got []*ipn.Notify
|
||||
clock *tstest.Clock
|
||||
s *rateLimitingBusSender
|
||||
tb testing.TB
|
||||
got []*ipn.Notify
|
||||
s *rateLimitingBusSender
|
||||
}
|
||||
|
||||
func (st *rateLimitingBusSenderTester) init() {
|
||||
if st.s != nil {
|
||||
return
|
||||
}
|
||||
st.clock = tstest.NewClock(tstest.ClockOpts{
|
||||
Start: time.Unix(1731777537, 0), // time I wrote this test :)
|
||||
})
|
||||
st.s = &rateLimitingBusSender{
|
||||
clock: tstime.DefaultClock{Clock: st.clock},
|
||||
clock: tstime.DefaultClock{},
|
||||
fn: func(n *ipn.Notify) bool {
|
||||
st.got = append(st.got, n)
|
||||
return true
|
||||
@@ -110,7 +106,7 @@ func (st *rateLimitingBusSenderTester) send(n *ipn.Notify) {
|
||||
|
||||
func (st *rateLimitingBusSenderTester) advance(d time.Duration) {
|
||||
st.tb.Helper()
|
||||
st.clock.Advance(d)
|
||||
time.Sleep(d)
|
||||
select {
|
||||
case <-st.s.flushChan():
|
||||
if !st.s.flush() {
|
||||
@@ -138,83 +134,87 @@ func TestRateLimitingBusSender(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("buffered", func(t *testing.T) {
|
||||
st := &rateLimitingBusSenderTester{tb: t}
|
||||
st.init()
|
||||
st.s.interval = 1 * time.Second
|
||||
st.send(&ipn.Notify{Version: "initial"})
|
||||
if len(st.got) != 1 {
|
||||
t.Fatalf("got %d items; expected 1 (first to flush immediately)", len(st.got))
|
||||
}
|
||||
st.send(nm1)
|
||||
st.send(nm2)
|
||||
st.send(eng1)
|
||||
st.send(eng2)
|
||||
if len(st.got) != 1 {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
st := &rateLimitingBusSenderTester{tb: t}
|
||||
st.init()
|
||||
st.s.interval = 1 * time.Second
|
||||
st.send(&ipn.Notify{Version: "initial"})
|
||||
if len(st.got) != 1 {
|
||||
t.Fatalf("got %d items; expected still just that first 1", len(st.got))
|
||||
t.Fatalf("got %d items; expected 1 (first to flush immediately)", len(st.got))
|
||||
}
|
||||
st.send(nm1)
|
||||
st.send(nm2)
|
||||
st.send(eng1)
|
||||
st.send(eng2)
|
||||
if len(st.got) != 1 {
|
||||
if len(st.got) != 1 {
|
||||
t.Fatalf("got %d items; expected still just that first 1", len(st.got))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// But moving the clock should flush the rest, collasced into one new one.
|
||||
st.advance(5 * time.Second)
|
||||
if len(st.got) != 2 {
|
||||
t.Fatalf("got %d items; want 2", len(st.got))
|
||||
}
|
||||
gotn := st.got[1]
|
||||
if gotn.NetMap != nm2.NetMap {
|
||||
t.Errorf("got wrong NetMap; got %p", gotn.NetMap)
|
||||
}
|
||||
if gotn.Engine != eng2.Engine {
|
||||
t.Errorf("got wrong Engine; got %p", gotn.Engine)
|
||||
}
|
||||
if t.Failed() {
|
||||
t.Logf("failed Notify was: %v", logger.AsJSON(gotn))
|
||||
}
|
||||
// But moving the clock should flush the rest, collasced into one new one.
|
||||
st.advance(5 * time.Second)
|
||||
if len(st.got) != 2 {
|
||||
t.Fatalf("got %d items; want 2", len(st.got))
|
||||
}
|
||||
gotn := st.got[1]
|
||||
if gotn.NetMap != nm2.NetMap {
|
||||
t.Errorf("got wrong NetMap; got %p", gotn.NetMap)
|
||||
}
|
||||
if gotn.Engine != eng2.Engine {
|
||||
t.Errorf("got wrong Engine; got %p", gotn.Engine)
|
||||
}
|
||||
if t.Failed() {
|
||||
t.Logf("failed Notify was: %v", logger.AsJSON(gotn))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// Test the Run method
|
||||
t.Run("run", func(t *testing.T) {
|
||||
st := &rateLimitingBusSenderTester{tb: t}
|
||||
st.init()
|
||||
st.s.interval = 1 * time.Second
|
||||
st.s.lastFlush = st.clock.Now() // pretend we just flushed
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
st := &rateLimitingBusSenderTester{tb: t}
|
||||
st.init()
|
||||
st.s.interval = 1 * time.Second
|
||||
st.s.lastFlush = time.Now() // pretend we just flushed
|
||||
|
||||
flushc := make(chan *ipn.Notify, 1)
|
||||
st.s.fn = func(n *ipn.Notify) bool {
|
||||
flushc <- n
|
||||
return true
|
||||
}
|
||||
didSend := make(chan bool, 2)
|
||||
st.s.didSendTestHook = func() { didSend <- true }
|
||||
waitSend := func() {
|
||||
select {
|
||||
case <-didSend:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Error("timeout waiting for call to send")
|
||||
flushc := make(chan *ipn.Notify, 1)
|
||||
st.s.fn = func(n *ipn.Notify) bool {
|
||||
flushc <- n
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
incoming := make(chan *ipn.Notify, 2)
|
||||
go func() {
|
||||
incoming <- nm1
|
||||
waitSend()
|
||||
incoming <- nm2
|
||||
waitSend()
|
||||
st.advance(5 * time.Second)
|
||||
select {
|
||||
case n := <-flushc:
|
||||
if n.NetMap != nm2.NetMap {
|
||||
t.Errorf("got wrong NetMap; got %p", n.NetMap)
|
||||
didSend := make(chan bool, 2)
|
||||
st.s.didSendTestHook = func() { didSend <- true }
|
||||
waitSend := func() {
|
||||
select {
|
||||
case <-didSend:
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Error("timeout waiting for call to send")
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Error("timeout")
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
|
||||
st.s.Run(ctx, incoming)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
incoming := make(chan *ipn.Notify, 2)
|
||||
go func() {
|
||||
incoming <- nm1
|
||||
waitSend()
|
||||
incoming <- nm2
|
||||
waitSend()
|
||||
st.advance(5 * time.Second)
|
||||
select {
|
||||
case n := <-flushc:
|
||||
if n.NetMap != nm2.NetMap {
|
||||
t.Errorf("got wrong NetMap; got %p", n.NetMap)
|
||||
}
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Error("timeout")
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
|
||||
st.s.Run(ctx, incoming)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -32,8 +32,8 @@
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/tsd"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/tstest/tkatest"
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/persist"
|
||||
@@ -470,7 +470,7 @@ func TestTKASyncTriggersCompact(t *testing.T) {
|
||||
//
|
||||
// Our compaction algorithm preserves AUMs received in the last 14 days, so
|
||||
// we need to backdate the commit times to make the AUMs eligible for compaction.
|
||||
clock := tstest.NewClock(tstest.ClockOpts{})
|
||||
clock := tstime.StdClock{}
|
||||
clock.Advance(-30 * 24 * time.Hour)
|
||||
|
||||
// Set up the TKA authority on the control plane.
|
||||
|
||||
694
tstest/clock.go
694
tstest/clock.go
@@ -1,694 +0,0 @@
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package tstest
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/tstime"
|
||||
"tailscale.com/util/mak"
|
||||
)
|
||||
|
||||
// ClockOpts is used to configure the initial settings for a Clock. Once the
|
||||
// settings are configured as desired, call NewClock to get the resulting Clock.
|
||||
type ClockOpts struct {
|
||||
// Start is the starting time for the Clock. When FollowRealTime is false,
|
||||
// Start is also the value that will be returned by the first call
|
||||
// to Clock.Now.
|
||||
Start time.Time
|
||||
// Step is the amount of time the Clock will advance whenever Clock.Now is
|
||||
// called. If set to zero, the Clock will only advance when Clock.Advance is
|
||||
// called and/or if FollowRealTime is true.
|
||||
//
|
||||
// FollowRealTime and Step cannot be enabled at the same time.
|
||||
Step time.Duration
|
||||
|
||||
// TimerChannelSize configures the maximum buffered ticks that are
|
||||
// permitted in the channel of any Timer and Ticker created by this Clock.
|
||||
// The special value 0 means to use the default of 1. The buffer may need to
|
||||
// be increased if time is advanced by more than a single tick and proper
|
||||
// functioning of the test requires that the ticks are not lost.
|
||||
TimerChannelSize int
|
||||
|
||||
// FollowRealTime makes the simulated time increment along with real time.
|
||||
// It is a compromise between determinism and the difficulty of explicitly
|
||||
// managing the simulated time via Step or Clock.Advance. When
|
||||
// FollowRealTime is set, calls to Now() and PeekNow() will add the
|
||||
// elapsed real-world time to the simulated time.
|
||||
//
|
||||
// FollowRealTime and Step cannot be enabled at the same time.
|
||||
FollowRealTime bool
|
||||
}
|
||||
|
||||
// NewClock creates a Clock with the specified settings. To create a
|
||||
// Clock with only the default settings, new(Clock) is equivalent, except that
|
||||
// the start time will not be computed until one of the receivers is called.
|
||||
func NewClock(co ClockOpts) *Clock {
|
||||
if co.FollowRealTime && co.Step != 0 {
|
||||
panic("only one of FollowRealTime and Step are allowed in NewClock")
|
||||
}
|
||||
|
||||
return newClockInternal(co, nil)
|
||||
}
|
||||
|
||||
// newClockInternal creates a Clock with the specified settings and allows
|
||||
// specifying a non-standard realTimeClock.
|
||||
func newClockInternal(co ClockOpts, rtClock tstime.Clock) *Clock {
|
||||
if !co.FollowRealTime && rtClock != nil {
|
||||
panic("rtClock can only be set with FollowRealTime enabled")
|
||||
}
|
||||
|
||||
if co.FollowRealTime && rtClock == nil {
|
||||
rtClock = new(tstime.StdClock)
|
||||
}
|
||||
|
||||
c := &Clock{
|
||||
start: co.Start,
|
||||
realTimeClock: rtClock,
|
||||
step: co.Step,
|
||||
timerChannelSize: co.TimerChannelSize,
|
||||
}
|
||||
c.init() // init now to capture the current time when co.Start.IsZero()
|
||||
return c
|
||||
}
|
||||
|
||||
// Clock is a testing clock that advances every time its Now method is
|
||||
// called, beginning at its start time. If no start time is specified using
|
||||
// ClockBuilder, an arbitrary start time will be selected when the Clock is
|
||||
// created and can be retrieved by calling Clock.Start().
|
||||
type Clock struct {
|
||||
// start is the first value returned by Now. It must not be modified after
|
||||
// init is called.
|
||||
start time.Time
|
||||
|
||||
// realTimeClock, if not nil, indicates that the Clock shall move forward
|
||||
// according to realTimeClock + the accumulated calls to Advance. This can
|
||||
// make writing tests easier that require some control over the clock but do
|
||||
// not need exact control over the clock. While step can also be used for
|
||||
// this purpose, it is harder to control how quickly time moves using step.
|
||||
realTimeClock tstime.Clock
|
||||
|
||||
initOnce sync.Once
|
||||
mu sync.Mutex
|
||||
|
||||
// step is how much to advance with each Now call.
|
||||
step time.Duration
|
||||
// present is the last value returned by Now (and will be returned again by
|
||||
// PeekNow).
|
||||
present time.Time
|
||||
// realTime is the time from realTimeClock corresponding to the current
|
||||
// value of present.
|
||||
realTime time.Time
|
||||
// skipStep indicates that the next call to Now should not add step to
|
||||
// present. This occurs after initialization and after Advance.
|
||||
skipStep bool
|
||||
// timerChannelSize is the buffer size to use for channels created by
|
||||
// NewTimer and NewTicker.
|
||||
timerChannelSize int
|
||||
|
||||
events eventManager
|
||||
}
|
||||
|
||||
func (c *Clock) init() {
|
||||
c.initOnce.Do(func() {
|
||||
if c.realTimeClock != nil {
|
||||
c.realTime = c.realTimeClock.Now()
|
||||
}
|
||||
if c.start.IsZero() {
|
||||
if c.realTime.IsZero() {
|
||||
c.start = time.Now()
|
||||
} else {
|
||||
c.start = c.realTime
|
||||
}
|
||||
}
|
||||
if c.timerChannelSize == 0 {
|
||||
c.timerChannelSize = 1
|
||||
}
|
||||
c.present = c.start
|
||||
c.skipStep = true
|
||||
c.events.AdvanceTo(c.present)
|
||||
})
|
||||
}
|
||||
|
||||
// Now returns the virtual clock's current time, and advances it
|
||||
// according to its step configuration.
|
||||
func (c *Clock) Now() time.Time {
|
||||
c.init()
|
||||
rt := c.maybeGetRealTime()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
step := c.step
|
||||
if c.skipStep {
|
||||
step = 0
|
||||
c.skipStep = false
|
||||
}
|
||||
c.advanceLocked(rt, step)
|
||||
|
||||
return c.present
|
||||
}
|
||||
|
||||
func (c *Clock) maybeGetRealTime() time.Time {
|
||||
if c.realTimeClock == nil {
|
||||
return time.Time{}
|
||||
}
|
||||
return c.realTimeClock.Now()
|
||||
}
|
||||
|
||||
func (c *Clock) advanceLocked(now time.Time, add time.Duration) {
|
||||
if !now.IsZero() {
|
||||
add += now.Sub(c.realTime)
|
||||
c.realTime = now
|
||||
}
|
||||
if add == 0 {
|
||||
return
|
||||
}
|
||||
c.present = c.present.Add(add)
|
||||
c.events.AdvanceTo(c.present)
|
||||
}
|
||||
|
||||
// PeekNow returns the last time reported by Now. If Now has never been called,
|
||||
// PeekNow returns the same value as GetStart.
|
||||
func (c *Clock) PeekNow() time.Time {
|
||||
c.init()
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.present
|
||||
}
|
||||
|
||||
// Advance moves simulated time forward or backwards by a relative amount. Any
|
||||
// Timer or Ticker that is waiting will fire at the requested point in simulated
|
||||
// time. Advance returns the new simulated time. If this Clock follows real time
|
||||
// then the next call to Now will equal the return value of Advance + the
|
||||
// elapsed time since calling Advance. Otherwise, the next call to Now will
|
||||
// equal the return value of Advance, regardless of the current step.
|
||||
func (c *Clock) Advance(d time.Duration) time.Time {
|
||||
c.init()
|
||||
rt := c.maybeGetRealTime()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.skipStep = true
|
||||
|
||||
c.advanceLocked(rt, d)
|
||||
return c.present
|
||||
}
|
||||
|
||||
// AdvanceTo moves simulated time to a new absolute value. Any Timer or Ticker
|
||||
// that is waiting will fire at the requested point in simulated time. If this
|
||||
// Clock follows real time then the next call to Now will equal t + the elapsed
|
||||
// time since calling Advance. Otherwise, the next call to Now will equal t,
|
||||
// regardless of the configured step.
|
||||
func (c *Clock) AdvanceTo(t time.Time) {
|
||||
c.init()
|
||||
rt := c.maybeGetRealTime()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.skipStep = true
|
||||
c.realTime = rt
|
||||
c.present = t
|
||||
c.events.AdvanceTo(c.present)
|
||||
}
|
||||
|
||||
// GetStart returns the initial simulated time when this Clock was created.
|
||||
func (c *Clock) GetStart() time.Time {
|
||||
c.init()
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.start
|
||||
}
|
||||
|
||||
// GetStep returns the amount that simulated time advances on every call to Now.
|
||||
func (c *Clock) GetStep() time.Duration {
|
||||
c.init()
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.step
|
||||
}
|
||||
|
||||
// SetStep updates the amount that simulated time advances on every call to Now.
|
||||
func (c *Clock) SetStep(d time.Duration) {
|
||||
c.init()
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.step = d
|
||||
}
|
||||
|
||||
// SetTimerChannelSize changes the channel size for any Timer or Ticker created
|
||||
// in the future. It does not affect those that were already created.
|
||||
func (c *Clock) SetTimerChannelSize(n int) {
|
||||
c.init()
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.timerChannelSize = n
|
||||
}
|
||||
|
||||
// NewTicker returns a Ticker that uses this Clock for accessing the current
|
||||
// time.
|
||||
func (c *Clock) NewTicker(d time.Duration) (tstime.TickerController, <-chan time.Time) {
|
||||
c.init()
|
||||
rt := c.maybeGetRealTime()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.advanceLocked(rt, 0)
|
||||
t := &Ticker{
|
||||
nextTrigger: c.present.Add(d),
|
||||
period: d,
|
||||
em: &c.events,
|
||||
}
|
||||
t.init(c.timerChannelSize)
|
||||
return t, t.C
|
||||
}
|
||||
|
||||
// NewTimer returns a Timer that uses this Clock for accessing the current
|
||||
// time.
|
||||
func (c *Clock) NewTimer(d time.Duration) (tstime.TimerController, <-chan time.Time) {
|
||||
c.init()
|
||||
rt := c.maybeGetRealTime()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.advanceLocked(rt, 0)
|
||||
t := &Timer{
|
||||
nextTrigger: c.present.Add(d),
|
||||
em: &c.events,
|
||||
}
|
||||
t.init(c.timerChannelSize, nil)
|
||||
return t, t.C
|
||||
}
|
||||
|
||||
// AfterFunc returns a Timer that calls f when it fires, using this Clock for
|
||||
// accessing the current time.
|
||||
func (c *Clock) AfterFunc(d time.Duration, f func()) tstime.TimerController {
|
||||
c.init()
|
||||
rt := c.maybeGetRealTime()
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
c.advanceLocked(rt, 0)
|
||||
t := &Timer{
|
||||
nextTrigger: c.present.Add(d),
|
||||
em: &c.events,
|
||||
}
|
||||
t.init(c.timerChannelSize, f)
|
||||
return t
|
||||
}
|
||||
|
||||
// Since subtracts specified duration from Now().
|
||||
func (c *Clock) Since(t time.Time) time.Duration {
|
||||
return c.Now().Sub(t)
|
||||
}
|
||||
|
||||
// eventHandler offers a common interface for Timer and Ticker events to avoid
|
||||
// code duplication in eventManager.
|
||||
type eventHandler interface {
|
||||
// Fire signals the event. The provided time is written to the event's
|
||||
// channel as the current time. The return value is the next time this event
|
||||
// should fire, otherwise if it is zero then the event will be removed from
|
||||
// the eventManager.
|
||||
Fire(time.Time) time.Time
|
||||
}
|
||||
|
||||
// event tracks details about an upcoming Timer or Ticker firing.
|
||||
type event struct {
|
||||
position int // The current index in the heap, needed for heap.Fix and heap.Remove.
|
||||
when time.Time // A cache of the next time the event triggers to avoid locking issues if we were to get it from eh.
|
||||
eh eventHandler
|
||||
}
|
||||
|
||||
// eventManager tracks pending events created by Timer and Ticker. eventManager
|
||||
// implements heap.Interface for efficient lookups of the next event.
|
||||
type eventManager struct {
|
||||
// clock is a real time clock for scheduling events with. When clock is nil,
|
||||
// events only fire when AdvanceTo is called by the simulated clock that
|
||||
// this eventManager belongs to. When clock is not nil, events may fire when
|
||||
// timer triggers.
|
||||
clock tstime.Clock
|
||||
|
||||
mu sync.Mutex
|
||||
now time.Time
|
||||
heap []*event
|
||||
reverseLookup map[eventHandler]*event
|
||||
|
||||
// timer is an AfterFunc that triggers at heap[0].when.Sub(now) relative to
|
||||
// the time represented by clock. In other words, if clock is real world
|
||||
// time, then if an event is scheduled 1 second into the future in the
|
||||
// simulated time, then the event will trigger after 1 second of actual test
|
||||
// execution time (unless the test advances simulated time, in which case
|
||||
// the timer is updated accordingly). This makes tests easier to write in
|
||||
// situations where the simulated time only needs to be partially
|
||||
// controlled, and the test writer wishes for simulated time to pass with an
|
||||
// offset but still synchronized with the real world.
|
||||
//
|
||||
// In the future, this could be extended to allow simulated time to run at a
|
||||
// multiple of real world time.
|
||||
timer tstime.TimerController
|
||||
}
|
||||
|
||||
func (em *eventManager) handleTimer() {
|
||||
rt := em.clock.Now()
|
||||
em.AdvanceTo(rt)
|
||||
}
|
||||
|
||||
// Push implements heap.Interface.Push and must only be called by heap funcs
|
||||
// with em.mu already held.
|
||||
func (em *eventManager) Push(x any) {
|
||||
e, ok := x.(*event)
|
||||
if !ok {
|
||||
panic("incorrect event type")
|
||||
}
|
||||
if e == nil {
|
||||
panic("nil event")
|
||||
}
|
||||
|
||||
mak.Set(&em.reverseLookup, e.eh, e)
|
||||
e.position = len(em.heap)
|
||||
em.heap = append(em.heap, e)
|
||||
}
|
||||
|
||||
// Pop implements heap.Interface.Pop and must only be called by heap funcs with
|
||||
// em.mu already held.
|
||||
func (em *eventManager) Pop() any {
|
||||
e := em.heap[len(em.heap)-1]
|
||||
em.heap = em.heap[:len(em.heap)-1]
|
||||
delete(em.reverseLookup, e.eh)
|
||||
return e
|
||||
}
|
||||
|
||||
// Len implements sort.Interface.Len and must only be called by heap funcs with
|
||||
// em.mu already held.
|
||||
func (em *eventManager) Len() int {
|
||||
return len(em.heap)
|
||||
}
|
||||
|
||||
// Less implements sort.Interface.Less and must only be called by heap funcs
|
||||
// with em.mu already held.
|
||||
func (em *eventManager) Less(i, j int) bool {
|
||||
return em.heap[i].when.Before(em.heap[j].when)
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface.Swap and must only be called by heap funcs
|
||||
// with em.mu already held.
|
||||
func (em *eventManager) Swap(i, j int) {
|
||||
em.heap[i], em.heap[j] = em.heap[j], em.heap[i]
|
||||
em.heap[i].position = i
|
||||
em.heap[j].position = j
|
||||
}
|
||||
|
||||
// Reschedule adds/updates/deletes an event in the heap, whichever
|
||||
// operation is applicable (use a zero time to delete).
|
||||
func (em *eventManager) Reschedule(eh eventHandler, t time.Time) {
|
||||
em.mu.Lock()
|
||||
defer em.mu.Unlock()
|
||||
defer em.updateTimerLocked()
|
||||
|
||||
e, ok := em.reverseLookup[eh]
|
||||
if !ok {
|
||||
if t.IsZero() {
|
||||
// eh is not scheduled and also not active, so do nothing.
|
||||
return
|
||||
}
|
||||
// eh is not scheduled but is active, so add it.
|
||||
heap.Push(em, &event{
|
||||
when: t,
|
||||
eh: eh,
|
||||
})
|
||||
em.processEventsLocked(em.now) // This is always safe and required when !t.After(em.now).
|
||||
return
|
||||
}
|
||||
|
||||
if t.IsZero() {
|
||||
// e is scheduled but not active, so remove it.
|
||||
heap.Remove(em, e.position)
|
||||
return
|
||||
}
|
||||
|
||||
// e is scheduled and active, so update it.
|
||||
e.when = t
|
||||
heap.Fix(em, e.position)
|
||||
em.processEventsLocked(em.now) // This is always safe and required when !t.After(em.now).
|
||||
}
|
||||
|
||||
// AdvanceTo updates the current time to tm and fires all events scheduled
|
||||
// before or equal to tm. When an event fires, it may request rescheduling and
|
||||
// the rescheduled events will be combined with the other existing events that
|
||||
// are waiting, and will be run in the unified ordering. A poorly behaved event
|
||||
// may theoretically prevent this from ever completing, but both Timer and
|
||||
// Ticker require positive steps into the future.
|
||||
func (em *eventManager) AdvanceTo(tm time.Time) {
|
||||
em.mu.Lock()
|
||||
defer em.mu.Unlock()
|
||||
defer em.updateTimerLocked()
|
||||
|
||||
em.processEventsLocked(tm)
|
||||
em.now = tm
|
||||
}
|
||||
|
||||
// Now returns the cached current time. It is intended for use by a Timer or
|
||||
// Ticker that needs to convert a relative time to an absolute time.
|
||||
func (em *eventManager) Now() time.Time {
|
||||
em.mu.Lock()
|
||||
defer em.mu.Unlock()
|
||||
return em.now
|
||||
}
|
||||
|
||||
func (em *eventManager) processEventsLocked(tm time.Time) {
|
||||
for len(em.heap) > 0 && !em.heap[0].when.After(tm) {
|
||||
// Ideally some jitter would be added here but it's difficult to do so
|
||||
// in a deterministic fashion.
|
||||
em.now = em.heap[0].when
|
||||
|
||||
if nextFire := em.heap[0].eh.Fire(em.now); !nextFire.IsZero() {
|
||||
em.heap[0].when = nextFire
|
||||
heap.Fix(em, 0)
|
||||
} else {
|
||||
heap.Pop(em)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (em *eventManager) updateTimerLocked() {
|
||||
if em.clock == nil {
|
||||
return
|
||||
}
|
||||
if len(em.heap) == 0 {
|
||||
if em.timer != nil {
|
||||
em.timer.Stop()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
timeToEvent := em.heap[0].when.Sub(em.now)
|
||||
if em.timer == nil {
|
||||
em.timer = em.clock.AfterFunc(timeToEvent, em.handleTimer)
|
||||
return
|
||||
}
|
||||
em.timer.Reset(timeToEvent)
|
||||
}
|
||||
|
||||
// Ticker is a time.Ticker lookalike for use in tests that need to control when
// events fire. Ticker could be made standalone in future but for now is
// expected to be paired with a Clock and created by Clock.NewTicker.
type Ticker struct {
	C <-chan time.Time // The channel on which ticks are delivered.

	// em is the eventManager to be notified when nextTrigger changes.
	// eventManager has its own mutex, and the pointer is immutable, therefore
	// em can be accessed without holding mu.
	em *eventManager

	c chan<- time.Time // The writer side of C.

	// mu guards the mutable scheduling state below (nextTrigger and period).
	mu sync.Mutex

	// nextTrigger is the time of the ticker's next scheduled activation. When
	// Fire activates the ticker, nextTrigger is the timestamp written to the
	// channel. The zero value means the ticker is stopped.
	nextTrigger time.Time

	// period is the duration that is added to nextTrigger when the ticker
	// fires.
	period time.Duration
}
|
||||
|
||||
func (t *Ticker) init(channelSize int) {
|
||||
if channelSize <= 0 {
|
||||
panic("ticker channel size must be non-negative")
|
||||
}
|
||||
c := make(chan time.Time, channelSize)
|
||||
t.c = c
|
||||
t.C = c
|
||||
t.em.Reschedule(t, t.nextTrigger)
|
||||
}
|
||||
|
||||
// Fire triggers the ticker. curTime is the timestamp to write to the channel.
|
||||
// The next trigger time for the ticker is updated to the last computed trigger
|
||||
// time + the ticker period (set at creation or using Reset). The next trigger
|
||||
// time is computed this way to match standard time.Ticker behavior, which
|
||||
// prevents accumulation of long term drift caused by delays in event execution.
|
||||
func (t *Ticker) Fire(curTime time.Time) time.Time {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.nextTrigger.IsZero() {
|
||||
return time.Time{}
|
||||
}
|
||||
select {
|
||||
case t.c <- curTime:
|
||||
default:
|
||||
}
|
||||
t.nextTrigger = t.nextTrigger.Add(t.period)
|
||||
|
||||
return t.nextTrigger
|
||||
}
|
||||
|
||||
// Reset adjusts the Ticker's period to d and reschedules the next fire time to
|
||||
// the current simulated time + d.
|
||||
func (t *Ticker) Reset(d time.Duration) {
|
||||
if d <= 0 {
|
||||
// The standard time.Ticker requires a positive period.
|
||||
panic("non-positive period for Ticker.Reset")
|
||||
}
|
||||
|
||||
now := t.em.Now()
|
||||
|
||||
t.mu.Lock()
|
||||
t.resetLocked(now.Add(d), d)
|
||||
t.mu.Unlock()
|
||||
|
||||
t.em.Reschedule(t, t.nextTrigger)
|
||||
}
|
||||
|
||||
// ResetAbsolute adjusts the Ticker's period to d and reschedules the next fire
|
||||
// time to nextTrigger.
|
||||
func (t *Ticker) ResetAbsolute(nextTrigger time.Time, d time.Duration) {
|
||||
if nextTrigger.IsZero() {
|
||||
panic("zero nextTrigger time for ResetAbsolute")
|
||||
}
|
||||
if d <= 0 {
|
||||
panic("non-positive period for ResetAbsolute")
|
||||
}
|
||||
|
||||
t.mu.Lock()
|
||||
t.resetLocked(nextTrigger, d)
|
||||
t.mu.Unlock()
|
||||
|
||||
t.em.Reschedule(t, t.nextTrigger)
|
||||
}
|
||||
|
||||
// resetLocked installs a new next fire time and period. t.mu must be held by
// the caller; the caller is also responsible for notifying the eventManager
// (via Reschedule) after releasing the lock.
func (t *Ticker) resetLocked(nextTrigger time.Time, d time.Duration) {
	t.nextTrigger = nextTrigger
	t.period = d
}
|
||||
|
||||
// Stop deactivates the Ticker.
|
||||
func (t *Ticker) Stop() {
|
||||
t.mu.Lock()
|
||||
t.nextTrigger = time.Time{}
|
||||
t.mu.Unlock()
|
||||
|
||||
t.em.Reschedule(t, t.nextTrigger)
|
||||
}
|
||||
|
||||
// Timer is a time.Timer lookalike for use in tests that need to control when
// events fire. Timer could be made standalone in future but for now must be
// paired with a Clock and created by Clock.NewTimer.
type Timer struct {
	C <-chan time.Time // The channel on which the expiry timestamp is delivered.

	// em is the eventManager to be notified when nextTrigger changes.
	// eventManager has its own mutex, and the pointer is immutable, therefore
	// em can be accessed without holding mu.
	em *eventManager

	f func(time.Time) // The function to call when the timer expires.

	// mu guards nextTrigger.
	mu sync.Mutex

	// nextTrigger is the time of the timer's next scheduled activation. When
	// Fire activates the timer, the current timestamp is passed to f. The
	// zero value means the timer is inactive (stopped or already fired).
	nextTrigger time.Time
}
|
||||
|
||||
func (t *Timer) init(channelSize int, afterFunc func()) {
|
||||
if channelSize <= 0 {
|
||||
panic("ticker channel size must be non-negative")
|
||||
}
|
||||
c := make(chan time.Time, channelSize)
|
||||
t.C = c
|
||||
if afterFunc == nil {
|
||||
t.f = func(curTime time.Time) {
|
||||
select {
|
||||
case c <- curTime:
|
||||
default:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
t.f = func(_ time.Time) { afterFunc() }
|
||||
}
|
||||
t.em.Reschedule(t, t.nextTrigger)
|
||||
}
|
||||
|
||||
// Fire triggers the timer, invoking its callback f with curTime. Unlike
// Ticker.Fire, a Timer is one-shot: firing clears nextTrigger (deactivating
// the timer) and the returned next-trigger time is always the zero Time,
// which tells the event manager not to reschedule this event.
// (The previous doc comment was copy-pasted from Ticker and described
// rescheduling behavior that does not apply here.)
func (t *Timer) Fire(curTime time.Time) time.Time {
	t.mu.Lock()
	defer t.mu.Unlock()

	if t.nextTrigger.IsZero() {
		// Already stopped or already fired: nothing to do.
		return time.Time{}
	}
	// Deactivate before invoking the callback so the timer reads as
	// inactive once it has fired.
	t.nextTrigger = time.Time{}
	t.f(curTime)
	return time.Time{}
}
|
||||
|
||||
// Reset reschedules the next fire time to the current simulated time + d.
|
||||
// Reset reports whether the timer was still active before the reset.
|
||||
func (t *Timer) Reset(d time.Duration) bool {
|
||||
if d <= 0 {
|
||||
// The standard time.Timer requires a positive delay.
|
||||
panic("non-positive delay for Timer.Reset")
|
||||
}
|
||||
|
||||
return t.reset(t.em.Now().Add(d))
|
||||
}
|
||||
|
||||
// ResetAbsolute reschedules the next fire time to nextTrigger.
|
||||
// ResetAbsolute reports whether the timer was still active before the reset.
|
||||
func (t *Timer) ResetAbsolute(nextTrigger time.Time) bool {
|
||||
if nextTrigger.IsZero() {
|
||||
panic("zero nextTrigger time for ResetAbsolute")
|
||||
}
|
||||
|
||||
return t.reset(nextTrigger)
|
||||
}
|
||||
|
||||
// Stop deactivates the Timer. Stop reports whether the timer was active before
|
||||
// stopping.
|
||||
func (t *Timer) Stop() bool {
|
||||
return t.reset(time.Time{})
|
||||
}
|
||||
|
||||
func (t *Timer) reset(nextTrigger time.Time) bool {
|
||||
t.mu.Lock()
|
||||
wasActive := !t.nextTrigger.IsZero()
|
||||
t.nextTrigger = nextTrigger
|
||||
t.mu.Unlock()
|
||||
|
||||
t.em.Reschedule(t, t.nextTrigger)
|
||||
return wasActive
|
||||
}
|
||||
2474
tstest/clock_test.go
2474
tstest/clock_test.go
File diff suppressed because it is too large
Load Diff
@@ -108,6 +108,8 @@ func (c DefaultClock) Since(t time.Time) time.Duration {
|
||||
// time precisely, something required for certain types of tests to be possible
|
||||
// at all, speeds up execution by not needing to sleep, and can dramatically
|
||||
// reduce the risk of flakes due to tests executing too slowly or quickly.
|
||||
//
|
||||
// DEPRECATED: now that testing/synctest is available, use that for testing.
|
||||
type Clock interface {
|
||||
// Now returns the current time, as in time.Now.
|
||||
Now() time.Time
|
||||
|
||||
Reference in New Issue
Block a user