diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml
index 92decef17..4d59b4aad 100644
--- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml
+++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml
@@ -40,6 +40,9 @@ rules:
 - apiGroups: ["tailscale.com"]
   resources: ["tailnets", "tailnets/status"]
   verbs: ["get", "list", "watch", "update"]
+- apiGroups: ["tailscale.com"]
+  resources: ["proxygrouppolicies", "proxygrouppolicies/status"]
+  verbs: ["get", "list", "watch", "update"]
 - apiGroups: ["tailscale.com"]
   resources: ["recorders", "recorders/status"]
   verbs: ["get", "list", "watch", "update"]
@@ -47,6 +50,9 @@ rules:
   resources: ["customresourcedefinitions"]
   verbs: ["get", "list", "watch"]
   resourceNames: ["servicemonitors.monitoring.coreos.com"]
+- apiGroups: ["admissionregistration.k8s.io"]
+  resources: ["validatingadmissionpolicies", "validatingadmissionpolicybindings"]
+  verbs: ["get", "list", "watch", "create", "update", "delete"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml
index b31e45eb7..08dae0e7b 100644
--- a/cmd/k8s-operator/deploy/manifests/operator.yaml
+++ b/cmd/k8s-operator/deploy/manifests/operator.yaml
@@ -5318,6 +5318,16 @@ rules:
     - list
     - watch
     - update
+  - apiGroups:
+      - tailscale.com
+    resources:
+      - proxygrouppolicies
+      - proxygrouppolicies/status
+    verbs:
+      - get
+      - list
+      - watch
+      - update
   - apiGroups:
       - tailscale.com
     resources:
diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go
index eb05efa0c..5339b0583 100644
--- a/cmd/k8s-operator/e2e/ingress_test.go
+++ b/cmd/k8s-operator/e2e/ingress_test.go
@@ -14,6 +14,7 @@
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	kube "tailscale.com/k8s-operator"
 	"tailscale.com/tstest"
 	"tailscale.com/types/ptr"
diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go
index cb5c35c00..23fef0f8e 100644
--- a/cmd/k8s-operator/e2e/main_test.go
+++ b/cmd/k8s-operator/e2e/main_test.go
@@ -60,6 +60,27 @@ func createAndCleanup(t *testing.T, cl client.Client, obj client.Object) {
 	})
 }
 
+// createAndCleanupErr is like createAndCleanup, but returns any creation error
+// to the caller instead of failing the test.
+func createAndCleanupErr(t *testing.T, cl client.Client, obj client.Object) error {
+	t.Helper()
+
+	err := cl.Create(t.Context(), obj)
+	if err != nil {
+		return err
+	}
+
+	t.Cleanup(func() {
+		// Use a fresh context here: t.Context() is canceled before cleanup
+		// functions run, so deleting with it would always fail.
+		if err := cl.Delete(context.Background(), obj); err != nil {
+			t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err)
+		}
+	})
+
+	return nil
+}
+
 func get(ctx context.Context, cl client.Client, obj client.Object) error {
 	return cl.Get(ctx, client.ObjectKeyFromObject(obj), obj)
 }
diff --git a/cmd/k8s-operator/e2e/pebble.go b/cmd/k8s-operator/e2e/pebble.go
index a3ccb50cd..5fcb35e05 100644
--- a/cmd/k8s-operator/e2e/pebble.go
+++ b/cmd/k8s-operator/e2e/pebble.go
@@ -12,6 +12,7 @@
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"tailscale.com/types/ptr"
 )
diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go
index f7d11d278..2d4fa53cc 100644
--- a/cmd/k8s-operator/e2e/proxy_test.go
+++ b/cmd/k8s-operator/e2e/proxy_test.go
@@ -16,6 +16,7 @@
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client" + "tailscale.com/ipn" "tailscale.com/tstest" ) diff --git a/cmd/k8s-operator/e2e/proxygrouppolicy_test.go b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go new file mode 100644 index 000000000..e2c7588c2 --- /dev/null +++ b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go @@ -0,0 +1,143 @@ +package e2e + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" +) + +// See [TestMain] for test requirements. +func TestProxyGroupPolicy(t *testing.T) { + // Apply deny-all policy + denyAllPolicy := &tsapi.ProxyGroupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + } + + createAndCleanup(t, kubeClient, denyAllPolicy) + + // Attempt to create an egress Service within the default namespace, the above policy should + // reject it. + egressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "egress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/tailnet-fqdn": "test.something.ts.net", + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "placeholder", + Type: corev1.ServiceTypeExternalName, + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + if err := createAndCleanupErr(t, kubeClient, egressService); err == nil { + t.Fatal("expected error when creating egress Service") + } + + // Attempt to create an ingress Service within the default namespace, the above policy should + // reject it. + ingressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + if err := createAndCleanupErr(t, kubeClient, ingressService); err == nil { + t.Fatal("expected error when creating ingress Service") + } + + // Attempt to create an Ingress within the default namespace, the above policy should reject it + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "nginx", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + { + Hosts: []string{"nginx"}, + }, + }, + }, + } + + if err := createAndCleanupErr(t, kubeClient, ingress); err == nil { + t.Fatal("expected error when creating ingress Service") + } + + // Add policy to allow ingress/egress using the "test" proxy-group. This should be merged with the deny-all + // policy so they do not conflict. 
+	allowTestPolicy := &tsapi.ProxyGroupPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "allow-test",
+			Namespace: metav1.NamespaceDefault,
+		},
+		Spec: tsapi.ProxyGroupPolicySpec{
+			Ingress: []string{"test"},
+			Egress:  []string{"test"},
+		},
+	}
+
+	createAndCleanup(t, kubeClient, allowTestPolicy)
+
+	// With this policy in place, the ingress/egress resources from above should now be admitted.
+	if err := createAndCleanupErr(t, kubeClient, egressService); err != nil {
+		t.Fatalf("expected no error when creating egress Service, got %v", err)
+	}
+
+	if err := createAndCleanupErr(t, kubeClient, ingressService); err != nil {
+		t.Fatalf("expected no error when creating ingress Service, got %v", err)
+	}
+
+	if err := createAndCleanupErr(t, kubeClient, ingress); err != nil {
+		t.Fatalf("expected no error when creating Ingress, got %v", err)
+	}
+}
diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go
index baf763ac6..845a59145 100644
--- a/cmd/k8s-operator/e2e/setup.go
+++ b/cmd/k8s-operator/e2e/setup.go
@@ -52,6 +52,7 @@
 	"sigs.k8s.io/kind/pkg/cluster"
 	"sigs.k8s.io/kind/pkg/cluster/nodeutils"
 	"sigs.k8s.io/kind/pkg/cmd"
+	"tailscale.com/internal/client/tailscale"
 	"tailscale.com/ipn"
 	"tailscale.com/ipn/store/mem"
diff --git a/cmd/k8s-operator/e2e/ssh.go b/cmd/k8s-operator/e2e/ssh.go
index 8000d1326..371c44f9d 100644
--- a/cmd/k8s-operator/e2e/ssh.go
+++ b/cmd/k8s-operator/e2e/ssh.go
@@ -24,6 +24,7 @@
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	tailscaleroot "tailscale.com"
 	"tailscale.com/types/ptr"
 )
diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go
index 4f48c1812..81f62d477 100644
--- a/cmd/k8s-operator/operator.go
+++ b/cmd/k8s-operator/operator.go
@@ -54,6 +54,7 @@
 	"tailscale.com/ipn/store/kubestore"
 	apiproxy "tailscale.com/k8s-operator/api-proxy"
 	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
+	"tailscale.com/k8s-operator/reconciler/proxygrouppolicy"
 	"tailscale.com/k8s-operator/reconciler/tailnet"
 	"tailscale.com/kube/kubetypes"
 	"tailscale.com/tsnet"
@@ -337,6 +338,14 @@ func runReconcilers(opts reconcilerOpts) {
 		startlog.Fatalf("could not register tailnet reconciler: %v", err)
 	}
 
+	proxyGroupPolicyOptions := proxygrouppolicy.ReconcilerOptions{
+		Client: mgr.GetClient(),
+	}
+
+	if err = proxygrouppolicy.NewReconciler(proxyGroupPolicyOptions).Register(mgr); err != nil {
+		startlog.Fatalf("could not register proxygrouppolicy reconciler: %v", err)
+	}
+
 	svcFilter := handler.EnqueueRequestsFromMapFunc(serviceHandler)
 	svcChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("svc"))
 	// If a ProxyClass changes, enqueue all Services labeled with that
diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go
new file mode 100644
index 000000000..1f5aa3982
--- /dev/null
+++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go
@@ -0,0 +1,387 @@
+// Copyright (c) Tailscale Inc & contributors
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build !plan9
+
+// Package proxygrouppolicy provides reconciliation logic for the ProxyGroupPolicy custom resource definition. It is
+// responsible for generating ValidatingAdmissionPolicy resources that restrict the set of ProxyGroup names that may
+// be referenced by Service and Ingress resources via the "tailscale.com/proxy-group" annotation.
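+//
+// An illustrative ProxyGroupPolicy (field casing here assumes the CRD's
+// lower-cased JSON tags) that permits only a hypothetical "prod" ProxyGroup for
+// both ingress and egress within its namespace:
+//
+//	apiVersion: tailscale.com/v1alpha1
+//	kind: ProxyGroupPolicy
+//	metadata:
+//	  name: allow-prod
+//	  namespace: default
+//	spec:
+//	  ingress: ["prod"]
+//	  egress: ["prod"]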
+package proxygrouppolicy
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	admr "k8s.io/api/admissionregistration/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
+	"tailscale.com/types/ptr"
+	"tailscale.com/util/set"
+)
+
+type (
+	// The Reconciler type is a reconcile.Reconciler implementation that manages the reconciliation of
+	// ProxyGroupPolicy custom resources.
+	Reconciler struct {
+		client.Client
+	}
+
+	// The ReconcilerOptions type contains configuration values for the Reconciler.
+	ReconcilerOptions struct {
+		// The client for interacting with the Kubernetes API.
+		Client client.Client
+	}
+)
+
+const reconcilerName = "proxygrouppolicy-reconciler"
+
+// NewReconciler returns a new instance of the Reconciler type. It watches for changes to ProxyGroupPolicy
+// custom resources. The ReconcilerOptions can be used to modify the behaviour of the Reconciler.
+func NewReconciler(options ReconcilerOptions) *Reconciler {
+	return &Reconciler{
+		Client: options.Client,
+	}
+}
+
+// Register registers the Reconciler with the given manager.Manager implementation.
+func (r *Reconciler) Register(mgr manager.Manager) error {
+	return builder.
+		ControllerManagedBy(mgr).
+		For(&tsapi.ProxyGroupPolicy{}).
+		Named(reconcilerName).
+		Complete(r)
+}
+
+func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+	// Rather than working on a single ProxyGroupPolicy resource, we list all that exist within the
+	// same namespace as the one we're reconciling so that we can merge them into a single pair of
+	// ValidatingAdmissionPolicy resources.
+	var policies tsapi.ProxyGroupPolicyList
+	if err := r.List(ctx, &policies, client.InNamespace(req.Namespace)); err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to list ProxyGroupPolicy resources in namespace %q: %w", req.Namespace, err)
+	}
+
+	if len(policies.Items) == 0 {
+		// With no ProxyGroupPolicy resources left in the namespace, delete any policies
+		// and bindings that may still exist.
+		return r.delete(ctx, req.Namespace)
+	}
+
+	return r.createOrUpdate(ctx, req.Namespace, policies)
+}
+
+func (r *Reconciler) delete(ctx context.Context, namespace string) (reconcile.Result, error) {
+	ingress := "ts-ingress-" + namespace
+	egress := "ts-egress-" + namespace
+
+	objects := []client.Object{
+		&admr.ValidatingAdmissionPolicy{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: ingress,
+			},
+		},
+		&admr.ValidatingAdmissionPolicyBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: ingress,
+			},
+		},
+		&admr.ValidatingAdmissionPolicy{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: egress,
+			},
+		},
+		&admr.ValidatingAdmissionPolicyBinding{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: egress,
+			},
+		},
+	}
+
+	for _, obj := range objects {
+		err := r.Delete(ctx, obj)
+		switch {
+		case apierrors.IsNotFound(err):
+			// A resource may have already been deleted in a previous reconciliation that failed for
+			// some reason, so we'll ignore it if it doesn't exist.
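+			// Ignoring NotFound also keeps deletion idempotent: re-running the
+			// reconcile after a partial failure converges without error.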
+			continue
+		case err != nil:
+			return reconcile.Result{}, fmt.Errorf("failed to delete %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err)
+		}
+	}
+
+	return reconcile.Result{}, nil
+}
+
+func (r *Reconciler) createOrUpdate(ctx context.Context, namespace string, policies tsapi.ProxyGroupPolicyList) (reconcile.Result, error) {
+	ingressNames := set.Set[string]{}
+	egressNames := set.Set[string]{}
+
+	// If this namespace has multiple ProxyGroupPolicy resources, we'll reduce them down to just their distinct
+	// egress/ingress names.
+	for _, policy := range policies.Items {
+		ingressNames.AddSlice(policy.Spec.Ingress)
+		egressNames.AddSlice(policy.Spec.Egress)
+	}
+
+	ingress, err := r.generateIngressPolicy(ctx, namespace, ingressNames)
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy: %w", err)
+	}
+
+	ingressBinding, err := r.generatePolicyBinding(ctx, namespace, ingress)
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy binding: %w", err)
+	}
+
+	egress, err := r.generateEgressPolicy(ctx, namespace, egressNames)
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to generate egress policy: %w", err)
+	}
+
+	egressBinding, err := r.generatePolicyBinding(ctx, namespace, egress)
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to generate egress policy binding: %w", err)
+	}
+
+	objects := []client.Object{
+		ingress,
+		ingressBinding,
+		egress,
+		egressBinding,
+	}
+
+	for _, obj := range objects {
+		// Attempt the update first: these resources are created once and then updated on every
+		// subsequent reconcile, so an update is the common case. If the resource does not exist
+		// yet, fall back to creating it.
+		err = r.Update(ctx, obj)
+		switch {
+		case apierrors.IsNotFound(err):
+			if err = r.Create(ctx, obj); err != nil {
+				return reconcile.Result{}, fmt.Errorf("failed to create %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err)
+			}
+		case err != nil:
+			return reconcile.Result{}, fmt.Errorf("failed to update %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err)
+		}
+	}
+
+	return reconcile.Result{}, nil
+}
+
+const (
+	// ingressCEL enforces proxy-group annotation rules for Ingress resources.
+	//
+	// Logic:
+	//
+	//   - If the object is NOT an Ingress → allow (this validation is irrelevant)
+	//   - If the object has no annotations, or the annotation is unset → allow (it is optional)
+	//   - If the annotation is present → its value must be in the allowlist
+	//
+	// Empty allowlist behavior:
+	// If the list is empty, any present annotation will fail membership,
+	// effectively acting as "deny-all".
+	ingressCEL = `request.kind.kind != "Ingress" || !has(object.metadata.annotations) || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]`
+
+	// ingressServiceCEL enforces proxy-group annotation rules for Services
+	// that are using the tailscale load balancer.
+	//
+	// Logic:
+	//
+	//   - If the object is NOT a Service → allow
+	//   - If Service does NOT use loadBalancerClass "tailscale" → allow
+	//     (egress policy will handle those)
+	//   - If the object has no annotations, or the annotation is unset → allow
+	//   - If annotation is present → must be in allowlist
+	//
+	// This makes ingress policy apply ONLY to tailscale Services.
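+	//
+	// Worked example, assuming a hypothetical allowlist of ["prod"]: a Service
+	// using loadBalancerClass "tailscale" and annotated
+	// tailscale.com/proxy-group: dev fails the final membership check and is
+	// rejected, while the same Service annotated prod, or carrying no annotation
+	// at all, is admitted.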
+	ingressServiceCEL = `request.kind.kind != "Service" || !(has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || !has(object.metadata.annotations) || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]`
+
+	// egressCEL enforces proxy-group annotation rules for Services that
+	// are NOT using the tailscale load balancer.
+	//
+	// Logic:
+	//
+	//   - If Service uses loadBalancerClass "tailscale" → allow
+	//     (ingress policy handles those)
+	//   - If the object has no annotations, or the annotation is unset → allow
+	//   - If annotation is present → must be in allowlist
+	//
+	// Empty allowlist behavior:
+	// Any present annotation is rejected ("deny-all").
+	//
+	// This expression is mutually exclusive with ingressServiceCEL,
+	// preventing policy conflicts.
+	egressCEL = `(has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || !has(object.metadata.annotations) || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]`
+)
+
+func (r *Reconciler) generateIngressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) {
+	name := "ts-ingress-" + namespace
+
+	var policy admr.ValidatingAdmissionPolicy
+	err := r.Get(ctx, client.ObjectKey{Name: name}, &policy)
+	switch {
+	case apierrors.IsNotFound(err):
+		break
+	case err != nil:
+		return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err)
+	}
+
+	return &admr.ValidatingAdmissionPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            name,
+			ResourceVersion: policy.ResourceVersion,
+		},
+		Spec: admr.ValidatingAdmissionPolicySpec{
+			FailurePolicy: ptr.To(admr.Fail),
+			MatchConstraints: &admr.MatchResources{
+				// The operator allows ingress via Ingress resources & Service resources (that use the "tailscale" load
+				// balancer class), so we have two resource rules here with multiple validation expressions that attempt
+				// to keep out of each other's way.
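+				// MatchConstraints cannot inspect spec fields, so both rules match
+				// broadly and the CEL expressions above narrow each one: the Ingress
+				// expression ignores Services entirely, and the Service expression
+				// ignores anything without the "tailscale" load balancer class.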
+				ResourceRules: []admr.NamedRuleWithOperations{
+					{
+						RuleWithOperations: admr.RuleWithOperations{
+							Operations: []admr.OperationType{
+								admr.Create,
+								admr.Update,
+							},
+							Rule: admr.Rule{
+								APIGroups:   []string{"networking.k8s.io"},
+								APIVersions: []string{"*"},
+								Resources:   []string{"ingresses"},
+							},
+						},
+					},
+					{
+						RuleWithOperations: admr.RuleWithOperations{
+							Operations: []admr.OperationType{
+								admr.Create,
+								admr.Update,
+							},
+							Rule: admr.Rule{
+								APIGroups:   []string{""},
+								APIVersions: []string{"v1"},
+								Resources:   []string{"services"},
+							},
+						},
+					},
+				},
+			},
+			Validations: []admr.Validation{
+				generateValidation(names, ingressCEL),
+				generateValidation(names, ingressServiceCEL),
+			},
+		},
+	}, nil
+}
+
+func (r *Reconciler) generateEgressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) {
+	name := "ts-egress-" + namespace
+
+	var policy admr.ValidatingAdmissionPolicy
+	err := r.Get(ctx, client.ObjectKey{Name: name}, &policy)
+	switch {
+	case apierrors.IsNotFound(err):
+		break
+	case err != nil:
+		return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err)
+	}
+
+	return &admr.ValidatingAdmissionPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            name,
+			ResourceVersion: policy.ResourceVersion,
+		},
+		Spec: admr.ValidatingAdmissionPolicySpec{
+			FailurePolicy: ptr.To(admr.Fail),
+			MatchConstraints: &admr.MatchResources{
+				ResourceRules: []admr.NamedRuleWithOperations{
+					{
+						RuleWithOperations: admr.RuleWithOperations{
+							Operations: []admr.OperationType{
+								admr.Create,
+								admr.Update,
+							},
+							Rule: admr.Rule{
+								APIGroups:   []string{""},
+								APIVersions: []string{"v1"},
+								Resources:   []string{"services"},
+							},
+						},
+					},
+				},
+			},
+			Validations: []admr.Validation{
+				generateValidation(names, egressCEL),
+			},
+		},
+	}, nil
+}
+
+const (
+	denyMessage   = `Annotation "tailscale.com/proxy-group" cannot be used on this resource in this namespace`
+	messageFormat = `If set, annotation "tailscale.com/proxy-group" must be one of [%s]`
+)
+
+func generateValidation(names set.Set[string], format string) admr.Validation {
+	values := names.Slice()
+
+	// We use a sort here so that the order of the proxy-group names is consistent
+	// across reconciliation loops.
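+	//
+	// Without the sort, the map-backed set would yield names in a random order,
+	// changing the generated CEL expression and triggering a spurious update of
+	// the ValidatingAdmissionPolicy on every pass.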
+ sort.Strings(values) + + quoted := make([]string, len(values)) + for i, v := range values { + quoted[i] = strconv.Quote(v) + } + + joined := strings.Join(quoted, ",") + message := fmt.Sprintf(messageFormat, strings.Join(values, ", ")) + if len(values) == 0 { + message = denyMessage + } + + return admr.Validation{ + Expression: fmt.Sprintf(format, joined), + Message: message, + } +} + +func (r *Reconciler) generatePolicyBinding(ctx context.Context, namespace string, policy *admr.ValidatingAdmissionPolicy) (*admr.ValidatingAdmissionPolicyBinding, error) { + var binding admr.ValidatingAdmissionPolicyBinding + err := r.Get(ctx, client.ObjectKey{Name: policy.Name}, &binding) + switch { + case apierrors.IsNotFound(err): + break + case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicyBinding %q: %w", policy.Name, err) + } + + return &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: policy.Name, + ResourceVersion: binding.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicyBindingSpec{ + PolicyName: policy.Name, + ValidationActions: []admr.ValidationAction{ + admr.Deny, + }, + MatchResources: &admr.MatchResources{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": namespace, + }, + }, + }, + }, + }, nil +} diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go new file mode 100644 index 000000000..e00468cc8 --- /dev/null +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go @@ -0,0 +1,214 @@ +package proxygrouppolicy_test + +import ( + "slices" + "strings" + "testing" + + admr "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" +) + +func TestReconciler_Reconcile(t *testing.T) { + t.Parallel() + + tt := []struct { + Name string + Request reconcile.Request + ExpectedPolicyCount int + ExistingResources []client.Object + ExpectsError bool + }{ + { + Name: "single policy, denies all", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + }, + }, + { + Name: "multiple policies merged", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-one", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{ + "test-ingress", + }, + 
Egress: []string{},
+					},
+				},
+			},
+		},
+		{
+			Name:                "no policies, no child resources",
+			ExpectedPolicyCount: 0,
+			Request: reconcile.Request{
+				NamespacedName: types.NamespacedName{
+					Name:      "deny-all",
+					Namespace: metav1.NamespaceDefault,
+				},
+			},
+		},
+	}
+
+	for _, tc := range tt {
+		t.Run(tc.Name, func(t *testing.T) {
+			bldr := fake.NewClientBuilder().WithScheme(tsapi.GlobalScheme)
+			bldr = bldr.WithObjects(tc.ExistingResources...)
+
+			fc := bldr.Build()
+			opts := proxygrouppolicy.ReconcilerOptions{
+				Client: fc,
+			}
+
+			reconciler := proxygrouppolicy.NewReconciler(opts)
+			_, err := reconciler.Reconcile(t.Context(), tc.Request)
+			if tc.ExpectsError && err == nil {
+				t.Fatalf("expected error, got none")
+			}
+
+			if !tc.ExpectsError && err != nil {
+				t.Fatalf("expected no error, got %v", err)
+			}
+
+			var policies admr.ValidatingAdmissionPolicyList
+			if err = fc.List(t.Context(), &policies); err != nil {
+				t.Fatal(err)
+			}
+
+			if len(policies.Items) != tc.ExpectedPolicyCount {
+				t.Fatalf("expected %d ValidatingAdmissionPolicy resources, got %d", tc.ExpectedPolicyCount, len(policies.Items))
+			}
+
+			var bindings admr.ValidatingAdmissionPolicyBindingList
+			if err = fc.List(t.Context(), &bindings); err != nil {
+				t.Fatal(err)
+			}
+
+			if len(bindings.Items) != tc.ExpectedPolicyCount {
+				t.Fatalf("expected %d ValidatingAdmissionPolicyBinding resources, got %d", tc.ExpectedPolicyCount, len(bindings.Items))
+			}
+
+			for _, binding := range bindings.Items {
+				actual, ok := binding.Spec.MatchResources.NamespaceSelector.MatchLabels["kubernetes.io/metadata.name"]
+				if !ok || actual != metav1.NamespaceDefault {
+					t.Fatalf("expected binding to be for default namespace, got %v", actual)
+				}
+
+				if !slices.Contains(binding.Spec.ValidationActions, admr.Deny) {
+					t.Fatalf("expected binding to deny, got %v", binding.Spec.ValidationActions)
+				}
+			}
+
+			for _, policy := range policies.Items {
+				// Each ValidatingAdmissionPolicy must be set to fail (rejecting resources).
+				if policy.Spec.FailurePolicy == nil {
+					t.Fatal("expected fail policy, got nil")
+				}
+
+				if *policy.Spec.FailurePolicy != admr.Fail {
+					t.Fatalf("expected fail policy, got %v", *policy.Spec.FailurePolicy)
+				}
+
+				// Each ValidatingAdmissionPolicy must have a matching ValidatingAdmissionPolicyBinding.
+				bound := slices.ContainsFunc(bindings.Items, func(obj admr.ValidatingAdmissionPolicyBinding) bool {
+					return obj.Spec.PolicyName == policy.Name
+				})
+				if !bound {
+					t.Fatalf("expected policy %s to be bound, but wasn't", policy.Name)
+				}
+
+				// Each ValidatingAdmissionPolicy must be set to evaluate on creation and update of resources.
+				for _, rule := range policy.Spec.MatchConstraints.ResourceRules {
+					if !slices.Contains(rule.Operations, admr.Update) {
+						t.Fatal("expected rule to act on update, but doesn't")
+					}
+
+					if !slices.Contains(rule.Operations, admr.Create) {
+						t.Fatal("expected rule to act on create, but doesn't")
+					}
+				}
+
+				// Egress policies should only act on Service resources.
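+				// The reconciler names these "ts-egress-<namespace>" and
+				// "ts-ingress-<namespace>", which is what the substring checks below key off.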
+				if strings.Contains(policy.Name, "egress") {
+					if len(policy.Spec.MatchConstraints.ResourceRules) != 1 {
+						t.Fatalf("expected exactly one matching resource, got %d", len(policy.Spec.MatchConstraints.ResourceRules))
+					}
+
+					rule := policy.Spec.MatchConstraints.ResourceRules[0]
+
+					if !slices.Contains(rule.Resources, "services") {
+						t.Fatal("expected egress rule to act on services, but doesn't")
+					}
+
+					if len(policy.Spec.Validations) != 1 {
+						t.Fatalf("expected exactly one validation, got %d", len(policy.Spec.Validations))
+					}
+				}
+
+				// Ingress policies should act on both Ingress and Service resources.
+				if strings.Contains(policy.Name, "ingress") {
+					if len(policy.Spec.MatchConstraints.ResourceRules) != 2 {
+						t.Fatalf("expected exactly two matching resources, got %d", len(policy.Spec.MatchConstraints.ResourceRules))
+					}
+
+					ingressRule := policy.Spec.MatchConstraints.ResourceRules[0]
+					if !slices.Contains(ingressRule.Resources, "ingresses") {
+						t.Fatal("expected the ingress policy's Ingress rule to act on ingresses, but doesn't")
+					}
+
+					serviceRule := policy.Spec.MatchConstraints.ResourceRules[1]
+					if !slices.Contains(serviceRule.Resources, "services") {
+						t.Fatal("expected the ingress policy's Service rule to act on services, but doesn't")
+					}
+
+					if len(policy.Spec.Validations) != 2 {
+						t.Fatalf("expected exactly two validations, got %d", len(policy.Spec.Validations))
+					}
+				}
+			}
+		})
+	}
+}