From eb3d35c8b59c853eec0e66a48e22faacc7a82859 Mon Sep 17 00:00:00 2001 From: David Bond Date: Wed, 18 Feb 2026 09:34:55 +0000 Subject: [PATCH] cmd/k8s-operator,k8s-operator: define ProxyGroupPolicy reconciler (#18654) This commit implements a reconciler for the new `ProxyGroupPolicy` custom resource. When created, all `ProxyGroupPolicy` resources within the same namespace are merged into two `ValidatingAdmissionPolicy` resources, one for egress and one for ingress. These policies use CEL expressions to limit the usage of the "tailscale.com/proxy-group" annotation on `Service` and `Ingress` resources on create & update. Included here is also a new e2e test that ensures that resources that violate the policy return an error on creation, and that once the policy is changed to allow them they can be created. Closes: https://github.com/tailscale/corp/issues/36830 Signed-off-by: David Bond --- cmd/k8s-operator/depaware.txt | 1 + .../deploy/chart/templates/.gitignore | 1 + .../deploy/chart/templates/operator-rbac.yaml | 6 + .../tailscale.com_proxygrouppolicies.yaml | 4 - .../deploy/manifests/operator.yaml | 158 +++++++ cmd/k8s-operator/e2e/doc.go | 2 +- cmd/k8s-operator/e2e/ingress_test.go | 1 + cmd/k8s-operator/e2e/main_test.go | 19 +- cmd/k8s-operator/e2e/pebble.go | 1 + cmd/k8s-operator/e2e/proxy_test.go | 1 + cmd/k8s-operator/e2e/proxygrouppolicy_test.go | 161 ++++++++ cmd/k8s-operator/e2e/setup.go | 1 + cmd/k8s-operator/e2e/ssh.go | 1 + cmd/k8s-operator/generate/main.go | 33 +- cmd/k8s-operator/operator.go | 9 + .../apis/v1alpha1/types_proxygrouppolicy.go | 4 - .../proxygrouppolicy/proxygrouppolicy.go | 391 ++++++++++++++++++ .../proxygrouppolicy/proxygrouppolicy_test.go | 217 ++++++++++ 18 files changed, 987 insertions(+), 24 deletions(-) create mode 100644 cmd/k8s-operator/e2e/proxygrouppolicy_test.go create mode 100644 k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go create mode 100644 k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 677891ad7..cd87d4987 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -832,6 +832,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/k8s-operator/apis from tailscale.com/k8s-operator/apis/v1alpha1 tailscale.com/k8s-operator/apis/v1alpha1 from tailscale.com/cmd/k8s-operator+ tailscale.com/k8s-operator/reconciler from tailscale.com/k8s-operator/reconciler/tailnet + tailscale.com/k8s-operator/reconciler/proxygrouppolicy from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/reconciler/tailnet from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/sessionrecording from tailscale.com/k8s-operator/api-proxy tailscale.com/k8s-operator/sessionrecording/spdy from tailscale.com/k8s-operator/sessionrecording diff --git a/cmd/k8s-operator/deploy/chart/templates/.gitignore b/cmd/k8s-operator/deploy/chart/templates/.gitignore index f480bb57d..185ea9e2b 100644 --- a/cmd/k8s-operator/deploy/chart/templates/.gitignore +++ b/cmd/k8s-operator/deploy/chart/templates/.gitignore @@ -9,3 +9,4 @@ /proxygroup.yaml /recorder.yaml /tailnet.yaml +/proxygrouppolicy.yaml diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 92decef17..4d59b4aad 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -40,6 +40,9 @@ 
rules: - apiGroups: ["tailscale.com"] resources: ["tailnets", "tailnets/status"] verbs: ["get", "list", "watch", "update"] +- apiGroups: ["tailscale.com"] + resources: ["proxygrouppolicies", "proxygrouppolicies/status"] + verbs: ["get", "list", "watch", "update"] - apiGroups: ["tailscale.com"] resources: ["recorders", "recorders/status"] verbs: ["get", "list", "watch", "update"] @@ -47,6 +50,9 @@ rules: resources: ["customresourcedefinitions"] verbs: ["get", "list", "watch"] resourceNames: ["servicemonitors.monitoring.coreos.com"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingadmissionpolicies", "validatingadmissionpolicybindings"] + verbs: ["list", "create", "delete", "update", "get", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml index 51edcb56f..d1425fba8 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxygrouppolicies.yaml @@ -19,10 +19,6 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date - - description: Status of the deployed ProxyGroupPolicy resources. - jsonPath: .status.conditions[?(@.type == "ProxyGroupPolicyReady")].reason - name: Status - type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index b31e45eb7..597641bde 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -3290,6 +3290,142 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.0 + name: proxygrouppolicies.tailscale.com +spec: + group: tailscale.com + names: + kind: ProxyGroupPolicy + listKind: ProxyGroupPolicyList + plural: proxygrouppolicies + shortNames: + - pgp + singular: proxygrouppolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + Spec describes the desired state of the ProxyGroupPolicy. + More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + egress: + description: |- + Names of ProxyGroup resources that can be used by Service resources within this namespace. An empty list + denotes that no egress via ProxyGroups is allowed within this namespace. 
+ items: + type: string + type: array + ingress: + description: |- + Names of ProxyGroup resources that can be used by Ingress resources within this namespace. An empty list + denotes that no ingress via ProxyGroups is allowed within this namespace. + items: + type: string + type: array + type: object + status: + description: |- + Status describes the status of the ProxyGroupPolicy. This is set + and managed by the Tailscale operator. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.17.0 @@ -5318,6 +5454,16 @@ rules: - list - watch - update + - apiGroups: + - tailscale.com + resources: + - proxygrouppolicies + - proxygrouppolicies/status + verbs: + - get + - list + - watch + - update - apiGroups: - tailscale.com resources: @@ -5338,6 +5484,18 @@ rules: - get - list - watch + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingadmissionpolicies + - validatingadmissionpolicybindings + verbs: + - list + - create + - delete + - update + - get + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/cmd/k8s-operator/e2e/doc.go b/cmd/k8s-operator/e2e/doc.go index c0cc36316..27d10e637 100644 --- a/cmd/k8s-operator/e2e/doc.go +++ b/cmd/k8s-operator/e2e/doc.go @@ -24,5 +24,5 @@ // // * go // * container runtime with the docker daemon API available -// * devcontrol: ./tool/go run ./cmd/devcontrol --generate-test-devices=k8s-operator-e2e --scenario-output-dir=/tmp/k8s-operator-e2e --test-dns=http://localhost:8055 +// * devcontrol: ./tool/go run --tags=tailscale_saas ./cmd/devcontrol --generate-test-devices=k8s-operator-e2e --scenario-output-dir=/tmp/k8s-operator-e2e --test-dns=http://localhost:8055 package e2e diff --git a/cmd/k8s-operator/e2e/ingress_test.go b/cmd/k8s-operator/e2e/ingress_test.go index eb05efa0c..5339b0583 100644 --- a/cmd/k8s-operator/e2e/ingress_test.go +++ b/cmd/k8s-operator/e2e/ingress_test.go @@ -14,6 +14,7 @@ corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + kube "tailscale.com/k8s-operator" "tailscale.com/tstest" "tailscale.com/types/ptr" diff --git a/cmd/k8s-operator/e2e/main_test.go b/cmd/k8s-operator/e2e/main_test.go index cb5c35c00..02f614014 100644 --- a/cmd/k8s-operator/e2e/main_test.go +++ b/cmd/k8s-operator/e2e/main_test.go @@ -54,12 +54,29 @@ func createAndCleanup(t *testing.T, cl client.Client, obj client.Object) { t.Cleanup(func() { // Use context.Background() for cleanup, as t.Context() is cancelled // just before cleanup functions are called. 
- if err := cl.Delete(context.Background(), obj); err != nil { + if err = cl.Delete(context.Background(), obj); err != nil { t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) } }) } +func createAndCleanupErr(t *testing.T, cl client.Client, obj client.Object) error { + t.Helper() + + err := cl.Create(t.Context(), obj) + if err != nil { + return err + } + + t.Cleanup(func() { + if err = cl.Delete(context.Background(), obj); err != nil { + t.Errorf("error cleaning up %s %s/%s: %s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName(), err) + } + }) + + return nil +} + func get(ctx context.Context, cl client.Client, obj client.Object) error { return cl.Get(ctx, client.ObjectKeyFromObject(obj), obj) } diff --git a/cmd/k8s-operator/e2e/pebble.go b/cmd/k8s-operator/e2e/pebble.go index a3ccb50cd..5fcb35e05 100644 --- a/cmd/k8s-operator/e2e/pebble.go +++ b/cmd/k8s-operator/e2e/pebble.go @@ -12,6 +12,7 @@ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" + "tailscale.com/types/ptr" ) diff --git a/cmd/k8s-operator/e2e/proxy_test.go b/cmd/k8s-operator/e2e/proxy_test.go index f7d11d278..2d4fa53cc 100644 --- a/cmd/k8s-operator/e2e/proxy_test.go +++ b/cmd/k8s-operator/e2e/proxy_test.go @@ -16,6 +16,7 @@ apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + "tailscale.com/ipn" "tailscale.com/tstest" ) diff --git a/cmd/k8s-operator/e2e/proxygrouppolicy_test.go b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go new file mode 100644 index 000000000..f8126499b --- /dev/null +++ b/cmd/k8s-operator/e2e/proxygrouppolicy_test.go @@ -0,0 +1,161 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package e2e + +import ( + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" +) + +// See [TestMain] for test requirements. +func TestProxyGroupPolicy(t *testing.T) { + if tnClient == nil { + t.Skip("TestProxyGroupPolicy requires a working tailnet client") + } + + // Apply deny-all policy + denyAllPolicy := &tsapi.ProxyGroupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + } + + createAndCleanup(t, kubeClient, denyAllPolicy) + <-time.After(time.Second * 2) + + // Attempt to create an egress Service within the default namespace, the above policy should + // reject it. 
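+	// The Service below is an ExternalName Service carrying the
+	// "tailscale.com/tailnet-fqdn" annotation; it sets neither loadBalancerClass
+	// "tailscale" nor "tailscale.com/expose", so it is evaluated by the generated
+	// egress policy's CEL expression rather than the ingress one.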
+ egressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "egress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/tailnet-fqdn": "test.something.ts.net", + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + ExternalName: "placeholder", + Type: corev1.ServiceTypeExternalName, + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + err := createAndCleanupErr(t, kubeClient, egressService) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating egress service") + } + + // Attempt to create an ingress Service within the default namespace, the above policy should + // reject it. + ingressService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeLoadBalancer, + LoadBalancerClass: ptr.To("tailscale"), + Ports: []corev1.ServicePort{ + { + Port: 8080, + Protocol: corev1.ProtocolTCP, + Name: "http", + }, + }, + }, + } + + err = createAndCleanupErr(t, kubeClient, ingressService) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating ingress service") + } + + // Attempt to create an Ingress within the default namespace, the above policy should reject it + ingress := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ingress-to-proxy-group", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "nginx", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + { + Hosts: []string{"nginx"}, + }, + }, + }, + } + + err = createAndCleanupErr(t, kubeClient, ingress) + switch { + case err != nil && strings.Contains(err.Error(), "ValidatingAdmissionPolicy"): + case err != nil: + t.Fatalf("expected forbidden error, got: %v", err) + default: + t.Fatal("expected error when creating ingress") + } + + // Add policy to allow ingress/egress using the "test" proxy-group. This should be merged with the deny-all + // policy so they do not conflict. + allowTestPolicy := &tsapi.ProxyGroupPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-test", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{"test"}, + Egress: []string{"test"}, + }, + } + + createAndCleanup(t, kubeClient, allowTestPolicy) + <-time.After(time.Second * 2) + + // With this policy in place, the above ingress/egress resources should be allowed to be created. 
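+	// createAndCleanup fails the test on any create error, so no assertions are
+	// needed beyond these calls succeeding.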
+ createAndCleanup(t, kubeClient, egressService) + createAndCleanup(t, kubeClient, ingressService) + createAndCleanup(t, kubeClient, ingress) +} diff --git a/cmd/k8s-operator/e2e/setup.go b/cmd/k8s-operator/e2e/setup.go index baf763ac6..845a59145 100644 --- a/cmd/k8s-operator/e2e/setup.go +++ b/cmd/k8s-operator/e2e/setup.go @@ -52,6 +52,7 @@ "sigs.k8s.io/kind/pkg/cluster" "sigs.k8s.io/kind/pkg/cluster/nodeutils" "sigs.k8s.io/kind/pkg/cmd" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" diff --git a/cmd/k8s-operator/e2e/ssh.go b/cmd/k8s-operator/e2e/ssh.go index 8000d1326..371c44f9d 100644 --- a/cmd/k8s-operator/e2e/ssh.go +++ b/cmd/k8s-operator/e2e/ssh.go @@ -24,6 +24,7 @@ "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" + tailscaleroot "tailscale.com" "tailscale.com/types/ptr" ) diff --git a/cmd/k8s-operator/generate/main.go b/cmd/k8s-operator/generate/main.go index 9a910da3e..840812ea3 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -20,20 +20,22 @@ ) const ( - operatorDeploymentFilesPath = "cmd/k8s-operator/deploy" - connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml" - proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" - dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" - recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" - proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" - tailnetCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_tailnets.yaml" - helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" - connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" - proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" - dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" - recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" - proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" - tailnetCRDHelmTemplatePath = helmTemplatesPath + "/tailnet.yaml" + operatorDeploymentFilesPath = "cmd/k8s-operator/deploy" + connectorCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_connectors.yaml" + proxyClassCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxyclasses.yaml" + dnsConfigCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_dnsconfigs.yaml" + recorderCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_recorders.yaml" + proxyGroupCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygroups.yaml" + tailnetCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_tailnets.yaml" + proxyGroupPolicyCRDPath = operatorDeploymentFilesPath + "/crds/tailscale.com_proxygrouppolicies.yaml" + helmTemplatesPath = operatorDeploymentFilesPath + "/chart/templates" + connectorCRDHelmTemplatePath = helmTemplatesPath + "/connector.yaml" + proxyClassCRDHelmTemplatePath = helmTemplatesPath + "/proxyclass.yaml" + dnsConfigCRDHelmTemplatePath = helmTemplatesPath + "/dnsconfig.yaml" + recorderCRDHelmTemplatePath = helmTemplatesPath + "/recorder.yaml" + proxyGroupCRDHelmTemplatePath = helmTemplatesPath + "/proxygroup.yaml" + tailnetCRDHelmTemplatePath = helmTemplatesPath + "/tailnet.yaml" + proxyGroupPolicyCRDHelmTemplatePath = helmTemplatesPath + "/proxygrouppolicy.yaml" helmConditionalStart = "{{ if .Values.installCRDs -}}\n" 
helmConditionalEnd = "{{- end -}}" @@ -157,6 +159,7 @@ func generate(baseDir string) error { {recorderCRDPath, recorderCRDHelmTemplatePath}, {proxyGroupCRDPath, proxyGroupCRDHelmTemplatePath}, {tailnetCRDPath, tailnetCRDHelmTemplatePath}, + {proxyGroupPolicyCRDPath, proxyGroupPolicyCRDHelmTemplatePath}, } { if err := addCRDToHelm(crd.crdPath, crd.templatePath); err != nil { return fmt.Errorf("error adding %s CRD to Helm templates: %w", crd.crdPath, err) @@ -173,6 +176,8 @@ func cleanup(baseDir string) error { dnsConfigCRDHelmTemplatePath, recorderCRDHelmTemplatePath, proxyGroupCRDHelmTemplatePath, + tailnetCRDHelmTemplatePath, + proxyGroupPolicyCRDHelmTemplatePath, } { if err := os.Remove(filepath.Join(baseDir, path)); err != nil && !os.IsNotExist(err) { return fmt.Errorf("error cleaning up %s: %w", path, err) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 4f48c1812..81f62d477 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -54,6 +54,7 @@ "tailscale.com/ipn/store/kubestore" apiproxy "tailscale.com/k8s-operator/api-proxy" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" "tailscale.com/k8s-operator/reconciler/tailnet" "tailscale.com/kube/kubetypes" "tailscale.com/tsnet" @@ -337,6 +338,14 @@ func runReconcilers(opts reconcilerOpts) { startlog.Fatalf("could not register tailnet reconciler: %v", err) } + proxyGroupPolicyOptions := proxygrouppolicy.ReconcilerOptions{ + Client: mgr.GetClient(), + } + + if err = proxygrouppolicy.NewReconciler(proxyGroupPolicyOptions).Register(mgr); err != nil { + startlog.Fatalf("could not register proxygrouppolicy reconciler: %v", err) + } + svcFilter := handler.EnqueueRequestsFromMapFunc(serviceHandler) svcChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("svc")) // If a ProxyClass changes, enqueue all Services labeled with that diff --git a/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go index dd06380c2..551811693 100644 --- a/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go +++ b/k8s-operator/apis/v1alpha1/types_proxygrouppolicy.go @@ -18,7 +18,6 @@ // +kubebuilder:subresource:status // +kubebuilder:resource:scope=Namespaced,shortName=pgp // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "ProxyGroupPolicyReady")].reason`,description="Status of the deployed ProxyGroupPolicy resources." type ProxyGroupPolicy struct { metav1.TypeMeta `json:",inline"` @@ -62,6 +61,3 @@ type ProxyGroupPolicyStatus struct { // +optional Conditions []metav1.Condition `json:"conditions"` } - -// ProxyGroupPolicyReady is set to True if the ProxyGroupPolicy is available for use by operator workloads. -const ProxyGroupPolicyReady ConditionType = "ProxyGroupPolicyReady" diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go new file mode 100644 index 000000000..0541a5cf3 --- /dev/null +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy.go @@ -0,0 +1,391 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +// Package proxygrouppolicy provides reconciliation logic for the ProxyGroupPolicy custom resource definition. 
It is +// responsible for generating ValidatingAdmissionPolicy resources that limit users to a set number of ProxyGroup +// names that can be used within Service and Ingress resources via the "tailscale.com/proxy-group" annotation. +package proxygrouppolicy + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + + admr "k8s.io/api/admissionregistration/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/types/ptr" + "tailscale.com/util/set" +) + +type ( + // The Reconciler type is a reconcile.TypedReconciler implementation used to manage the reconciliation of + // ProxyGroupPolicy custom resources. + Reconciler struct { + client.Client + } + + // The ReconcilerOptions type contains configuration values for the Reconciler. + ReconcilerOptions struct { + // The client for interacting with the Kubernetes API. + Client client.Client + } +) + +const reconcilerName = "proxygrouppolicy-reconciler" + +// NewReconciler returns a new instance of the Reconciler type. It watches specifically for changes to ProxyGroupPolicy +// custom resources. The ReconcilerOptions can be used to modify the behaviour of the Reconciler. +func NewReconciler(options ReconcilerOptions) *Reconciler { + return &Reconciler{ + Client: options.Client, + } +} + +// Register the Reconciler onto the given manager.Manager implementation. +func (r *Reconciler) Register(mgr manager.Manager) error { + return builder. + ControllerManagedBy(mgr). + For(&tsapi.ProxyGroupPolicy{}). + Named(reconcilerName). + Complete(r) +} + +func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + // Rather than working on a single ProxyGroupPolicy resource, we list all that exist within the + // same namespace as the one we're reconciling so that we can merge them into a single pair of + // ValidatingAdmissionPolicy resources. + var policies tsapi.ProxyGroupPolicyList + if err := r.List(ctx, &policies, client.InNamespace(req.Namespace)); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to list ProxyGroupPolicy resources %q: %w", req.NamespacedName, err) + } + + if len(policies.Items) == 0 { + // If we've got no items in the list, we go and delete any policies and bindings that + // may exist. + return r.delete(ctx, req.Namespace) + } + + return r.createOrUpdate(ctx, req.Namespace, policies) +} + +func (r *Reconciler) delete(ctx context.Context, namespace string) (reconcile.Result, error) { + ingress := "ts-ingress-" + namespace + egress := "ts-egress-" + namespace + + objects := []client.Object{ + &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingress, + }, + }, + &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingress, + }, + }, + &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: egress, + }, + }, + &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: egress, + }, + }, + } + + for _, obj := range objects { + err := r.Delete(ctx, obj) + switch { + case apierrors.IsNotFound(err): + // A resource may have already been deleted in a previous reconciliation that failed for + // some reason, so we'll ignore it if it doesn't exist. 
+ continue + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to delete %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + } + + return reconcile.Result{}, nil +} + +func (r *Reconciler) createOrUpdate(ctx context.Context, namespace string, policies tsapi.ProxyGroupPolicyList) (reconcile.Result, error) { + ingressNames := set.Set[string]{} + egressNames := set.Set[string]{} + + // If this namespace has multiple ProxyGroupPolicy resources, we'll reduce them down to just their distinct + // egress/ingress names. + for _, policy := range policies.Items { + ingressNames.AddSlice(policy.Spec.Ingress) + egressNames.AddSlice(policy.Spec.Egress) + } + + ingress, err := r.generateIngressPolicy(ctx, namespace, ingressNames) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy: %w", err) + } + + ingressBinding, err := r.generatePolicyBinding(ctx, namespace, ingress) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate ingress policy binding: %w", err) + } + + egress, err := r.generateEgressPolicy(ctx, namespace, egressNames) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate egress policy: %w", err) + } + + egressBinding, err := r.generatePolicyBinding(ctx, namespace, egress) + if err != nil { + return reconcile.Result{}, fmt.Errorf("failed to generate egress policy binding: %w", err) + } + + objects := []client.Object{ + ingress, + ingressBinding, + egress, + egressBinding, + } + + for _, obj := range objects { + // Attempt to perform an update first as we'll only create these once and continually update them, so it's + // more likely that an update is needed instead of creation. If the resource does not exist, we'll + // create it. + err = r.Update(ctx, obj) + switch { + case apierrors.IsNotFound(err): + if err = r.Create(ctx, obj); err != nil { + return reconcile.Result{}, fmt.Errorf("failed to create %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + case err != nil: + return reconcile.Result{}, fmt.Errorf("failed to update %s %q: %w", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetName(), err) + } + } + + return reconcile.Result{}, nil +} + +const ( + // ingressCEL enforces proxy-group annotation rules for Ingress resources. + // + // Logic: + // + // - If the object is NOT an Ingress → allow (this validation is irrelevant) + // - If the annotation is absent → allow (annotation is optional) + // - If the annotation is present → its value must be in the allowlist + // + // Empty allowlist behavior: + // If the list is empty, any present annotation will fail membership, + // effectively acting as "deny-all". + ingressCEL = `request.kind.kind != "Ingress" || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]` + + // ingressServiceCEL enforces proxy-group annotation rules for Services + // that are using the tailscale load balancer. + // + // Logic: + // + // - If the object is NOT a Service → allow + // - If Service does NOT use loadBalancerClass "tailscale" → allow + // (egress policy will handle those) + // - If annotation is absent → allow + // - If annotation is present → must be in allowlist + // + // This makes ingress policy apply ONLY to tailscale Services. 
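+	//
+	// As an illustration (assuming a merged allowlist containing only "prod"),
+	// generateValidation below quotes and joins the names into the %s
+	// placeholder, so the final membership clause reads:
+	//
+	//	object.metadata.annotations["tailscale.com/proxy-group"] in ["prod"]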
+ ingressServiceCEL = `request.kind.kind != "Service" || !((has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || ("tailscale.com/expose" in object.metadata.annotations && object.metadata.annotations["tailscale.com/expose"] == "true")) || (!("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s])` + // egressCEL enforces proxy-group annotation rules for Services that + // are NOT using the tailscale load balancer. + // + // Logic: + // + // - If Service uses loadBalancerClass "tailscale" → allow + // (ingress policy handles those) + // - If Service uses "tailscale.com/expose" → allow + // (ingress policy handles those) + // - If annotation is absent → allow + // - If annotation is present → must be in allowlist + // + // Empty allowlist behavior: + // Any present annotation is rejected ("deny-all"). + // + // This expression is mutually exclusive with ingressServiceCEL, + // preventing policy conflicts. + egressCEL = `((has(object.spec.loadBalancerClass) && object.spec.loadBalancerClass == "tailscale") || ("tailscale.com/expose" in object.metadata.annotations && object.metadata.annotations["tailscale.com/expose"] == "true")) || !("tailscale.com/proxy-group" in object.metadata.annotations) || object.metadata.annotations["tailscale.com/proxy-group"] in [%s]` +) + +func (r *Reconciler) generateIngressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) { + name := "ts-ingress-" + namespace + + var policy admr.ValidatingAdmissionPolicy + err := r.Get(ctx, client.ObjectKey{Name: name}, &policy) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. We only want the existing one for + // its resource version. + case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err) + } + + return &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: policy.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicySpec{ + FailurePolicy: ptr.To(admr.Fail), + MatchConstraints: &admr.MatchResources{ + // The operator allows ingress via Ingress resources & Service resources (that use the "tailscale" load + // balancer class), so we have two resource rules here with multiple validation expressions that attempt + // to keep out of each other's way. 
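+				// Note: every validation expression is evaluated against every
+				// object matched by these rules, which is why each CEL expression
+				// carries a leading kind guard (e.g. request.kind.kind != "Ingress")
+				// that makes it vacuously true for the kinds it does not police.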
+ ResourceRules: []admr.NamedRuleWithOperations{ + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{"networking.k8s.io"}, + APIVersions: []string{"*"}, + Resources: []string{"ingresses"}, + }, + }, + }, + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"services"}, + }, + }, + }, + }, + }, + Validations: []admr.Validation{ + generateValidation(names, ingressCEL), + generateValidation(names, ingressServiceCEL), + }, + }, + }, nil +} + +func (r *Reconciler) generateEgressPolicy(ctx context.Context, namespace string, names set.Set[string]) (*admr.ValidatingAdmissionPolicy, error) { + name := "ts-egress-" + namespace + + var policy admr.ValidatingAdmissionPolicy + err := r.Get(ctx, client.ObjectKey{Name: name}, &policy) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. We only want the existing one for + // its resource version. + case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicy %q: %w", name, err) + } + + return &admr.ValidatingAdmissionPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + ResourceVersion: policy.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicySpec{ + FailurePolicy: ptr.To(admr.Fail), + MatchConstraints: &admr.MatchResources{ + ResourceRules: []admr.NamedRuleWithOperations{ + { + RuleWithOperations: admr.RuleWithOperations{ + Operations: []admr.OperationType{ + admr.Create, + admr.Update, + }, + Rule: admr.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"services"}, + }, + }, + }, + }, + }, + Validations: []admr.Validation{ + generateValidation(names, egressCEL), + }, + }, + }, nil +} + +const ( + denyMessage = `Annotation "tailscale.com/proxy-group" cannot be used on this resource in this namespace` + messageFormat = `If set, annotation "tailscale.com/proxy-group" must be one of [%s]` +) + +func generateValidation(names set.Set[string], format string) admr.Validation { + values := names.Slice() + + // We use a sort here so that the order of the proxy-group names are consistent + // across reconciliation loops. + sort.Strings(values) + + quoted := make([]string, len(values)) + for i, v := range values { + quoted[i] = strconv.Quote(v) + } + + joined := strings.Join(quoted, ",") + message := fmt.Sprintf(messageFormat, strings.Join(values, ", ")) + if len(values) == 0 { + message = denyMessage + } + + return admr.Validation{ + Expression: fmt.Sprintf(format, joined), + Message: message, + } +} + +func (r *Reconciler) generatePolicyBinding(ctx context.Context, namespace string, policy *admr.ValidatingAdmissionPolicy) (*admr.ValidatingAdmissionPolicyBinding, error) { + var binding admr.ValidatingAdmissionPolicyBinding + err := r.Get(ctx, client.ObjectKey{Name: policy.Name}, &binding) + switch { + case apierrors.IsNotFound(err): + // If it's not found, we can create a new one. We only want the existing one for + // its resource version. 
+ case err != nil: + return nil, fmt.Errorf("failed to get ValidatingAdmissionPolicyBinding %q: %w", policy.Name, err) + } + + return &admr.ValidatingAdmissionPolicyBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: policy.Name, + ResourceVersion: binding.ResourceVersion, + }, + Spec: admr.ValidatingAdmissionPolicyBindingSpec{ + PolicyName: policy.Name, + ValidationActions: []admr.ValidationAction{ + admr.Deny, + }, + MatchResources: &admr.MatchResources{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "kubernetes.io/metadata.name": namespace, + }, + }, + }, + }, + }, nil +} diff --git a/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go new file mode 100644 index 000000000..6710eac74 --- /dev/null +++ b/k8s-operator/reconciler/proxygrouppolicy/proxygrouppolicy_test.go @@ -0,0 +1,217 @@ +// Copyright (c) Tailscale Inc & contributors +// SPDX-License-Identifier: BSD-3-Clause + +package proxygrouppolicy_test + +import ( + "slices" + "strings" + "testing" + + admr "k8s.io/api/admissionregistration/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/k8s-operator/reconciler/proxygrouppolicy" +) + +func TestReconciler_Reconcile(t *testing.T) { + t.Parallel() + + tt := []struct { + Name string + Request reconcile.Request + ExpectedPolicyCount int + ExistingResources []client.Object + ExpectsError bool + }{ + { + Name: "single policy, denies all", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + }, + }, + { + Name: "multiple policies merged", + ExpectedPolicyCount: 2, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + ExistingResources: []client.Object{ + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{}, + Egress: []string{}, + }, + }, + &tsapi.ProxyGroupPolicy{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: "allow-one", + Namespace: metav1.NamespaceDefault, + }, + Spec: tsapi.ProxyGroupPolicySpec{ + Ingress: []string{ + "test-ingress", + }, + Egress: []string{}, + }, + }, + }, + }, + { + Name: "no policies, no child resources", + ExpectedPolicyCount: 0, + Request: reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "deny-all", + Namespace: metav1.NamespaceDefault, + }, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.Name, func(t *testing.T) { + bldr := fake.NewClientBuilder().WithScheme(tsapi.GlobalScheme) + bldr = bldr.WithObjects(tc.ExistingResources...) 
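+
+			// Build a fake client seeded with the test case's pre-existing
+			// ProxyGroupPolicy resources so Reconcile can list and merge them.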
+			fc := bldr.Build()
+			opts := proxygrouppolicy.ReconcilerOptions{
+				Client: fc,
+			}
+
+			reconciler := proxygrouppolicy.NewReconciler(opts)
+			_, err := reconciler.Reconcile(t.Context(), tc.Request)
+			if tc.ExpectsError && err == nil {
+				t.Fatalf("expected error, got none")
+			}
+
+			if !tc.ExpectsError && err != nil {
+				t.Fatalf("expected no error, got %v", err)
+			}
+
+			var policies admr.ValidatingAdmissionPolicyList
+			if err = fc.List(t.Context(), &policies); err != nil {
+				t.Fatal(err)
+			}
+
+			if len(policies.Items) != tc.ExpectedPolicyCount {
+				t.Fatalf("expected %d ValidatingAdmissionPolicy resources, got %d", tc.ExpectedPolicyCount, len(policies.Items))
+			}
+
+			var bindings admr.ValidatingAdmissionPolicyBindingList
+			if err = fc.List(t.Context(), &bindings); err != nil {
+				t.Fatal(err)
+			}
+
+			if len(bindings.Items) != tc.ExpectedPolicyCount {
+				t.Fatalf("expected %d ValidatingAdmissionPolicyBinding resources, got %d", tc.ExpectedPolicyCount, len(bindings.Items))
+			}
+
+			for _, binding := range bindings.Items {
+				actual, ok := binding.Spec.MatchResources.NamespaceSelector.MatchLabels["kubernetes.io/metadata.name"]
+				if !ok || actual != metav1.NamespaceDefault {
+					t.Fatalf("expected binding to be for default namespace, got %v", actual)
+				}
+
+				if !slices.Contains(binding.Spec.ValidationActions, admr.Deny) {
+					t.Fatalf("expected binding to be deny, got %v", binding.Spec.ValidationActions)
+				}
+			}
+
+			for _, policy := range policies.Items {
+				// Each ValidatingAdmissionPolicy must be set to fail (rejecting resources).
+				if policy.Spec.FailurePolicy == nil {
+					t.Fatal("expected fail policy, got nil")
+				}
+				if *policy.Spec.FailurePolicy != admr.Fail {
+					t.Fatalf("expected fail policy, got %v", *policy.Spec.FailurePolicy)
+				}
+
+				// Each ValidatingAdmissionPolicy must have a matching ValidatingAdmissionPolicyBinding.
+				bound := slices.ContainsFunc(bindings.Items, func(obj admr.ValidatingAdmissionPolicyBinding) bool {
+					return obj.Spec.PolicyName == policy.Name
+				})
+				if !bound {
+					t.Fatalf("expected policy %s to be bound, but wasn't", policy.Name)
+				}
+
+				// Each ValidatingAdmissionPolicy must be set to evaluate on creation and update of resources.
+				for _, rule := range policy.Spec.MatchConstraints.ResourceRules {
+					if !slices.Contains(rule.Operations, admr.Update) {
+						t.Fatal("expected rule to act on update, but doesn't")
+					}
+
+					if !slices.Contains(rule.Operations, admr.Create) {
+						t.Fatal("expected rule to act on create, but doesn't")
+					}
+				}
+
+				// Egress policies should only act on Service resources.
+				if strings.Contains(policy.Name, "egress") {
+					if len(policy.Spec.MatchConstraints.ResourceRules) != 1 {
+						t.Fatalf("expected exactly one matching resource, got %d", len(policy.Spec.MatchConstraints.ResourceRules))
+					}
+
+					rule := policy.Spec.MatchConstraints.ResourceRules[0]
+
+					if !slices.Contains(rule.Resources, "services") {
+						t.Fatal("expected egress rule to act on services, but doesn't")
+					}
+
+					if len(policy.Spec.Validations) != 1 {
+						t.Fatalf("expected exactly one validation, got %d", len(policy.Spec.Validations))
+					}
+				}
+
+				// Ingress policies should act on both Ingress and Service resources.
+ if strings.Contains(policy.Name, "ingress") { + if len(policy.Spec.MatchConstraints.ResourceRules) != 2 { + t.Fatalf("expected exactly two matching resources, got %d", len(policy.Spec.MatchConstraints.ResourceRules)) + } + + ingressRule := policy.Spec.MatchConstraints.ResourceRules[0] + if !slices.Contains(ingressRule.Resources, "ingresses") { + t.Fatal("expected ingress rule to act on ingresses, but doesn't") + } + + serviceRule := policy.Spec.MatchConstraints.ResourceRules[1] + if !slices.Contains(serviceRule.Resources, "services") { + t.Fatal("expected ingress rule to act on services, but doesn't") + } + + if len(policy.Spec.Validations) != 2 { + t.Fatalf("expected exactly two validations, got %d", len(policy.Spec.Validations)) + } + } + } + }) + } +}
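---

For reviewers unfamiliar with the new CRD, a minimal, illustrative example of the resource this reconciler consumes (the ProxyGroup name "prod" is hypothetical):

  apiVersion: tailscale.com/v1alpha1
  kind: ProxyGroupPolicy
  metadata:
    name: allow-prod
    namespace: default
  spec:
    # Ingress resources (and Services exposed via Tailscale) in this
    # namespace may only reference the "prod" ProxyGroup.
    ingress:
      - prod
    # Egress (ExternalName) Services likewise.
    egress:
      - prod

The operator merges all ProxyGroupPolicy resources in a namespace by taking the union of their ingress and egress lists, then reconciles them into a single pair of cluster-scoped ValidatingAdmissionPolicy/Binding objects (named "ts-ingress-<namespace>" and "ts-egress-<namespace>") scoped back to the namespace via its "kubernetes.io/metadata.name" label.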