istio
1815 строк · 63.3 Кб
1// Copyright Istio Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package ambient16
17import (18"fmt"19"net/netip"20"path/filepath"21"strings"22"testing"23"time"24
25corev1 "k8s.io/api/core/v1"26metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"27"k8s.io/apimachinery/pkg/runtime/schema"28k8sv1 "sigs.k8s.io/gateway-api/apis/v1"29k8sbeta "sigs.k8s.io/gateway-api/apis/v1beta1"30
31"istio.io/api/meta/v1alpha1"32"istio.io/api/networking/v1alpha3"33auth "istio.io/api/security/v1beta1"34"istio.io/api/type/v1beta1"35apiv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"36clientsecurityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1"37"istio.io/istio/pilot/pkg/config/kube/crd"38"istio.io/istio/pilot/pkg/features"39"istio.io/istio/pilot/pkg/model"40"istio.io/istio/pilot/pkg/serviceregistry/util/xdsfake"41"istio.io/istio/pilot/test/util"42"istio.io/istio/pkg/cluster"43"istio.io/istio/pkg/config"44"istio.io/istio/pkg/config/constants"45"istio.io/istio/pkg/config/labels"46"istio.io/istio/pkg/config/schema/gvk"47"istio.io/istio/pkg/config/schema/gvr"48"istio.io/istio/pkg/config/schema/kind"49kubeclient "istio.io/istio/pkg/kube"50"istio.io/istio/pkg/kube/controllers"51"istio.io/istio/pkg/kube/kclient/clienttest"52"istio.io/istio/pkg/kube/krt"53"istio.io/istio/pkg/network"54"istio.io/istio/pkg/slices"55"istio.io/istio/pkg/test"56"istio.io/istio/pkg/test/util/assert"57"istio.io/istio/pkg/test/util/file"58"istio.io/istio/pkg/test/util/retry"59"istio.io/istio/pkg/util/protomarshal"60"istio.io/istio/pkg/util/sets"61"istio.io/istio/pkg/workloadapi"62"istio.io/istio/pkg/workloadapi/security"63)
64
// Shared fixtures used by every ambient index test in this file.
const (
	testNS   = "ns1"          // namespace most test workloads are created in
	systemNS = "istio-system" // root namespace for mesh-wide policies
	testNW   = "testnetwork"  // network ID passed to newAmbientTestServer when set
	testC    = "cluster0"     // cluster ID passed to newAmbientTestServer when set
)
71
// init force-enables the ambient feature flags for the whole test package.
// Individual tests may still override flags per-test (see the use of
// test.SetForTest in TestAmbientIndex_ServiceAttachedWaypoints).
func init() {
	features.EnableAmbientWaypoints = true
	features.EnableAmbientControllers = true
}
76
77func TestAmbientIndex_NetworkAndClusterIDs(t *testing.T) {78cases := []struct {79name string80cluster cluster.ID81network network.ID82}{83{84name: "values unset",85cluster: "",86network: "",87},88{89name: "values set",90cluster: testC,91network: testNW,92},93}94
95for _, c := range cases {96t.Run(c.name, func(t *testing.T) {97s := newAmbientTestServer(t, c.cluster, c.network)98s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)99s.assertEvent(t, s.podXdsName("pod1"))100s.assertAddresses(t, s.addrXdsName("127.0.0.1"), "pod1")101})102}103}
104
// TestAmbientIndex_WorkloadNotFound verifies that looking up an address with
// no matching workload returns no AddressInfo entries.
func TestAmbientIndex_WorkloadNotFound(t *testing.T) {
	s := newAmbientTestServer(t, testC, testNW)

	// Add a pod.
	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)

	// Lookup a different address and verify nothing is returned.
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"))
}
114
115func TestAmbientIndex_LookupWorkloads(t *testing.T) {116s := newAmbientTestServer(t, testC, testNW)117
118s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)119s.assertAddresses(t, "", "pod1")120s.assertEvent(t, s.podXdsName("pod1"))121
122s.addPods(t, "127.0.0.2", "pod2", "sa1", map[string]string{"app": "a", "other": "label"}, nil, true, corev1.PodRunning)123s.addPods(t, "127.0.0.3", "pod3", "sa1", map[string]string{"app": "other"}, nil, true, corev1.PodRunning)124s.assertAddresses(t, "", "pod1", "pod2", "pod3")125s.assertAddresses(t, s.addrXdsName("127.0.0.1"), "pod1")126s.assertAddresses(t, s.addrXdsName("127.0.0.2"), "pod2")127for _, key := range []string{s.podXdsName("pod3"), s.addrXdsName("127.0.0.3")} {128assert.Equal(t, s.lookup(key), []model.AddressInfo{129{130Address: &workloadapi.Address{131Type: &workloadapi.Address_Workload{132Workload: &workloadapi.Workload{133Name: "pod3",134Namespace: testNS,135Addresses: [][]byte{netip.MustParseAddr("127.0.0.3").AsSlice()},136Network: testNW,137ServiceAccount: "sa1",138Uid: s.podXdsName("pod3"),139Node: "node1",140CanonicalName: "other",141CanonicalRevision: "latest",142WorkloadType: workloadapi.WorkloadType_POD,143WorkloadName: "pod3",144ClusterId: testC,145Status: workloadapi.WorkloadStatus_HEALTHY,146},147},148},149},150})151}152s.assertEvent(t, s.podXdsName("pod2"))153s.assertEvent(t, s.podXdsName("pod3"))154}
155
// TestAmbientIndex_ServiceAttachedWaypoints verifies that annotating a Service
// with constants.AmbientUseWaypoint attaches the waypoint's address to the
// Service entry in the index, without generating extra workload events.
func TestAmbientIndex_ServiceAttachedWaypoints(t *testing.T) {
	test.SetForTest(t, &features.EnableAmbientControllers, true)
	s := newAmbientTestServer(t, testC, testNW)

	s.addWaypoint(t, "10.0.0.10", "test-wp", "default", true)

	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod1"))

	// Now add a service that will select pods with label "a".
	s.addService(t, "svc1",
		map[string]string{},
		map[string]string{},
		[]int32{80}, map[string]string{"app": "a"}, "10.0.0.1")
	s.assertEvent(t, s.podXdsName("pod1"), s.svcXdsName("svc1"))

	// Re-add the same service, now annotated to use the waypoint; only the
	// service itself should be pushed.
	s.addService(t, "svc1",
		map[string]string{},
		map[string]string{constants.AmbientUseWaypoint: "test-wp"},
		[]int32{80}, map[string]string{"app": "a"}, "10.0.0.1")
	s.assertEvent(t, s.svcXdsName("svc1"))
	s.assertNoEvent(t)

	// We should now see the waypoint service IP when we look up the annotated svc
	assert.Equal(t,
		s.lookup(s.addrXdsName("10.0.0.1"))[0].Address.GetService().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.10").AsSlice())
}
184
// TestAmbientIndex_ServiceSelectsCorrectWorkloads exercises Service label
// selection end to end: pods joining/leaving a Service via label changes,
// selector updates on the Service itself, pod deletion, and finally Service
// deletion — asserting the exact xds events and VIP membership at each step.
func TestAmbientIndex_ServiceSelectsCorrectWorkloads(t *testing.T) {
	s := newAmbientTestServer(t, testC, testNW)

	// Add 2 pods with the "a" label, and one without.
	// We should get an event for the new Service and the two *Pod* IPs impacted
	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod1"))
	s.addPods(t, "127.0.0.2", "pod2", "sa1", map[string]string{"app": "a", "other": "label"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod2"))
	s.addPods(t, "127.0.0.3", "pod3", "sa1", map[string]string{"app": "other"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod3"))
	s.clearEvents()

	// Now add a service that will select pods with label "a".
	s.addService(t, "svc1",
		map[string]string{},
		map[string]string{},
		[]int32{80}, map[string]string{"app": "a"}, "10.0.0.1")
	s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2"), s.svcXdsName("svc1"))

	// Services should appear with workloads when we get all resources.
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "svc1")

	// Look up the resources by VIP.
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"), "pod1", "pod2", "svc1")

	s.clearEvents()

	// Add a new pod to the service, we should see it
	s.addPods(t, "127.0.0.4", "pod4", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "pod4", "svc1")
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"), "pod1", "pod2", "pod4", "svc1")
	s.assertEvent(t, s.podXdsName("pod4"))
	s.clearEvents()

	// Delete it, should remove from the Service as well
	s.deletePod(t, "pod4")
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "svc1")
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"), "pod1", "pod2", "svc1")
	s.assertAddresses(t, s.addrXdsName("127.0.0.4")) // Should not be accessible anymore
	s.assertAddresses(t, s.podXdsName("pod4"))
	s.assertEvent(t, s.podXdsName("pod4"))
	s.clearEvents()

	// Update Service to have a more restrictive label selector
	s.addService(t, "svc1",
		map[string]string{},
		map[string]string{},
		[]int32{80}, map[string]string{"app": "a", "other": "label"}, "10.0.0.1")
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "svc1")
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"), "pod2", "svc1")
	s.assertEvent(t, s.podXdsName("pod1"))
	s.clearEvents()

	// Update a pod to add it to the service
	s.addPods(t, "127.0.0.3", "pod3", "sa1", map[string]string{"app": "a", "other": "label"}, nil, true, corev1.PodRunning)
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "svc1")
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"), "pod2", "pod3", "svc1")
	s.assertEvent(t, s.podXdsName("pod3"))
	s.clearEvents()

	// And remove it again
	s.addPods(t, "127.0.0.3", "pod3", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "svc1")
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"), "pod2", "svc1")
	s.assertEvent(t, s.podXdsName("pod3"))
	s.clearEvents()

	// Delete the service entirely
	s.deleteService(t, "svc1")
	s.assertAddresses(t, "", "pod1", "pod2", "pod3")
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"))
	s.assertEvent(t, s.podXdsName("pod2"), s.svcXdsName("svc1"))
}
259
// TestAmbientIndex_WaypointConfiguredOnlyWhenReady verifies that a workload is
// only pushed with its waypoint once that waypoint reports ready. The test
// depends on exact event ordering: pod2's waypoint is ready immediately, while
// pod1's becomes ready later, so pod2's event must arrive first.
func TestAmbientIndex_WaypointConfiguredOnlyWhenReady(t *testing.T) {
	s := newAmbientTestServer(t, testC, testNW)

	s.addPods(t,
		"127.0.0.1",
		"pod1",
		"sa1",
		map[string]string{"app": "a"},
		map[string]string{constants.AmbientUseWaypoint: "waypoint-sa1"},
		true,
		corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod1"))
	s.addPods(t,
		"127.0.0.2",
		"pod2",
		"sa2",
		map[string]string{"app": "b"},
		map[string]string{constants.AmbientUseWaypoint: "waypoint-sa2"},
		true,
		corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod2"))

	// waypoint-sa1 is added not-ready; only the ready waypoint-sa2 triggers a push.
	s.addWaypoint(t, "10.0.0.1", "waypoint-sa1", "sa1", false)
	s.addWaypoint(t, "10.0.0.2", "waypoint-sa2", "sa2", true)
	s.assertEvent(t, s.podXdsName("pod2"))

	// make waypoint-sa1 ready
	s.addWaypoint(t, "10.0.0.1", "waypoint-sa1", "sa1", true)
	// if waypoint-sa1 was configured when not ready "pod2" assertions should skip the "pod1" xds event and this should fail
	s.assertEvent(t, s.podXdsName("pod1"))
}
291
// TestAmbientIndex_WaypointAddressAddedToWorkloads verifies that namespace- and
// service-account-scoped waypoints have their service VIP attached to the
// workloads they serve, that waypoint pods themselves never get a waypoint,
// that adding/removing waypoint *pods* does not churn other workloads (the
// waypoint address is the service VIP, not a pod IP), and that deleting the
// waypoint Gateway/Service detaches it again.
func TestAmbientIndex_WaypointAddressAddedToWorkloads(t *testing.T) {
	s := newAmbientTestServer(t, testC, testNW)

	// Annotate the test namespace so all its workloads use waypoint-ns.
	s.ns.Update(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: testNS,
			Annotations: map[string]string{
				constants.AmbientUseWaypoint: "waypoint-ns",
			},
		},
	})

	// Add pods for app "a".
	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod1"))
	s.addPods(t, "127.0.0.2", "pod2", "sa1", map[string]string{"app": "a", "other": "label"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod2"))
	s.addPods(t, "127.0.0.3", "pod3", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod3"))
	// Add pods for app "b".
	s.addPods(t,
		"127.0.0.4",
		"pod4",
		"sa2",
		map[string]string{"app": "b"},
		map[string]string{constants.AmbientUseWaypoint: "waypoint-sa2"},
		true,
		corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod4"))

	s.addWaypoint(t, "10.0.0.2", "waypoint-ns", "", true)
	// All these workloads updated, so push them
	s.assertEvent(t, s.podXdsName("pod1"),
		s.podXdsName("pod2"),
		s.podXdsName("pod3"),
	)

	// Add a waypoint proxy pod for namespace
	s.addPods(t, "127.0.0.200", "waypoint-ns-pod", "namespace-wide",
		map[string]string{
			constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel,
			constants.GatewayNameLabel:    "namespace-wide",
		}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("waypoint-ns-pod"))
	// create the waypoint service
	s.addService(t, "waypoint-ns",
		map[string]string{constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel},
		map[string]string{},
		[]int32{80}, map[string]string{constants.GatewayNameLabel: "namespace-wide"}, "10.0.0.2")
	s.assertEvent(t,
		s.podXdsName("waypoint-ns-pod"),
		s.svcXdsName("waypoint-ns"),
	)
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "pod4", "waypoint-ns", "waypoint-ns-pod")

	s.addWaypoint(t, "10.0.0.3", "waypoint-sa2", "sa2", true)
	s.assertEvent(t, s.podXdsName("pod4"))
	// Add a waypoint proxy pod for sa2
	s.addPods(t, "127.0.0.250", "waypoint-sa2-pod", "service-account",
		map[string]string{
			constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel,
			constants.GatewayNameLabel:    "service-account",
		}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("waypoint-sa2-pod"))
	// create the waypoint service
	s.addService(t, "waypoint-sa2",
		map[string]string{constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel},
		map[string]string{},
		[]int32{80}, map[string]string{constants.GatewayNameLabel: "service-account"}, "10.0.0.3")
	s.assertEvent(t,
		s.podXdsName("waypoint-sa2-pod"),
		s.svcXdsName("waypoint-sa2"),
	)
	s.assertAddresses(t, "", "pod1", "pod2", "pod3", "pod4", "waypoint-ns", "waypoint-ns-pod", "waypoint-sa2-pod", "waypoint-sa2")

	// We should now see the waypoint service IP
	assert.Equal(t,
		s.lookup(s.addrXdsName("127.0.0.3"))[0].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.2").AsSlice())

	// pod4 uses the SA-scoped waypoint rather than the namespace one.
	assert.Equal(t,
		s.lookup(s.addrXdsName("127.0.0.4"))[0].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.3").AsSlice())

	// Lookup for service VIP should return Workload and Service AddressInfo objects
	assert.Equal(t,
		len(s.lookup(s.addrXdsName("10.0.0.2"))),
		2)
	for _, k := range s.lookup(s.addrXdsName("10.0.0.2")) {
		switch k.Type.(type) {
		case *workloadapi.Address_Workload:
			assert.Equal(t, k.Address.GetWorkload().Name, "waypoint-ns-pod")
			assert.Equal(t, k.Address.GetWorkload().Waypoint, nil)
		case *workloadapi.Address_Service:
			assert.Equal(t, k.Address.GetService().Name, "waypoint-ns")
		}
	}

	// Lookup for service via namespace/hostname returns Service and Workload AddressInfo
	assert.Equal(t,
		len(s.lookup(s.svcXdsName("waypoint-ns"))), 2)
	for _, k := range s.lookup(s.svcXdsName("waypoint-ns")) {
		switch k.Type.(type) {
		case *workloadapi.Address_Workload:
			assert.Equal(t, k.Address.GetWorkload().Name, "waypoint-ns-pod")
			assert.Equal(t, k.Address.GetWorkload().Waypoint, nil)
		case *workloadapi.Address_Service:
			assert.Equal(t, k.Address.GetService().Hostname, s.hostnameForService("waypoint-ns"))
		}
	}

	// Add another waypoint pod, expect no updates for other pods since waypoint address refers to service VIP
	s.addPods(t, "127.0.0.201", "waypoint2-ns-pod", "namespace-wide",
		map[string]string{
			constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel,
			constants.GatewayNameLabel:    "namespace-wide",
		}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("waypoint2-ns-pod"))
	assert.Equal(t,
		s.lookup(s.addrXdsName("127.0.0.3"))[0].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.2").AsSlice())
	// Waypoints do not have waypoints
	assert.Equal(t,
		s.lookup(s.addrXdsName("127.0.0.200"))[0].Address.GetWorkload().Waypoint,
		nil)

	// make sure looking up the waypoint for a wl by network and address functions correctly
	assert.Equal(t, len(s.Waypoint(testNW, "127.0.0.1")), 1)
	for _, k := range s.Waypoint(testNW, "127.0.0.1") {
		assert.Equal(t, k.AsSlice(), netip.MustParseAddr("10.0.0.2").AsSlice())
	}

	s.addService(t, "svc1",
		map[string]string{},
		map[string]string{},
		[]int32{80}, map[string]string{"app": "a"}, "10.0.0.1")
	s.assertAddresses(t, s.addrXdsName("10.0.0.1"), "pod1", "pod2", "pod3", "svc1")
	// Send update for the workloads as well...
	s.assertEvent(t, s.podXdsName("pod1"),
		s.podXdsName("pod2"),
		s.podXdsName("pod3"),
		s.svcXdsName("svc1"),
	)
	// Make sure Service sees waypoints as well
	assert.Equal(t,
		s.lookup(s.addrXdsName("10.0.0.1"))[1].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.2").AsSlice())

	// Delete a waypoint
	s.deletePod(t, "waypoint2-ns-pod")
	s.assertEvent(t, s.podXdsName("waypoint2-ns-pod"))

	// Workload should not be updated since service has not changed
	assert.Equal(t,
		s.lookup(s.addrXdsName("127.0.0.3"))[0].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.2").AsSlice())

	// As should workload via Service
	assert.Equal(t,
		s.lookup(s.addrXdsName("10.0.0.1"))[1].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.2").AsSlice())

	s.addPods(t, "127.0.0.201", "waypoint2-sa", "waypoint-sa",
		map[string]string{constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel},
		map[string]string{constants.WaypointServiceAccount: "sa2"}, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("waypoint2-sa"))
	// Unrelated SA should not change anything
	assert.Equal(t,
		s.lookup(s.addrXdsName("127.0.0.3"))[0].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.2").AsSlice())

	// Adding a new pod should also see the waypoint
	s.addPods(t, "127.0.0.6", "pod6", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod6"))
	assert.Equal(t,
		s.lookup(s.addrXdsName("127.0.0.6"))[0].Address.GetWorkload().Waypoint.GetAddress().Address,
		netip.MustParseAddr("10.0.0.2").AsSlice())

	s.deletePod(t, "pod6")
	s.assertEvent(t, s.podXdsName("pod6"))

	s.deletePod(t, "pod3")
	s.assertEvent(t, s.podXdsName("pod3"))
	s.deletePod(t, "pod2")
	s.assertEvent(t, s.podXdsName("pod2"))

	// Deleting the waypoint Gateway detaches it from remaining workloads.
	s.deleteWaypoint(t, "waypoint-ns")
	s.assertEvent(t, s.podXdsName("pod1"))
	s.deleteService(t, "waypoint-ns")
	s.assertEvent(t,
		s.podXdsName("waypoint-ns-pod"),
		s.svcXdsName("waypoint-ns"))

	s.deleteWaypoint(t, "waypoint-sa2")
	s.assertEvent(t, s.podXdsName("pod4"))
	s.deleteService(t, "waypoint-sa2")
	s.assertEvent(t,
		s.podXdsName("waypoint-sa2-pod"),
		s.svcXdsName("waypoint-sa2"))
	assert.Equal(t,
		s.lookup(s.addrXdsName("10.0.0.1"))[1].Address.GetWorkload().Waypoint,
		nil)
}
495
496// TODO(nmittler): Consider splitting this into multiple, smaller tests.
497func TestAmbientIndex_Policy(t *testing.T) {498s := newAmbientTestServer(t, testC, testNW)499
500s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)501s.assertEvent(t, s.podXdsName("pod1"))502s.addPods(t, "127.0.0.200", "waypoint-ns-pod", "namespace-wide",503map[string]string{504constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel,505constants.GatewayNameLabel: "namespace-wide",506}, nil, true, corev1.PodRunning)507s.assertEvent(t, s.podXdsName("waypoint-ns-pod"))508s.addPods(t, "127.0.0.201", "waypoint2-sa", "waypoint-sa",509map[string]string{constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel},510map[string]string{constants.WaypointServiceAccount: "sa2"}, true, corev1.PodRunning)511s.assertEvent(t, s.podXdsName("waypoint2-sa"))512s.addWaypoint(t, "10.0.0.2", "waypoint-ns", "", true)513s.ns.Update(&corev1.Namespace{514ObjectMeta: metav1.ObjectMeta{515Name: testNS,516Annotations: map[string]string{517constants.AmbientUseWaypoint: "waypoint-ns",518},519},520})521s.assertEvent(t, s.podXdsName("pod1"))522s.addService(t, "waypoint-ns",523map[string]string{constants.ManagedGatewayLabel: constants.ManagedGatewayMeshControllerLabel},524map[string]string{},525[]int32{80}, map[string]string{constants.GatewayNameLabel: "namespace-wide"}, "10.0.0.2")526s.assertUnorderedEvent(t, s.podXdsName("waypoint-ns-pod"), s.svcXdsName("waypoint-ns"))527s.clearEvents()528selectorPolicyName := "selector"529
530// Test that PeerAuthentications are added to the ambient index531s.addPolicy(t, "global", systemNS, nil, gvk.PeerAuthentication, func(c controllers.Object) {532pol := c.(*clientsecurityv1beta1.PeerAuthentication)533pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{534Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,535}536})537s.clearEvents()538
539s.addPolicy(t, "namespace", testNS, nil, gvk.PeerAuthentication, func(c controllers.Object) {540pol := c.(*clientsecurityv1beta1.PeerAuthentication)541pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{542Mode: auth.PeerAuthentication_MutualTLS_STRICT,543}544})545// Should add the static policy to all pods in the ns1 namespace since the effective mode is STRICT546s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("waypoint-ns-pod"), s.podXdsName("waypoint2-sa"))547assert.Equal(t,548s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,549[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName)})550s.clearEvents()551
552s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {553pol := c.(*clientsecurityv1beta1.PeerAuthentication)554pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{555Mode: auth.PeerAuthentication_MutualTLS_STRICT,556}557})558// Expect no event since the effective policy doesn't change559assert.Equal(t,560s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,561[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName)})562
563// Change the workload policy to be permissive564s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {565pol := c.(*clientsecurityv1beta1.PeerAuthentication)566pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{567Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,568}569})570s.assertEvent(t, s.podXdsName("pod1")) // Static policy should be removed since it isn't STRICT571assert.Equal(t,572s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,573nil)574
575// Add a port-level STRICT exception to the workload policy576s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {577pol := c.(*clientsecurityv1beta1.PeerAuthentication)578pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{579Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,580}581pol.Spec.PortLevelMtls = map[uint32]*auth.PeerAuthentication_MutualTLS{5829090: {583Mode: auth.PeerAuthentication_MutualTLS_STRICT,584},585}586})587s.assertEvent(t, s.podXdsName("pod1")) // Selector policy should be added back since there is now a STRICT exception588time.Sleep(time.Second)589assert.Equal(t,590s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,591[]string{fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{592Kind: kind.PeerAuthentication,593Name: selectorPolicyName,594Namespace: "ns1",595}))})596
597// Pod not in selector policy, but namespace policy should take effect (hence static policy)598s.addPods(t, "127.0.0.2", "pod2", "sa1", map[string]string{"app": "not-a"}, nil, true, corev1.PodRunning)599s.assertEvent(t, s.podXdsName("pod2"))600assert.Equal(t,601s.lookup(s.addrXdsName("127.0.0.2"))[0].Address.GetWorkload().AuthorizationPolicies,602[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName)})603
604// Add it to the policy by updating its selector605s.addPods(t, "127.0.0.2", "pod2", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)606s.assertEvent(t, s.podXdsName("pod2"))607assert.Equal(t,608s.lookup(s.addrXdsName("127.0.0.2"))[0].Address.GetWorkload().AuthorizationPolicies,609[]string{fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{610Kind: kind.PeerAuthentication,611Name: selectorPolicyName,612Namespace: "ns1",613}))})614
615// Add global selector policy; nothing should happen since PeerAuthentication doesn't support global mesh wide selectors616s.addPolicy(t, "global-selector", systemNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {617pol := c.(*clientsecurityv1beta1.PeerAuthentication)618pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{619Mode: auth.PeerAuthentication_MutualTLS_STRICT,620}621})622assert.Equal(t,623s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,624[]string{fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{625Kind: kind.PeerAuthentication,626Name: selectorPolicyName,627Namespace: "ns1",628}))})629
630// Delete global selector policy631s.pa.Delete("global-selector", systemNS)632
633// Update workload policy to be PERMISSIVE634s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {635pol := c.(*clientsecurityv1beta1.PeerAuthentication)636pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{637Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,638}639pol.Spec.PortLevelMtls = map[uint32]*auth.PeerAuthentication_MutualTLS{6409090: {641Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,642},643}644})645// There should be an event since effective policy moves to PERMISSIVE646s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2"))647assert.Equal(t,648s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,649nil)650
651// Change namespace policy to be PERMISSIVE652s.addPolicy(t, "namespace", testNS, nil, gvk.PeerAuthentication, func(c controllers.Object) {653pol := c.(*clientsecurityv1beta1.PeerAuthentication)654pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{655Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,656}657})658
659// All pods have an event (since we're only testing one namespace) but still no policies attached660s.assertEvent(t, s.podXdsName("waypoint-ns-pod"), s.podXdsName("waypoint2-sa"))661assert.Equal(t,662s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,663nil)664
665// Change workload policy to be STRICT and remove port-level overrides666s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {667pol := c.(*clientsecurityv1beta1.PeerAuthentication)668pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{669Mode: auth.PeerAuthentication_MutualTLS_STRICT,670}671pol.Spec.PortLevelMtls = nil672})673
674// Selected pods receive an event675s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2"))676assert.Equal(t,677s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,678[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName)}) // Effective mode is STRICT so set policy679
680// Add a permissive port-level override681s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {682pol := c.(*clientsecurityv1beta1.PeerAuthentication)683pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{684Mode: auth.PeerAuthentication_MutualTLS_STRICT,685}686pol.Spec.PortLevelMtls = map[uint32]*auth.PeerAuthentication_MutualTLS{6879090: {688Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,689},690}691})692s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2")) // Matching pods receive an event693assert.Equal(t,694s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,695[]string{fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{696Kind: kind.PeerAuthentication,697Name: selectorPolicyName,698Namespace: "ns1",699}))})700
701// Set workload policy to be UNSET with a STRICT port-level override702s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {703pol := c.(*clientsecurityv1beta1.PeerAuthentication)704pol.Spec.Mtls = nil // equivalent to UNSET705pol.Spec.PortLevelMtls = map[uint32]*auth.PeerAuthentication_MutualTLS{7069090: {707Mode: auth.PeerAuthentication_MutualTLS_STRICT,708},709}710})711// The policy should still be added since the effective policy is PERMISSIVE712assert.Equal(t,713s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,714[]string{fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{715Kind: kind.PeerAuthentication,716Name: selectorPolicyName,717Namespace: "ns1",718}))})719
720// Change namespace policy back to STRICT721s.addPolicy(t, "namespace", testNS, nil, gvk.PeerAuthentication, func(c controllers.Object) {722pol := c.(*clientsecurityv1beta1.PeerAuthentication)723pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{724Mode: auth.PeerAuthentication_MutualTLS_STRICT,725}726})727// All pods have an event (since we're only testing one namespace)728s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2"), s.podXdsName("waypoint-ns-pod"), s.podXdsName("waypoint2-sa"))729assert.Equal(t,730s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,731[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName)}) // Effective mode is STRICT so set static policy732
733// Set workload policy to be UNSET with a PERMISSIVE port-level override734s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {735pol := c.(*clientsecurityv1beta1.PeerAuthentication)736pol.Spec.Mtls = nil // equivalent to UNSET737pol.Spec.PortLevelMtls = map[uint32]*auth.PeerAuthentication_MutualTLS{7389090: {739Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,740},741}742})743s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2")) // Matching pods receive an event744// The policy should still be added since the effective policy is STRICT745assert.Equal(t,746s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,747[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName), fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{748Kind: kind.PeerAuthentication,749Name: selectorPolicyName,750Namespace: "ns1",751}))})752
753// Clear PeerAuthentication from workload754s.pa.Delete("selector", testNS)755s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2"))756// Effective policy is still STRICT so the static policy should still be set757assert.Equal(t,758s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,759[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName)})760
761// Now remove the namespace and global policies along with the pods762s.pa.Delete("namespace", testNS)763s.pa.Delete("global", systemNS)764s.deletePod(t, "pod2")765s.assertEvent(t, s.podXdsName("pod2"), s.podXdsName("pod1"))766s.clearEvents()767
768// Test AuthorizationPolicies769s.addPolicy(t, "global", systemNS, nil, gvk.AuthorizationPolicy, nil)770s.addPolicy(t, "namespace", testNS, nil, gvk.AuthorizationPolicy, nil)771assert.Equal(t,772s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,773nil)774
775s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.AuthorizationPolicy, nil)776s.assertEvent(t, s.podXdsName("pod1"))777assert.Equal(t,778s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,779[]string{"ns1/selector"})780
781// Pod not in policy782s.addPods(t, "127.0.0.2", "pod3", "sa1", map[string]string{"app": "not-a"}, nil, true, corev1.PodRunning)783s.assertEvent(t, s.podXdsName("pod3"))784assert.Equal(t,785s.lookup(s.addrXdsName("127.0.0.2"))[0].Address.GetWorkload().AuthorizationPolicies,786nil)787
788// Add it to the policy by updating its selector789s.addPods(t, "127.0.0.2", "pod3", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)790s.assertEvent(t, s.podXdsName("pod3"))791assert.Equal(t,792s.lookup(s.addrXdsName("127.0.0.2"))[0].Address.GetWorkload().AuthorizationPolicies,793[]string{"ns1/selector"})794
795s.addPolicy(t, "global-selector", systemNS, map[string]string{"app": "a"}, gvk.AuthorizationPolicy, nil)796s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3"))797
798assert.Equal(t,799s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,800[]string{"istio-system/global-selector", "ns1/selector"})801
802// Update selector to not select803s.addPolicy(t, "global-selector", systemNS, map[string]string{"app": "not-a"}, gvk.AuthorizationPolicy, nil)804s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3"))805
806assert.Equal(t,807s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,808[]string{"ns1/selector"})809
810// Add STRICT global PeerAuthentication811s.addPolicy(t, "strict", systemNS, nil, gvk.PeerAuthentication, func(c controllers.Object) {812pol := c.(*clientsecurityv1beta1.PeerAuthentication)813pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{814Mode: auth.PeerAuthentication_MutualTLS_STRICT,815}816})817// Every workload should receive an event818s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3"), s.podXdsName("waypoint-ns-pod"), s.podXdsName("waypoint2-sa"))819// Static STRICT policy should be sent820assert.Equal(t,821s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,822[]string{"ns1/selector", fmt.Sprintf("istio-system/%s", staticStrictPolicyName)})823
824// Now add a STRICT workload PeerAuthentication825s.addPolicy(t, "selector-strict", testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {826pol := c.(*clientsecurityv1beta1.PeerAuthentication)827pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{828Mode: auth.PeerAuthentication_MutualTLS_STRICT,829}830})831// Effective policy is still STRICT so only static policy should be referenced832assert.Equal(t,833s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,834[]string{"ns1/selector", fmt.Sprintf("istio-system/%s", staticStrictPolicyName)})835
836// Change the workload policy to PERMISSIVE837s.addPolicy(t, "selector-strict", testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {838pol := c.(*clientsecurityv1beta1.PeerAuthentication)839pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{840Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,841}842})843s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3")) // Matching workloads should receive an event844// Static STRICT policy should disappear845assert.Equal(t,846s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,847[]string{"ns1/selector"})848
849// Change the workload policy to DISABLE850s.addPolicy(t, "selector-strict", testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {851pol := c.(*clientsecurityv1beta1.PeerAuthentication)852pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{853Mode: auth.PeerAuthentication_MutualTLS_DISABLE,854}855})856
857// No event because there's effectively no change858
859// Static STRICT policy should disappear860assert.Equal(t,861s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,862[]string{"ns1/selector"})863
864// Now make the workload policy STRICT but have a PERMISSIVE port-level override865s.addPolicy(t, "selector-strict", testNS, map[string]string{"app": "a"}, gvk.PeerAuthentication, func(c controllers.Object) {866pol := c.(*clientsecurityv1beta1.PeerAuthentication)867pol.Spec.Mtls = &auth.PeerAuthentication_MutualTLS{868Mode: auth.PeerAuthentication_MutualTLS_STRICT,869}870pol.Spec.PortLevelMtls = map[uint32]*auth.PeerAuthentication_MutualTLS{8719090: {872Mode: auth.PeerAuthentication_MutualTLS_PERMISSIVE,873},874}875})876s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3")) // Matching workloads should receive an event877// Workload policy should be added since there's a port level exclusion878assert.Equal(t,879s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,880[]string{"ns1/selector", fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{881Kind: kind.PeerAuthentication,882Name: "selector-strict",883Namespace: "ns1",884}))})885
886// Now add a rule allowing a specific source principal to the workload AuthorizationPolicy887s.addPolicy(t, selectorPolicyName, testNS, map[string]string{"app": "a"}, gvk.AuthorizationPolicy, func(c controllers.Object) {888pol := c.(*clientsecurityv1beta1.AuthorizationPolicy)889pol.Spec.Rules = []*auth.Rule{890{891From: []*auth.Rule_From{{Source: &auth.Source{Principals: []string{"cluster.local/ns/ns1/sa/sa1"}}}},892},893}894})895// No event since workload policy should still be there (both workloads' policy references remain the same).896// Since PeerAuthentications are translated into DENY policies we can safely apply them897// alongside ALLOW authorization policies898assert.Equal(t,899s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,900[]string{"ns1/selector", fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{901Kind: kind.PeerAuthentication,902Name: "selector-strict",903Namespace: "ns1",904}))})905
906s.authz.Delete("selector", testNS)907s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3"))908assert.Equal(t,909s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,910[]string{fmt.Sprintf("ns1/%s", model.GetAmbientPolicyConfigName(model.ConfigKey{911Kind: kind.PeerAuthentication,912Name: "selector-strict",913Namespace: "ns1",914}))})915
916// Delete selector policy917s.pa.Delete("selector-strict", testNS)918s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3")) // Matching workloads should receive an event919// Static STRICT policy should now be sent because of the global policy920assert.Equal(t,921s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,922[]string{fmt.Sprintf("istio-system/%s", staticStrictPolicyName)})923
924// Delete global policy925s.pa.Delete("strict", systemNS)926// Every workload should receive an event927s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod3"), s.podXdsName("waypoint-ns-pod"), s.podXdsName("waypoint2-sa"))928// Now no policies are in effect929assert.Equal(t,930s.lookup(s.addrXdsName("127.0.0.1"))[0].Address.GetWorkload().AuthorizationPolicies,931nil)932
933s.clearEvents()934s.addPolicy(t, "gateway-targeted", testNS, nil, gvk.AuthorizationPolicy, func(o controllers.Object) {935p := o.(*clientsecurityv1beta1.AuthorizationPolicy)936p.Spec.TargetRef = &v1beta1.PolicyTargetReference{937Group: gvk.KubernetesGateway.Group,938Kind: gvk.KubernetesGateway.Kind,939Name: "dummy-waypoint",940}941})942// there should be no event for creation of a gateway-targeted policy because we should not configure WDS with a policy943// when expressed user intent is specifically to have that policy enforced by a gateway944s.assertNoEvent(t)945}
946
// TestPodLifecycleWorkloadGates verifies how workload health is reported across
// the pod lifecycle: a ready, running pod is HEALTHY, while not-ready or
// pending pods are UNHEALTHY (a pod with no IP produces no workload at all).
func TestPodLifecycleWorkloadGates(t *testing.T) {
	s := newAmbientTestServer(t, "", "")

	// Ready + Running => HEALTHY workload.
	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, "//Pod/ns1/pod1")
	s.assertWorkloads(t, "", workloadapi.WorkloadStatus_HEALTHY, "pod1")

	// pod2: running but not ready; pod3: pending but with an IP assigned;
	// pod4: pending with no IP at all.
	s.addPods(t, "127.0.0.2", "pod2", "sa1", map[string]string{"app": "a", "other": "label"}, nil, false, corev1.PodRunning)
	s.addPods(t, "127.0.0.3", "pod3", "sa1", map[string]string{"app": "other"}, nil, false, corev1.PodPending)
	s.addPods(t, "", "pod4", "sa1", map[string]string{"app": "another"}, nil, false, corev1.PodPending)
	s.assertEvent(t, "//Pod/ns1/pod2")
	// Still healthy
	s.assertWorkloads(t, "", workloadapi.WorkloadStatus_HEALTHY, "pod1")
	// Unhealthy
	s.assertWorkloads(t, "", workloadapi.WorkloadStatus_UNHEALTHY, "pod2", "pod3")
	// pod3 is pending but has been assigned an IP, so it appears (unhealthy)
	// pod4 is pending and has no IP, so it is absent entirely
}
965
966func TestAddressInformation(t *testing.T) {967s := newAmbientTestServer(t, testC, testNW)968
969// Add 2 pods with the "a" label, and one without.970// We should get an event for the new Service and the two *Pod* IPs impacted971s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)972s.assertEvent(t, s.podXdsName("pod1"))973s.addPods(t, "127.0.0.2", "pod2", "sa1", map[string]string{"app": "a", "other": "label"}, nil, true, corev1.PodRunning)974s.assertEvent(t, s.podXdsName("pod2"))975s.addPods(t, "127.0.0.3", "pod3", "sa1", map[string]string{"app": "other"}, nil, true, corev1.PodRunning)976s.assertEvent(t, s.podXdsName("pod3"))977s.clearEvents()978
979// Now add a service that will select pods with label "a".980s.addService(t, "svc1",981map[string]string{},982map[string]string{},983[]int32{80}, map[string]string{"app": "a"}, "10.0.0.1")984s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2"), s.svcXdsName("svc1"))985
986addrs, _ := s.AddressInformation(sets.New[string](s.svcXdsName("svc1"), s.podXdsName("pod2")))987got := sets.New[string]()988for _, addr := range addrs {989if got.Contains(addr.ResourceName()) {990t.Fatalf("got duplicate address %v", addr.ResourceName())991}992got.Insert(addr.ResourceName())993}994}
995
996func TestRBACConvert(t *testing.T) {997files := file.ReadDirOrFail(t, "testdata")998if len(files) == 0 {999// Just in case1000t.Fatal("expected test cases")1001}1002for _, f := range files {1003name := filepath.Base(f)1004if !strings.Contains(name, "-in.yaml") {1005continue1006}1007t.Run(name, func(t *testing.T) {1008pol, _, err := crd.ParseInputs(file.AsStringOrFail(t, f))1009assert.NoError(t, err)1010var o *security.Authorization1011switch pol[0].GroupVersionKind {1012case gvk.AuthorizationPolicy:1013o = convertAuthorizationPolicy(systemNS, &clientsecurityv1beta1.AuthorizationPolicy{1014TypeMeta: metav1.TypeMeta{},1015ObjectMeta: metav1.ObjectMeta{1016Name: pol[0].Name,1017Namespace: pol[0].Namespace,1018},1019Spec: *((pol[0].Spec).(*auth.AuthorizationPolicy)), //nolint: govet1020})1021case gvk.PeerAuthentication:1022o = convertPeerAuthentication(systemNS, &clientsecurityv1beta1.PeerAuthentication{1023TypeMeta: metav1.TypeMeta{},1024ObjectMeta: metav1.ObjectMeta{1025Name: pol[0].Name,1026Namespace: pol[0].Namespace,1027},1028Spec: *((pol[0].Spec).(*auth.PeerAuthentication)), //nolint: govet1029})1030default:1031t.Fatalf("unknown kind %v", pol[0].GroupVersionKind)1032}1033msg := ""1034if o != nil {1035msg, err = protomarshal.ToYAML(o)1036assert.NoError(t, err)1037}1038golden := filepath.Join("testdata", strings.ReplaceAll(name, "-in", ""))1039util.CompareContent(t, []byte(msg), golden)1040})1041}1042}
1043
1044func TestEmptyVIPsExcluded(t *testing.T) {1045testSVC := corev1.Service{1046Spec: corev1.ServiceSpec{1047ClusterIP: "",1048},1049Status: corev1.ServiceStatus{1050LoadBalancer: corev1.LoadBalancerStatus{1051Ingress: []corev1.LoadBalancerIngress{1052{1053IP: "",1054},1055},1056},1057},1058}1059vips := getVIPs(&testSVC)1060assert.Equal(t, 0, len(vips), "optional IP fields should be ignored if empty")1061}
1062
1063// assertWaypointAddressForPod takes a pod name for key and the expected waypoint IP Address
1064// if the IP is empty we assume you're asserting that the pod's waypoint address is nil
1065// will assert that the GW address for the pod's waypoint is the expected address
1066// nolint: unparam
1067func (s *ambientTestServer) assertWaypointAddressForPod(t *testing.T, key, expectedIP string) {1068t.Helper()1069var expectedAddress *workloadapi.GatewayAddress1070if expectedIP != "" { // "" is assumed to mean a nil address1071expectedAddress = &workloadapi.GatewayAddress{1072Destination: &workloadapi.GatewayAddress_Address{1073Address: &workloadapi.NetworkAddress{1074Address: netip.MustParseAddr(expectedIP).AsSlice(),1075},1076},1077HboneMtlsPort: 15008,1078}1079}1080workloads := s.lookup(s.podXdsName(key))1081if len(workloads) < 1 {1082t.Log("no workloads provided, assertion must fail")1083t.Fail()1084}1085for _, workload := range workloads {1086assert.Equal(t, expectedAddress.String(), workload.GetWorkload().GetWaypoint().String())1087}1088}
1089
// TestUpdateWaypointForWorkload exercises waypoint selection precedence for a
// workload: pod-level use-waypoint annotations override the namespace-level
// annotation, and the "#none"/"~" values opt the pod out entirely.
func TestUpdateWaypointForWorkload(t *testing.T) {
	s := newAmbientTestServer(t, "", "")

	// add our waypoints but they won't be used until annotations are added
	// add a new waypoint
	s.addWaypoint(t, "10.0.0.2", "waypoint-sa1", "sa1", true)
	// Add a namespace waypoint to the pod
	s.addWaypoint(t, "10.0.0.1", "waypoint-ns", "", true)

	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertAddresses(t, "", "pod1")
	s.assertEvent(t, s.podXdsName("pod1"))
	// assert that no waypoint is being used
	s.assertWaypointAddressForPod(t, "pod1", "")

	// select a waypoint via the namespace annotation
	s.ns.Update(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: testNS,
			Annotations: map[string]string{
				constants.AmbientUseWaypoint: "waypoint-ns",
			},
		},
	})
	s.assertEvent(t, s.podXdsName("pod1"))
	s.assertWaypointAddressForPod(t, "pod1", "10.0.0.1")

	// annotate pod1 to use a different waypoint than the namespace specifies
	s.annotatePod(t, "pod1", testNS, map[string]string{constants.AmbientUseWaypoint: "waypoint-sa1"})
	s.assertEvent(t, s.podXdsName("pod1"))
	// assert that we're using the correct waypoint for pod1
	s.assertWaypointAddressForPod(t, "pod1", "10.0.0.2")

	// remove the use-waypoint annotation from pod1
	s.annotatePod(t, "pod1", testNS, map[string]string{})
	s.assertEvent(t, s.podXdsName("pod1"))
	// assert that pod1 is using the waypoint specified on the namespace
	s.assertWaypointAddressForPod(t, "pod1", "10.0.0.1")

	// unannotate the namespace too
	s.ns.Update(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:        testNS,
			Annotations: map[string]string{},
		},
	})
	s.assertEvent(t, s.podXdsName("pod1"))
	// assert that we're once again using no waypoint
	s.assertWaypointAddressForPod(t, "pod1", "")

	// annotate pod1 to use a waypoint
	s.annotatePod(t, "pod1", testNS, map[string]string{constants.AmbientUseWaypoint: "waypoint-sa1"})
	s.assertEvent(t, s.podXdsName("pod1"))
	// assert that the correct waypoint was configured
	s.assertWaypointAddressForPod(t, "pod1", "10.0.0.2")

	// add a namespace annotation to use the namespace-scope waypoint
	s.ns.Update(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: testNS,
			Annotations: map[string]string{
				constants.AmbientUseWaypoint: "waypoint-ns",
			},
		},
	})
	// pod1 should not experience any xds event: its own annotation wins
	s.assertNoEvent(t)
	// assert that pod1 is still using the waypoint specified in its annotation
	s.assertWaypointAddressForPod(t, "pod1", "10.0.0.2")

	// assert local waypoint opt-out works as expected
	s.annotatePod(t, "pod1", testNS, map[string]string{constants.AmbientUseWaypoint: "#none"})
	s.assertEvent(t, s.podXdsName("pod1"))
	// assert that we're using no waypoint
	s.assertWaypointAddressForPod(t, "pod1", "")
	// check that the other opt out also works (no event: still no waypoint)
	s.annotatePod(t, "pod1", testNS, map[string]string{constants.AmbientUseWaypoint: "~"})
	s.assertNoEvent(t)
	s.assertWaypointAddressForPod(t, "pod1", "")
}
1170
// TestWorkloadsForWaypoint verifies the reverse lookup used to program a
// waypoint proxy: given a waypoint's network and address, WorkloadsForWaypoint
// returns exactly the workloads currently attached to that waypoint.
func TestWorkloadsForWaypoint(t *testing.T) {
	s := newAmbientTestServer(t, "", testNW)

	// assertWaypoint checks the set (order-insensitive) of workload resource
	// names attached to the waypoint at the given network/address.
	assertWaypoint := func(t *testing.T, waypointNetwork string, waypointAddress string, expected ...string) {
		t.Helper()
		wl := sets.New(slices.Map(s.WorkloadsForWaypoint(model.WaypointKey{
			Network:   waypointNetwork,
			Addresses: []string{waypointAddress},
		}), func(e model.WorkloadInfo) string {
			return e.ResourceName()
		})...)
		assert.Equal(t, wl, sets.New(expected...))
	}
	// Add a namespace waypoint to the pod
	s.addWaypoint(t, "10.0.0.1", "waypoint-ns", "", true)
	s.addWaypoint(t, "10.0.0.2", "waypoint-sa1", "sa1", true)

	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod1"))
	s.addPods(t, "127.0.0.2", "pod2", "sa2", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod2"))

	// Point the whole namespace at waypoint-ns; both pods become attached.
	s.ns.Update(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: testNS,
			Annotations: map[string]string{
				constants.AmbientUseWaypoint: "waypoint-ns",
			},
		},
	})

	s.assertEvent(t, s.podXdsName("pod1"), s.podXdsName("pod2"))
	assertWaypoint(t, testNW, "10.0.0.1", s.podXdsName("pod1"), s.podXdsName("pod2"))
	// TODO: should this be returned? Or should it be filtered because such a waypoint does not exist

	// Add a service account waypoint to the pod; pod1 moves to waypoint-sa1.
	s.annotatePod(t, "pod1", testNS, map[string]string{constants.AmbientUseWaypoint: "waypoint-sa1"})
	s.assertEvent(t, s.podXdsName("pod1"))

	assertWaypoint(t, testNW, "10.0.0.2", s.podXdsName("pod1"))
	assertWaypoint(t, testNW, "10.0.0.1", s.podXdsName("pod2"))

	// Revert back: pod1 falls back to the namespace waypoint.
	s.annotatePod(t, "pod1", testNS, map[string]string{})
	s.assertEvent(t, s.podXdsName("pod1"))

	assertWaypoint(t, testNW, "10.0.0.1", s.podXdsName("pod1"), s.podXdsName("pod2"))
}
1219
// TestWorkloadsForWaypointOrder verifies that WorkloadsForWaypoint returns
// workloads in a deterministic order — here, the order of pod creation.
func TestWorkloadsForWaypointOrder(t *testing.T) {
	test.SetForTest(t, &features.EnableAmbientControllers, true)
	s := newAmbientTestServer(t, "", testNW)

	// assertOrderedWaypoint checks the exact slice (order-sensitive) of
	// workload resource names attached to the waypoint.
	assertOrderedWaypoint := func(t *testing.T, network, address string, expected ...string) {
		t.Helper()
		wls := s.WorkloadsForWaypoint(model.WaypointKey{
			Network:   network,
			Addresses: []string{address},
		})
		wl := make([]string, len(wls))
		for i, e := range wls {
			wl[i] = e.ResourceName()
		}
		assert.Equal(t, wl, expected)
	}
	s.addWaypoint(t, "10.0.0.1", "waypoint", "", true)

	// expected order is pod3, pod1, pod2, which is the order of creation
	s.addPods(t,
		"127.0.0.3",
		"pod3",
		"sa3",
		map[string]string{"app": "a"},
		map[string]string{constants.AmbientUseWaypoint: "waypoint"},
		true,
		corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod3"))
	s.addPods(t,
		"127.0.0.1",
		"pod1",
		"sa1",
		map[string]string{"app": "a"},
		map[string]string{constants.AmbientUseWaypoint: "waypoint"},
		true,
		corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod1"))
	s.addPods(t,
		"127.0.0.2",
		"pod2",
		"sa2",
		map[string]string{"app": "a"},
		map[string]string{constants.AmbientUseWaypoint: "waypoint"},
		true,
		corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod2"))
	assertOrderedWaypoint(t, testNW, "10.0.0.1",
		s.podXdsName("pod3"), s.podXdsName("pod1"), s.podXdsName("pod2"))
}
1269
// This is a regression test for a case where policies added after pods were not applied when
// querying by service
func TestPolicyAfterPod(t *testing.T) {
	s := newAmbientTestServer(t, testC, testNW)

	s.addService(t, "svc1",
		map[string]string{},
		map[string]string{},
		[]int32{80}, map[string]string{"app": "a"}, "10.0.0.1")
	s.assertEvent(t, s.svcXdsName("svc1"))
	s.addPods(t, "127.0.0.1", "pod1", "sa1", map[string]string{"app": "a"}, nil, true, corev1.PodRunning)
	s.assertEvent(t, s.podXdsName("pod1"))
	s.addPolicy(t, "selector", testNS, map[string]string{"app": "a"}, gvk.AuthorizationPolicy, nil)
	s.assertEvent(t, s.podXdsName("pod1"))
	// Index [1] is pod1's workload entry in the service lookup result
	// (presumably index 0 is the service address itself — verify against lookup ordering).
	assert.Equal(t, s.lookup(s.svcXdsName("svc1"))[1].GetWorkload().GetAuthorizationPolicies(), []string{"ns1/selector"})
}
1286
1287type ambientTestServer struct {1288*index1289clusterID cluster.ID1290network network.ID1291fx *xdsfake.Updater1292pc clienttest.TestClient[*corev1.Pod]1293sc clienttest.TestWriter[*corev1.Service]1294ns clienttest.TestWriter[*corev1.Namespace]1295grc clienttest.TestWriter[*k8sbeta.Gateway]1296se clienttest.TestWriter[*apiv1alpha3.ServiceEntry]1297we clienttest.TestWriter[*apiv1alpha3.WorkloadEntry]1298pa clienttest.TestWriter[*clientsecurityv1beta1.PeerAuthentication]1299authz clienttest.TestWriter[*clientsecurityv1beta1.AuthorizationPolicy]1300t *testing.T1301}
1302
// newAmbientTestServer builds an ambient index backed by a fake Kubernetes
// client and fake XDS updater, registers the CRDs the tests rely on, and
// pre-creates the ambient-enabled test namespace.
func newAmbientTestServer(t *testing.T, clusterID cluster.ID, networkID network.ID) *ambientTestServer {
	up := xdsfake.NewFakeXDS()
	// Deliver one event per resource rather than batched updates, so tests
	// can assert on individual resource names.
	up.SplitEvents = true
	cl := kubeclient.NewFakeClient()
	// Register the CRDs used by the tests so informers for them start.
	for _, crd := range []schema.GroupVersionResource{
		gvr.AuthorizationPolicy,
		gvr.PeerAuthentication,
		gvr.KubernetesGateway,
		gvr.WorkloadEntry,
		gvr.ServiceEntry,
	} {
		clienttest.MakeCRD(t, cl, crd)
	}
	idx := New(Options{
		Client:          cl,
		SystemNamespace: systemNS,
		DomainSuffix:    "company.com",
		ClusterID:       clusterID,
		XDSUpdater:      up,
		// Every endpoint is reported on the single test network.
		LookupNetwork: func(endpointIP string, labels labels.Instance) network.ID {
			return networkID
		},
	})
	cl.RunAndWait(test.NewStop(t))

	// On failure, dump the krt collections to aid debugging.
	t.Cleanup(func() {
		if t.Failed() {
			idx := idx.(*index)
			krt.Dump(idx.authorizationPolicies)
			krt.Dump(idx.workloads.Collection)
			krt.Dump(idx.services.Collection)
			krt.Dump(idx.waypoints.Collection)
		}
	})
	a := &ambientTestServer{
		t:         t,
		clusterID: clusterID,
		network:   networkID,
		index:     idx.(*index),
		fx:        up,
		pc:        clienttest.NewDirectClient[*corev1.Pod, corev1.Pod, *corev1.PodList](t, cl),
		sc:        clienttest.NewWriter[*corev1.Service](t, cl),
		ns:        clienttest.NewWriter[*corev1.Namespace](t, cl),
		grc:       clienttest.NewWriter[*k8sbeta.Gateway](t, cl),
		se:        clienttest.NewWriter[*apiv1alpha3.ServiceEntry](t, cl),
		we:        clienttest.NewWriter[*apiv1alpha3.WorkloadEntry](t, cl),
		pa:        clienttest.NewWriter[*clientsecurityv1beta1.PeerAuthentication](t, cl),
		authz:     clienttest.NewWriter[*clientsecurityv1beta1.AuthorizationPolicy](t, cl),
	}

	// ns is more important now that we want to be able to annotate ns for svc, wl waypoint selection
	// always create the testNS enabled for ambient
	a.ns.Create(&corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name:   testNS,
			Labels: map[string]string{"istio.io/dataplane-mode": "ambient"},
		},
	})

	return a
}
1364
1365func (s *ambientTestServer) addWaypoint(t *testing.T, ip, name, sa string, ready bool) {1366t.Helper()1367
1368fromSame := k8sv1.NamespacesFromSame1369gatewaySpec := k8sbeta.GatewaySpec{1370GatewayClassName: constants.WaypointGatewayClassName,1371Listeners: []k8sbeta.Listener{1372{1373Name: "mesh",1374Port: 15008,1375Protocol: "HBONE",1376AllowedRoutes: &k8sbeta.AllowedRoutes{1377Namespaces: &k8sbeta.RouteNamespaces{1378From: &fromSame,1379},1380},1381},1382},1383}1384
1385gateway := k8sbeta.Gateway{1386TypeMeta: metav1.TypeMeta{1387Kind: gvk.KubernetesGateway.Kind,1388APIVersion: gvk.KubernetesGateway.GroupVersion(),1389},1390ObjectMeta: metav1.ObjectMeta{1391Name: name,1392Namespace: testNS,1393},1394Spec: gatewaySpec,1395Status: k8sbeta.GatewayStatus{},1396}1397if sa != "" {1398annotations := make(map[string]string, 1)1399annotations[constants.WaypointServiceAccount] = sa1400gateway.Annotations = annotations1401}1402if ready {1403addrType := k8sbeta.IPAddressType1404gateway.Status = k8sbeta.GatewayStatus{1405// addresses:1406// - type: IPAddress1407// value: 10.96.59.1881408Addresses: []k8sv1.GatewayStatusAddress{1409{1410Type: &addrType,1411Value: ip,1412},1413},1414}1415}1416s.grc.CreateOrUpdate(&gateway)1417}
1418
1419func (s *ambientTestServer) deleteWaypoint(t *testing.T, name string) {1420t.Helper()1421s.grc.Delete(name, testNS)1422}
1423
1424func (s *ambientTestServer) addPods(t *testing.T, ip string, name, sa string, labels map[string]string,1425annotations map[string]string, markReady bool, phase corev1.PodPhase,1426) {1427t.Helper()1428pod := generatePod(ip, name, testNS, sa, "node1", labels, annotations)1429
1430p := s.pc.Get(name, pod.Namespace)1431if p == nil {1432// Apiserver doesn't allow Create to modify the pod status; in real world it's a 2 part process1433pod.Status = corev1.PodStatus{}1434newPod := s.pc.Create(pod)1435if markReady {1436setPodReady(newPod)1437}1438newPod.Status.PodIP = ip1439newPod.Status.Phase = phase1440newPod.Status.PodIPs = []corev1.PodIP{1441{1442IP: ip,1443},1444}1445s.pc.UpdateStatus(newPod)1446} else {1447s.pc.Update(pod)1448}1449}
1450
1451// just overwrites the annotations
1452// nolint: unparam
1453func (s *ambientTestServer) annotatePod(t *testing.T, name, ns string, annotations map[string]string) {1454t.Helper()1455
1456p := s.pc.Get(name, ns)1457if p == nil {1458return1459}1460p.ObjectMeta.Annotations = annotations1461s.pc.Update(p)1462}
1463
1464func (s *ambientTestServer) addWorkloadEntries(t *testing.T, ip string, name, sa string, labels map[string]string) {1465t.Helper()1466s.we.CreateOrUpdate(generateWorkloadEntry(ip, name, "ns1", sa, labels, nil))1467}
1468
1469func generateWorkloadEntry(ip, name, namespace, saName string, labels map[string]string, annotations map[string]string) *apiv1alpha3.WorkloadEntry {1470return &apiv1alpha3.WorkloadEntry{1471ObjectMeta: metav1.ObjectMeta{1472Name: name,1473Labels: labels,1474Annotations: annotations,1475Namespace: namespace,1476},1477Spec: v1alpha3.WorkloadEntry{1478Address: ip,1479ServiceAccount: saName,1480Labels: labels,1481},1482}1483}
1484
1485func (s *ambientTestServer) deleteWorkloadEntry(t *testing.T, name string) {1486t.Helper()1487s.we.Delete(name, "ns1")1488}
1489
// addServiceEntry creates or updates a ServiceEntry. If epAddresses is nil the
// entry selects workloads by label; otherwise it gets explicit endpoints (see
// generateServiceEntry).
func (s *ambientTestServer) addServiceEntry(t *testing.T,
	hostStr string,
	addresses []string,
	name,
	ns string,
	labels map[string]string,
	epAddresses []string,
) {
	t.Helper()

	se := &apiv1alpha3.ServiceEntry{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
			Labels:    labels,
		},
		Spec:   *generateServiceEntry(hostStr, addresses, labels, epAddresses),
		Status: v1alpha1.IstioStatus{},
	}
	s.se.CreateOrUpdate(se)
}
1511
1512func generateServiceEntry(host string, addresses []string, labels map[string]string, epAddresses []string) *v1alpha3.ServiceEntry {1513var endpoints []*v1alpha3.WorkloadEntry1514var workloadSelector *v1alpha3.WorkloadSelector1515
1516if epAddresses == nil {1517workloadSelector = &v1alpha3.WorkloadSelector{1518Labels: labels,1519}1520} else {1521endpoints = []*v1alpha3.WorkloadEntry{}1522for _, addr := range epAddresses {1523endpoints = append(endpoints, &v1alpha3.WorkloadEntry{1524Address: addr,1525Labels: labels,1526Ports: map[string]uint32{1527"http": 8081, // we will override the SE http port1528},1529})1530}1531}1532
1533return &v1alpha3.ServiceEntry{1534Hosts: []string{host},1535Addresses: addresses,1536Ports: []*v1alpha3.ServicePort{1537{1538Name: "http",1539Number: 80,1540TargetPort: 8080,1541},1542},1543WorkloadSelector: workloadSelector,1544Endpoints: endpoints,1545}1546}
1547
1548func (s *ambientTestServer) deleteServiceEntry(t *testing.T, name, ns string) {1549t.Helper()1550s.se.Delete(name, ns)1551}
1552
1553func (s *ambientTestServer) assertAddresses(t *testing.T, lookup string, names ...string) {1554t.Helper()1555want := sets.New(names...)1556assert.EventuallyEqual(t, func() sets.String {1557addresses := s.lookup(lookup)1558have := sets.New[string]()1559for _, address := range addresses {1560switch addr := address.Address.Type.(type) {1561case *workloadapi.Address_Workload:1562have.Insert(addr.Workload.Name)1563case *workloadapi.Address_Service:1564have.Insert(addr.Service.Name)1565}1566}1567return have1568}, want, retry.Timeout(time.Second*3))1569}
1570
1571func (s *ambientTestServer) assertWorkloads(t *testing.T, lookup string, state workloadapi.WorkloadStatus, names ...string) {1572t.Helper()1573want := sets.New(names...)1574assert.EventuallyEqual(t, func() sets.String {1575workloads := s.lookup(lookup)1576have := sets.New[string]()1577for _, wl := range workloads {1578switch addr := wl.Address.Type.(type) {1579case *workloadapi.Address_Workload:1580if addr.Workload.Status == state {1581have.Insert(addr.Workload.Name)1582}1583}1584}1585return have1586}, want, retry.Timeout(time.Second*3))1587}
1588
1589// Make sure there are no two workloads in the index with similar UIDs
1590func (s *ambientTestServer) assertUniqueWorkloads(t *testing.T) {1591t.Helper()1592uids := sets.New[string]()1593workloads := s.lookup("")1594for _, wl := range workloads {1595if wl.GetWorkload() != nil && uids.InsertContains(wl.GetWorkload().GetUid()) {1596t.Fatal("Index has workloads with the same UID")1597}1598}1599}
1600
1601func (s *ambientTestServer) deletePolicy(name, ns string, kind config.GroupVersionKind,1602) {1603switch kind {1604case gvk.AuthorizationPolicy:1605s.authz.Delete(name, ns)1606case gvk.PeerAuthentication:1607s.pa.Delete(name, ns)1608}1609}
1610
1611func (s *ambientTestServer) addPolicy(t *testing.T, name, ns string, selector map[string]string,1612kind config.GroupVersionKind, modify func(controllers.Object),1613) {1614t.Helper()1615var sel *v1beta1.WorkloadSelector1616if selector != nil {1617sel = &v1beta1.WorkloadSelector{1618MatchLabels: selector,1619}1620}1621switch kind {1622case gvk.AuthorizationPolicy:1623pol := &clientsecurityv1beta1.AuthorizationPolicy{1624ObjectMeta: metav1.ObjectMeta{1625Name: name,1626Namespace: ns,1627},1628Spec: auth.AuthorizationPolicy{1629Selector: sel,1630},1631}1632if modify != nil {1633modify(pol)1634}1635s.authz.CreateOrUpdate(pol)1636case gvk.PeerAuthentication:1637pol := &clientsecurityv1beta1.PeerAuthentication{1638ObjectMeta: metav1.ObjectMeta{1639Name: name,1640Namespace: ns,1641},1642Spec: auth.PeerAuthentication{1643Selector: sel,1644},1645}1646if modify != nil {1647modify(pol)1648}1649s.pa.CreateOrUpdate(pol)1650}1651}
1652
1653func (s *ambientTestServer) deletePod(t *testing.T, name string) {1654t.Helper()1655s.pc.Delete(name, testNS)1656}
1657
1658func (s *ambientTestServer) assertEvent(t *testing.T, ip ...string) {1659t.Helper()1660s.assertUnorderedEvent(t, ip...)1661}
1662
1663func (s *ambientTestServer) assertUnorderedEvent(t *testing.T, ip ...string) {1664t.Helper()1665ev := []xdsfake.Event{}1666for _, i := range ip {1667ev = append(ev, xdsfake.Event{Type: "xds", ID: i})1668}1669s.fx.MatchOrFail(t, ev...)1670}
1671
1672func (s *ambientTestServer) assertNoEvent(t *testing.T) {1673t.Helper()1674s.fx.AssertEmpty(t, time.Millisecond*10)1675}
1676
1677func (s *ambientTestServer) deleteService(t *testing.T, name string) {1678t.Helper()1679s.sc.Delete(name, testNS)1680}
1681
1682func (s *ambientTestServer) addService(t *testing.T, name string, labels, annotations map[string]string,1683ports []int32, selector map[string]string, ip string,1684) {1685t.Helper()1686service := generateService(name, testNS, labels, annotations, ports, selector, ip)1687s.sc.CreateOrUpdate(service)1688}
1689
1690func (s *ambientTestServer) lookup(key string) []model.AddressInfo {1691if key == "" {1692return s.All()1693}1694return s.Lookup(key)1695}
1696
1697func (s *ambientTestServer) clearEvents() {1698s.fx.Clear()1699}
1700
1701// Returns the XDS resource name for the given pod.
1702func (s *ambientTestServer) podXdsName(name string) string {1703return fmt.Sprintf("%s//Pod/%s/%s",1704s.clusterID, testNS, name)1705}
1706
1707// Returns the XDS resource name for the given address.
1708func (s *ambientTestServer) addrXdsName(addr string) string {1709return string(s.network) + "/" + addr1710}
1711
1712// Returns the XDS resource name for the given service.
1713func (s *ambientTestServer) svcXdsName(serviceName string) string {1714return fmt.Sprintf("%s/%s", testNS, s.hostnameForService(serviceName))1715}
1716
1717// Returns the hostname for the given service.
1718func (s *ambientTestServer) hostnameForService(serviceName string) string {1719return fmt.Sprintf("%s.%s.svc.company.com", serviceName, testNS)1720}
1721
1722// Returns the XDS resource name for the given WorkloadEntry.
1723func (s *ambientTestServer) wleXdsName(wleName string) string {1724return fmt.Sprintf("%s/networking.istio.io/WorkloadEntry/%s/%s",1725s.clusterID, testNS, wleName)1726}
1727
1728// Returns the XDS resource name for the given ServiceEntry IP address.
1729func (s *ambientTestServer) seIPXdsName(name string, ip string) string {1730return fmt.Sprintf("%s/networking.istio.io/ServiceEntry/%s/%s/%s",1731s.clusterID, testNS, name, ip)1732}
1733
1734func generatePod(ip, name, namespace, saName, node string, labels map[string]string, annotations map[string]string) *corev1.Pod {1735automount := false1736return &corev1.Pod{1737ObjectMeta: metav1.ObjectMeta{1738Name: name,1739Labels: labels,1740Annotations: annotations,1741Namespace: namespace,1742CreationTimestamp: metav1.Time{1743Time: time.Now(),1744},1745},1746Spec: corev1.PodSpec{1747ServiceAccountName: saName,1748NodeName: node,1749AutomountServiceAccountToken: &automount,1750// Validation requires this1751Containers: []corev1.Container{1752{1753Name: "test",1754Image: "ununtu",1755},1756},1757},1758// The cache controller uses this as key, required by our impl.1759Status: corev1.PodStatus{1760Conditions: []corev1.PodCondition{1761{1762Type: corev1.PodReady,1763Status: corev1.ConditionTrue,1764LastTransitionTime: metav1.Now(),1765},1766},1767PodIP: ip,1768HostIP: ip,1769PodIPs: []corev1.PodIP{1770{1771IP: ip,1772},1773},1774Phase: corev1.PodRunning,1775},1776}1777}
1778
1779func setPodReady(pod *corev1.Pod) {1780pod.Status.Conditions = []corev1.PodCondition{1781{1782Type: corev1.PodReady,1783Status: corev1.ConditionTrue,1784LastTransitionTime: metav1.Now(),1785},1786}1787}
1788
// generateService builds a ClusterIP Service fixture with one ServicePort per
// entry in ports, all selecting pods via selector.
func generateService(name, namespace string, labels, annotations map[string]string,
	ports []int32, selector map[string]string, ip string,
) *corev1.Service {
	svcPorts := make([]corev1.ServicePort, 0)
	// NOTE(review): every port shares the name "tcp-port", and "http" is not a
	// valid corev1.Protocol (TCP/UDP/SCTP). The fake client does not validate
	// either, and current callers pass a single port — confirm before reusing
	// this helper with multiple ports.
	for _, p := range ports {
		svcPorts = append(svcPorts, corev1.ServicePort{
			Name:     "tcp-port",
			Port:     p,
			Protocol: "http",
		})
	}

	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   namespace,
			Annotations: annotations,
			Labels:      labels,
		},
		Spec: corev1.ServiceSpec{
			ClusterIP: ip,
			Ports:     svcPorts,
			Selector:  selector,
			Type:      corev1.ServiceTypeClusterIP,
		},
	}
}
1816