istio
327 lines · 11.0 KB
1// Copyright Istio Authors
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package controller
16
17import (
18"context"
19"fmt"
20"reflect"
21"testing"
22"time"
23
24v1 "k8s.io/api/core/v1"
25metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
26"k8s.io/apimachinery/pkg/types"
27"k8s.io/client-go/kubernetes"
28
29"istio.io/istio/pilot/pkg/model"
30"istio.io/istio/pilot/pkg/serviceregistry/util/xdsfake"
31"istio.io/istio/pkg/config/labels"
32"istio.io/istio/pkg/kube/kclient/clienttest"
33"istio.io/istio/pkg/test"
34"istio.io/istio/pkg/test/util/assert"
35"istio.io/istio/pkg/test/util/retry"
36"istio.io/istio/pkg/util/sets"
37)
38
39// Prepare k8s. This can be used in multiple tests, to
40// avoid duplicating creation, which can be tricky. It can be used with the fake or
41// standalone apiserver.
42func initTestEnv(t *testing.T, ki kubernetes.Interface, fx *xdsfake.Updater) {
43cleanup(ki)
44for _, n := range []string{"nsa", "nsb"} {
45_, err := ki.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{
46ObjectMeta: metav1.ObjectMeta{
47Name: n,
48Labels: map[string]string{
49"istio-injection": "enabled",
50},
51},
52}, metav1.CreateOptions{})
53if err != nil {
54t.Fatalf("failed creating test namespace: %v", err)
55}
56
57// K8S 1.10 also checks if service account exists
58_, err = ki.CoreV1().ServiceAccounts(n).Create(context.TODO(), &v1.ServiceAccount{
59ObjectMeta: metav1.ObjectMeta{
60Name: "default",
61Annotations: map[string]string{
62"kubernetes.io/enforce-mountable-secrets": "false",
63},
64},
65Secrets: []v1.ObjectReference{
66{
67Name: "default-token-2",
68UID: "1",
69},
70},
71}, metav1.CreateOptions{})
72if err != nil {
73t.Fatalf("failed creating test service account: %v", err)
74}
75
76_, err = ki.CoreV1().Secrets(n).Create(context.TODO(), &v1.Secret{
77ObjectMeta: metav1.ObjectMeta{
78Name: "default-token-2",
79Annotations: map[string]string{
80"kubernetes.io/service-account.name": "default",
81"kubernetes.io/service-account.uid": "1",
82},
83},
84Type: v1.SecretTypeServiceAccountToken,
85Data: map[string][]byte{
86"token": []byte("1"),
87},
88}, metav1.CreateOptions{})
89if err != nil {
90t.Fatalf("failed creating test secret: %v", err)
91}
92}
93fx.Clear()
94}
95
96func cleanup(ki kubernetes.Interface) {
97for _, n := range []string{"nsa", "nsb"} {
98n := n
99pods, err := ki.CoreV1().Pods(n).List(context.TODO(), metav1.ListOptions{})
100if err == nil {
101// Make sure the pods don't exist
102for _, pod := range pods.Items {
103_ = ki.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
104}
105}
106}
107}
108
109func TestPodCache(t *testing.T) {
110c, fx := NewFakeControllerWithOptions(t, FakeControllerOptions{
111WatchedNamespaces: "nsa,nsb",
112})
113
114initTestEnv(t, c.client.Kube(), fx)
115
116// Namespace must be lowercase (nsA doesn't work)
117pods := []*v1.Pod{
118generatePod("128.0.0.1", "cpod1", "nsa", "", "", map[string]string{"app": "test-app"}, map[string]string{}),
119generatePod("128.0.0.2", "cpod2", "nsa", "", "", map[string]string{"app": "prod-app-1"}, map[string]string{}),
120generatePod("128.0.0.3", "cpod3", "nsb", "", "", map[string]string{"app": "prod-app-2"}, map[string]string{}),
121}
122
123addPods(t, c, fx, pods...)
124
125// Verify podCache
126wantLabels := map[string]labels.Instance{
127"128.0.0.1": {"app": "test-app"},
128"128.0.0.2": {"app": "prod-app-1"},
129"128.0.0.3": {"app": "prod-app-2"},
130}
131for addr, wantTag := range wantLabels {
132pod := c.pods.getPodsByIP(addr)
133if pod == nil {
134t.Error("Not found ", addr)
135continue
136}
137if !reflect.DeepEqual(wantTag, labels.Instance(pod[0].Labels)) {
138t.Errorf("Expected %v got %v", wantTag, labels.Instance(pod[0].Labels))
139}
140}
141
142// This pod exists, but should not be in the cache because it is in a
143// namespace not watched by the controller.
144assert.Equal(t, c.pods.getPodsByIP("128.0.0.4"), nil)
145
146// This pod should not be in the cache because it never existed.
147assert.Equal(t, c.pods.getPodsByIP("128.0.0.128"), nil)
148}
149
150func TestHostNetworkPod(t *testing.T) {
151c, fx := NewFakeControllerWithOptions(t, FakeControllerOptions{})
152pods := clienttest.Wrap(t, c.podsClient)
153events := assert.NewTracker[string](t)
154c.AppendWorkloadHandler(func(instance *model.WorkloadInstance, event model.Event) {
155events.Record(fmt.Sprintf("%v/%v", instance.Name, event))
156})
157initTestEnv(t, c.client.Kube(), fx)
158createPod := func(ip, name string) {
159addPods(t, c, fx, generatePod(ip, name, "ns", "1", "", map[string]string{}, map[string]string{}))
160}
161
162createPod("128.0.0.1", "pod1")
163assert.Equal(t, c.pods.getPodKeys("128.0.0.1"), []types.NamespacedName{{Name: "pod1", Namespace: "ns"}})
164events.WaitOrdered("pod1/add", "pod1/update")
165createPod("128.0.0.1", "pod2")
166events.WaitOrdered("pod2/add", "pod2/update")
167assert.Equal(t, sets.New(c.pods.getPodKeys("128.0.0.1")...), sets.New(
168types.NamespacedName{Name: "pod1", Namespace: "ns"},
169types.NamespacedName{Name: "pod2", Namespace: "ns"},
170))
171
172p := c.pods.getPodByKey(types.NamespacedName{Name: "pod1", Namespace: "ns"})
173if p == nil || p.Name != "pod1" {
174t.Fatalf("unexpected pod: %v", p)
175}
176pods.Delete("pod1", "ns")
177pods.Delete("pod2", "ns")
178events.WaitOrdered("pod1/delete", "pod2/delete")
179}
180
181// Regression test for https://github.com/istio/istio/issues/20676
182func TestIPReuse(t *testing.T) {
183c, fx := NewFakeControllerWithOptions(t, FakeControllerOptions{})
184pods := clienttest.Wrap(t, c.podsClient)
185initTestEnv(t, c.client.Kube(), fx)
186
187createPod := func(ip, name string) {
188addPods(t, c, fx, generatePod(ip, name, "ns", "1", "", map[string]string{}, map[string]string{}))
189}
190
191createPod("128.0.0.1", "pod")
192assert.Equal(t, c.pods.getPodKeys("128.0.0.1"), []types.NamespacedName{{Name: "pod", Namespace: "ns"}})
193
194// Change the pod IP. This can happen if the pod moves to another node, for example.
195createPod("128.0.0.2", "pod")
196assert.Equal(t, c.pods.getPodKeys("128.0.0.2"), []types.NamespacedName{{Name: "pod", Namespace: "ns"}})
197assert.Equal(t, c.pods.getPodKeys("128.0.0.1"), nil)
198
199// A new pod is created with the old IP. We should get new-pod, not pod
200createPod("128.0.0.1", "new-pod")
201assert.Equal(t, c.pods.getPodKeys("128.0.0.1"), []types.NamespacedName{{Name: "new-pod", Namespace: "ns"}})
202
203// A new pod is created with the same IP. This happens with hostNetwork, or maybe we miss an update somehow.
204createPod("128.0.0.1", "another-pod")
205assert.Equal(t, sets.New(c.pods.getPodKeys("128.0.0.1")...), sets.New(
206types.NamespacedName{Name: "new-pod", Namespace: "ns"},
207types.NamespacedName{Name: "another-pod", Namespace: "ns"},
208))
209
210fetch := func() sets.Set[types.NamespacedName] { return sets.New(c.pods.getPodKeys("128.0.0.1")...) }
211pods.Delete("another-pod", "ns")
212assert.EventuallyEqual(t, fetch, sets.New(types.NamespacedName{Name: "new-pod", Namespace: "ns"}))
213
214pods.Delete("new-pod", "ns")
215assert.EventuallyEqual(t, fetch, nil)
216}
217
218func waitForPod(t test.Failer, c *FakeController, ip string) {
219retry.UntilOrFail(t, func() bool {
220c.pods.RLock()
221defer c.pods.RUnlock()
222if _, ok := c.pods.podsByIP[ip]; ok {
223return true
224}
225return false
226})
227}
228
229func waitForNode(t test.Failer, c *FakeController, name string) {
230retry.UntilOrFail(t, func() bool {
231return c.nodes.Get(name, "") != nil
232}, retry.Timeout(time.Second*1), retry.Delay(time.Millisecond*5))
233}
234
235// Checks that events from the watcher create the proper internal structures
236func TestPodCacheEvents(t *testing.T) {
237t.Parallel()
238c, _ := NewFakeControllerWithOptions(t, FakeControllerOptions{})
239
240ns := "default"
241podCache := c.pods
242
243handled := 0
244podCache.c.handlers.AppendWorkloadHandler(func(*model.WorkloadInstance, model.Event) {
245handled++
246})
247
248f := podCache.onEvent
249
250ip := "172.0.3.35"
251pod1 := metav1.ObjectMeta{Name: "pod1", Namespace: ns}
252if err := f(nil, &v1.Pod{ObjectMeta: pod1}, model.EventAdd); err != nil {
253t.Error(err)
254}
255
256notReadyCondition := []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}}
257readyCondition := []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
258
259if err := f(nil,
260&v1.Pod{ObjectMeta: pod1, Status: v1.PodStatus{Conditions: notReadyCondition, PodIP: ip, Phase: v1.PodPending}},
261model.EventUpdate); err != nil {
262t.Error(err)
263}
264if handled != 0 {
265t.Errorf("notified workload handler %d times, want %d", handled, 0)
266}
267
268if err := f(nil, &v1.Pod{ObjectMeta: pod1, Status: v1.PodStatus{Conditions: readyCondition, PodIP: ip, Phase: v1.PodPending}}, model.EventUpdate); err != nil {
269t.Error(err)
270}
271if handled != 1 {
272t.Errorf("notified workload handler %d times, want %d", handled, 1)
273}
274assert.Equal(t, c.pods.getPodKeys(ip), []types.NamespacedName{{Name: "pod1", Namespace: "default"}})
275
276if err := f(nil,
277&v1.Pod{ObjectMeta: pod1, Status: v1.PodStatus{Conditions: readyCondition, PodIP: ip, Phase: v1.PodFailed}}, model.EventUpdate); err != nil {
278t.Error(err)
279}
280if handled != 2 {
281t.Errorf("notified workload handler %d times, want %d", handled, 2)
282}
283assert.Equal(t, podCache.getPodKeys(ip), nil)
284
285pod1.DeletionTimestamp = &metav1.Time{Time: time.Now()}
286if err := f(nil, &v1.Pod{ObjectMeta: pod1, Status: v1.PodStatus{PodIP: ip, Phase: v1.PodFailed}}, model.EventUpdate); err != nil {
287t.Error(err)
288}
289if handled != 2 {
290t.Errorf("notified workload handler %d times, want %d", handled, 2)
291}
292
293pod2 := metav1.ObjectMeta{Name: "pod2", Namespace: ns}
294if err := f(nil, &v1.Pod{ObjectMeta: pod2, Status: v1.PodStatus{Conditions: readyCondition, PodIP: ip, Phase: v1.PodRunning}}, model.EventAdd); err != nil {
295t.Error(err)
296}
297if handled != 3 {
298t.Errorf("notified workload handler %d times, want %d", handled, 3)
299}
300assert.Equal(t, sets.New(c.pods.getPodKeys(ip)...), sets.New(types.NamespacedName{Name: "pod2", Namespace: "default"}))
301
302if err := f(nil, &v1.Pod{ObjectMeta: pod1, Status: v1.PodStatus{PodIP: ip, Phase: v1.PodFailed}}, model.EventDelete); err != nil {
303t.Error(err)
304}
305if handled != 3 {
306t.Errorf("notified workload handler %d times, want %d", handled, 3)
307}
308assert.Equal(t, sets.New(c.pods.getPodKeys(ip)...), sets.New(types.NamespacedName{Name: "pod2", Namespace: "default"}))
309
310if err := f(nil, &v1.Pod{ObjectMeta: pod2, Spec: v1.PodSpec{
311RestartPolicy: v1.RestartPolicyOnFailure,
312}, Status: v1.PodStatus{Conditions: readyCondition, PodIP: ip, Phase: v1.PodFailed}}, model.EventUpdate); err != nil {
313t.Error(err)
314}
315if handled != 4 {
316t.Errorf("notified workload handler %d times, want %d", handled, 4)
317}
318
319assert.Equal(t, c.pods.getPodsByIP(ip), nil)
320
321if err := f(nil, &v1.Pod{ObjectMeta: pod2, Status: v1.PodStatus{Conditions: readyCondition, PodIP: ip, Phase: v1.PodFailed}}, model.EventDelete); err != nil {
322t.Error(err)
323}
324if handled != 4 {
325t.Errorf("notified workload handler %d times, want %d", handled, 5)
326}
327}
328