podman

Форк
0
/
200-pod.bats 
765 строк · 27.7 Кб
1
#!/usr/bin/env bats
2

3
load helpers
4
load helpers.network
5

6
LOOPDEVICE=
7

8
# This is a long ugly way to clean up pods and remove the pause image
# Runs after every test: remove all pods and containers, drop the pause
# image, then release any loop device left over by the limits test.
function teardown() {
    run_podman pod rm -f -t 0 -a
    run_podman rm -f -t 0 -a
    run_podman rmi --ignore $(pause_image)
    basic_teardown
    if [[ -n "$LOOPDEVICE" ]]; then
        # Quote the device path (fix: was unquoted) and clear the global
        # so a later teardown cannot try to detach it a second time.
        losetup -d "$LOOPDEVICE"
        LOOPDEVICE=
    fi
}
18

19

20
@test "podman pod - basic tests" {
    # Baseline sanity check: with no pods present, every "suppress the
    # header" spelling of the list command must produce empty output.
    run_podman pod list --noheading
    is "$output" "" "baseline: empty results from list --noheading"

    run_podman pod ls -n
    is "$output" "" "baseline: empty results from ls -n"

    run_podman pod ps --noheading
    is "$output" "" "baseline: empty results from ps --noheading"
}
30

31
@test "podman pod top - containers in different PID namespaces" {
    # With infra=false, we don't get a /pause container
    no_infra='--infra=false'
    run_podman pod create $no_infra
    podid="$output"

    # Start two containers...
    run_podman run -d --pod $podid $IMAGE top -d 2
    cid1="$output"
    run_podman run -d --pod $podid $IMAGE top -d 2
    cid2="$output"

    # ...and wait for them to actually start.
    wait_for_output "PID \+PPID \+USER " $cid1
    wait_for_output "PID \+PPID \+USER " $cid2

    # Both containers have emitted at least one top-like line.
    # Now run 'pod top', and expect two 'top -d 2' processes running.
    run_podman pod top $podid
    is "$output" ".*root.*top -d 2.*root.*top -d 2" "two 'top' containers"

    # By default (podman pod create w/ default --infra) there should be
    # a /pause container.
    # NOTE(review): as written, no_infra is always non-empty above, so this
    # branch never runs; kept for the day the test switches to --infra=true.
    if [ -z "$no_infra" ]; then
        is "$output" ".*0 \+1 \+0 \+[0-9. ?s]\+/pause" "there is a /pause container"
    fi

    # Cannot remove pod while containers are still running. Error messages
    # differ slightly between local and remote; these are the common elements.
    run_podman 125 pod rm $podid
    assert "${lines[0]}" =~ "Error: not all containers could be removed from pod $podid: removing pod containers.*" \
           "pod rm while busy: error message line 1 of 3"
    assert "${lines[1]}" =~ "cannot remove container .* as it is running - running or paused containers cannot be removed without force: container state improper" \
           "pod rm while busy: error message line 2 of 3"
    assert "${lines[2]}" =~ "cannot remove container .* as it is running - running or paused containers cannot be removed without force: container state improper" \
           "pod rm while busy: error message line 3 of 3"

    # Clean up
    run_podman --noout pod rm -f -t 0 $podid
    is "$output" "" "output should be empty"
}
72

73

74
@test "podman pod create - custom volumes" {
    skip_if_remote "CONTAINERS_CONF_OVERRIDE only affects server side"
    # (fix: removed unused image=... variable, copied by mistake from the
    # custom-infra-image test below; this test never references it)
    tmpdir=$PODMAN_TMPDIR/pod-test
    mkdir -p $tmpdir
    containersconf=$tmpdir/containers.conf
    cat >$containersconf <<EOF
[containers]
volumes = ["/tmp:/foobar"]
EOF

    # Pod and container are both created under the override config, so the
    # default volume list from containers.conf must be applied.
    CONTAINERS_CONF_OVERRIDE=$containersconf run_podman pod create
    podid="$output"

    # If the config was honored, /foobar shows up in the container's mounts.
    CONTAINERS_CONF_OVERRIDE=$containersconf run_podman create --pod $podid $IMAGE grep foobar /proc/mounts
}
90

91

92
@test "podman pod create - custom infra image" {
    skip_if_remote "CONTAINERS_CONF_OVERRIDE only affects server side"
    # Deliberately nonexistent image: every attempt to pull it must fail
    # (exit 125) with a pull error naming this image.
    image="i.do/not/exist:image"
    tmpdir=$PODMAN_TMPDIR/pod-test
    mkdir -p $tmpdir
    containersconf=$tmpdir/containers.conf
    cat >$containersconf <<EOF
[engine]
infra_image="$image"
EOF

    # --infra-image given explicitly on the command line...
    run_podman 125 pod create --infra-image $image
    is "$output" ".*initializing source docker://$image:.*"

    # ...infra_image taken from containers.conf...
    CONTAINERS_CONF_OVERRIDE=$containersconf run_podman 125 pod create
    is "$output" ".*initializing source docker://$image:.*"

    # ...and implicit pod creation via 'create --pod new:' all fail alike.
    CONTAINERS_CONF_OVERRIDE=$containersconf run_podman 125 create --pod new:test $IMAGE
    is "$output" ".*initializing source docker://$image:.*"
}
112

113
@test "podman pod - communicating between pods" {
    # Containers in one pod share a network namespace: a listener and a
    # talker in the same pod must be able to reach each other on localhost.
    podname=pod$(random_string)
    run_podman 1 pod exists $podname
    run_podman pod create --infra=true --name=$podname
    podid="$output"
    run_podman pod exists $podname
    run_podman pod exists $podid

    # (Assert that output is formatted, not a one-line blob: #8021)
    run_podman pod inspect $podname
    assert "${#lines[*]}" -ge 10 "Output from 'pod inspect'; see #8011"

    # Randomly-assigned port in the 5xxx range
    port=$(random_free_port)

    # Listener. This will exit as soon as it receives a message.
    run_podman run -d --pod $podname $IMAGE nc -l -p $port
    cid1="$output"

    # (While we're here, test the 'Pod' field of 'podman ps'. Expect two ctrs)
    run_podman ps --format '{{.Pod}}'
    newline="
"
    is "$output" "${podid:0:12}${newline}${podid:0:12}" "ps shows 2 pod IDs"

    # Talker: send the message via common port on localhost
    message=$(random_string 15)
    run_podman run --rm --pod $podname $IMAGE \
               sh -c "echo $message | nc 127.0.0.1 $port"

    # Back to the first (listener) container. Make sure message was received.
    run_podman logs $cid1
    is "$output" "$message" "message sent from one container to another"

    # Clean up. First the nc -l container...
    run_podman rm $cid1

    # ...then rm the pod, then rmi the pause image so we don't leave strays.
    run_podman pod rm $podname

    # Pod no longer exists
    run_podman 1 pod exists $podid
    run_podman 1 pod exists $podname
}
157

158
@test "podman pod - communicating via /dev/shm " {
    # Containers in a pod share IPC: a file created in /dev/shm by one
    # container must be visible to a later container in the same pod.
    podname=pod$(random_string)
    run_podman 1 pod exists $podname
    run_podman pod create --infra=true --name=$podname
    podid="$output"
    run_podman pod exists $podname
    run_podman pod exists $podid

    run_podman run --rm --pod $podname $IMAGE touch /dev/shm/test1
    run_podman run --rm --pod $podname $IMAGE ls /dev/shm/test1
    is "$output" "/dev/shm/test1"

    # ...then rm the pod, then rmi the pause image so we don't leave strays.
    run_podman pod rm $podname

    # Pod no longer exists
    run_podman 1 pod exists $podid
    run_podman 1 pod exists $podname
}
177

178
# Random byte
179
function octet() {
180
    echo $(( $RANDOM & 255 ))
181
}
182

183
# random MAC address: convention seems to be that 2nd lsb=1, lsb=0
# (i.e. 0bxxxxxx10) in the first octet guarantees a private space.
# FIXME: I can't find a definitive reference for this though
# Generate the address IN CAPS (A-F), but we will test it in lowercase.
function random_mac() {
    # fix: declare loop variable local so it no longer leaks into callers
    local i
    local mac
    # First octet: '& 242 | 2' forces the 0bxxxxxx10 low bits described above
    mac=$(printf "%02X" $(( $(octet) & 242 | 2 )) )
    # {2..6} replaces 'seq 2 6': same five iterations, no extra process
    for i in {2..6}; do
        mac+=$(printf ":%02X" $(octet))
    done

    echo $mac
}
195

196
# Random RFC1918 IP address in the 172.20.x.x range, printed on stdout.
function random_ip() {
    # fix: declare loop variable local so it no longer leaks into callers
    local i
    local ip="172.20"
    for i in 1 2; do
        ip+=$(printf ".%d" $(octet))
    done
    echo $ip
}
204

205
@test "podman pod create - hashtag AllTheOptions" {
    # Exercise (nearly) every 'pod create' flag at once, with randomized
    # values, then verify each one took effect from inside the pod.
    mac=$(random_mac)
    add_host_ip=$(random_ip)
    add_host_n=$(random_string | tr A-Z a-z).$(random_string | tr A-Z a-z).xyz

    dns_server=$(random_ip)
    dns_opt="ndots:$(octet)"
    dns_search=$(random_string 15 | tr A-Z a-z).abc

    hostname=$(random_string | tr A-Z a-z).$(random_string | tr A-Z a-z).net

    labelname=$(random_string 11)
    labelvalue=$(random_string 22)

    pod_id_file=${PODMAN_TMPDIR}/pod-id-file

    # Randomly-assigned ports in the 5xxx and 6xxx range
    port_in=$(random_free_port 5000-5999)
    port_out=$(random_free_port 6000-6999)

    # Create a pod with all the desired options
    # FIXME: --ip=$ip fails:
    #      Error adding network: failed to allocate all requested IPs
    local mac_option="--mac-address=$mac"

    # Create a custom image so we can test --infra-image and -command.
    # It will have a randomly generated infra command, using the
    # existing 'pause' script in our testimage. We assign a bogus
    # entrypoint to confirm that --infra-command will override.
    local infra_image="infra_$(random_string 10 | tr A-Z a-z)"
    local infra_command="/pause_$(random_string 10)"
    local infra_name="infra_container_$(random_string 10 | tr A-Z a-z)"
    run_podman build -t $infra_image - << EOF
FROM $IMAGE
RUN ln /home/podman/pause $infra_command
ENTRYPOINT ["/original-entrypoint-should-be-overridden"]
EOF

    # Rootless podman cannot set a MAC address
    if is_rootless; then
        mac_option=
    fi
    run_podman pod create --name=mypod                   \
               --pod-id-file=$pod_id_file                \
               $mac_option                               \
               --hostname=$hostname                      \
               --add-host   "$add_host_n:$add_host_ip"   \
               --dns        "$dns_server"                \
               --dns-search "$dns_search"                \
               --dns-option "$dns_opt"                   \
               --publish    "$port_out:$port_in"         \
               --label      "${labelname}=${labelvalue}" \
               --infra-image   "$infra_image"            \
               --infra-command "$infra_command"          \
               --infra-name "$infra_name"
    pod_id="$output"

    # Check --pod-id-file
    is "$(<$pod_id_file)" "$pod_id" "contents of pod-id-file"

    # Get ID of infra container
    run_podman pod inspect --format '{{(index .Containers 0).ID}}' mypod
    local infra_cid="$output"
    # confirm that entrypoint is what we set
    run_podman container inspect --format '{{.Config.Entrypoint}}' $infra_cid
    is "$output" "[${infra_command}]" "infra-command took effect"
    # confirm that infra container name is set
    run_podman container inspect --format '{{.Name}}' $infra_cid
    is "$output" "$infra_name" "infra-name took effect"

    # Check each of the options
    if [ -n "$mac_option" ]; then
        run_podman run --rm --pod mypod $IMAGE ip link show
        # 'ip' outputs hex in lower-case, ${expr,,} converts UC to lc
        is "$output" ".* link/ether ${mac,,} " "requested MAC address was set"
    fi

    run_podman run --rm --pod mypod $IMAGE hostname
    is "$output" "$hostname" "--hostname set the hostname"
    run_podman 125 run --rm --pod mypod --hostname foobar $IMAGE hostname
    is "$output" ".*invalid config provided: cannot set hostname when joining the pod UTS namespace: invalid configuration" "--hostname should not be allowed in share UTS pod"

    run_podman run --rm --pod $pod_id $IMAGE cat /etc/hosts
    is "$output" ".*$add_host_ip[[:blank:]]$add_host_n" "--add-host was added"
    is "$output" ".*	$hostname"            "--hostname is in /etc/hosts"
    #               ^^^^ this must be a tab, not a space

    run_podman run --rm --pod mypod $IMAGE cat /etc/resolv.conf
    is "$output" ".*nameserver $dns_server"  "--dns [server] was added"
    is "$output" ".*search $dns_search"      "--dns-search was added"
    is "$output" ".*options $dns_opt"        "--dns-option was added"

    # pod inspect
    run_podman pod inspect --format '{{.Name}}: {{.ID}} : {{.NumContainers}} : {{.Labels}}' mypod
    is "$output" "mypod: $pod_id : 1 : map\[${labelname}:${labelvalue}]" \
       "pod inspect --format ..."

    # pod ps
    run_podman pod ps --format '{{.ID}} {{.Name}} {{.Status}} {{.Labels}}'
    is "$output" "${pod_id:0:12} mypod Running map\[${labelname}:${labelvalue}]"  "pod ps"

    run_podman pod ps --no-trunc --filter "label=${labelname}=${labelvalue}" --format '{{.ID}}'
    is "$output" "$pod_id" "pod ps --filter label=..."

    # Test local port forwarding, as well as 'ps' output showing ports
    # Run 'nc' in a container, waiting for input on the published port.
    c_name=$(random_string 15)
    run_podman run -d --pod mypod --name $c_name $IMAGE nc -l -p $port_in
    cid="$output"

    # Try running another container also listening on the same port.
    run_podman 1 run --pod mypod --name dsfsdfsdf $IMAGE nc -l -p $port_in
    is "$output" "nc: bind: Address in use" \
       "two containers cannot bind to same port"

    # make sure we can ping; failure here might mean that capabilities are wrong
    run_podman run --rm --pod mypod $IMAGE ping -c1 127.0.0.1
    run_podman run --rm --pod mypod $IMAGE ping -c1 $hostname

    # While the container is still running, run 'podman ps' (no --format)
    # and confirm that the output includes the published port
    run_podman ps --filter id=$cid
    is "${lines[1]}" "${cid:0:12}  $IMAGE  nc -l -p $port_in .* 0.0.0.0:$port_out->$port_in/tcp  $c_name" \
       "output of 'podman ps'"

    # send a random string to the container. This will cause the container
    # to output the string to its logs, then exit.
    teststring=$(random_string 30)
    echo "$teststring" | nc 127.0.0.1 $port_out

    # Confirm that the container log output is the string we sent it.
    run_podman wait $cid
    run_podman logs $cid
    is "$output" "$teststring" "test string received on container"

    # Finally, confirm the infra-container and -command. We run this late,
    # not at pod creation, to give the infra container time to start & log.
    run_podman logs $infra_cid
    is "$output" "Confirmed: testimage pause invoked as $infra_command" \
       "pod ran with our desired infra container + command"

    # Clean up
    run_podman rm $cid
    run_podman pod rm -t 0 -f --pod-id-file $pod_id_file
    if [[ -e $pod_id_file ]]; then
        die "pod-id-file $pod_id_file should be removed along with pod"
    fi
    run_podman rmi $infra_image
}
353

354
@test "podman pod create should fail when infra-name is already in use" {
    local infra_name="infra_container_$(random_string 10 | tr A-Z a-z)"
    local infra_image="quay.io/libpod/k8s-pause:3.5"
    local pod_name="$(random_string 10 | tr A-Z a-z)"

    # First pod claims the infra-container name; creation must be silent.
    run_podman --noout pod create --name $pod_name --infra-name "$infra_name" --infra-image "$infra_image"
    is "$output" "" "output from pod create should be empty"

    # Second pod reusing the same infra-name must fail with a clear error.
    run_podman 125 pod create --infra-name "$infra_name"
    assert "$output" =~ "^Error: .*: the container name \"$infra_name\" is already in use by .* You have to remove that container to be able to reuse that name: that name is already in use" \
           "Trying to create two pods with same infra-name"

    run_podman pod rm -f $pod_name
    run_podman rmi $infra_image
}
369

370
@test "podman pod create --share" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    # Bogus namespace names must be rejected with the full list of options.
    run_podman 125 pod create --share bogus --name $pod_name
    is "$output" ".*invalid kernel namespace to share: bogus. Options are: cgroup, ipc, net, pid, uts or none" \
       "pod test for bogus --share option"
    run_podman pod create --share ipc --name $pod_name
    run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}"
    is "$output" "[ipc]"
    # UTS is not shared here, so a per-container hostname is allowed.
    run_podman run --rm --pod $pod_name --hostname foobar $IMAGE hostname
    is "$output" "foobar" "--hostname should work with non share UTS namespace"
    # '+pid' adds pid to the default share set rather than replacing it.
    run_podman pod create --share +pid --replace --name $pod_name
    run_podman pod inspect $pod_name --format "{{.SharedNamespaces}}"
    for ns in uts pid ipc net; do
        is "$output" ".*$ns"
    done
}
386

387
@test "podman pod create --pod new:$POD --hostname" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    # Explicit --hostname wins when the pod is created implicitly...
    run_podman run --rm --pod "new:$pod_name" --hostname foobar $IMAGE hostname
    is "$output" "foobar" "--hostname should work when creating a new:pod"
    run_podman pod rm $pod_name
    # ...and without one, the pod name becomes the hostname.
    run_podman run --rm --pod "new:$pod_name" $IMAGE hostname
    is "$output" "$pod_name" "new:POD should have hostname name set to podname"
}
395

396
@test "podman rm --force to remove infra container" {
    local pod_name="$(random_string 10 | tr A-Z a-z)"
    run_podman create --pod "new:$pod_name" $IMAGE
    container_ID="$output"
    run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name
    infra_ID="$output"

    # An infra container cannot be removed by itself, not even with --force
    run_podman 125 container rm $infra_ID
    is "$output" ".* and cannot be removed without removing the pod"
    run_podman 125 container rm --force $infra_ID
    is "$output" ".* and cannot be removed without removing the pod"

    # --depend removes the infra container plus everything that depends on it
    run_podman container rm --depend $infra_ID
    is "$output" ".*$infra_ID.*"
    is "$output" ".*$container_ID.*"

    # Now make sure that --force --all works as well
    run_podman create --pod "new:$pod_name" $IMAGE
    container_1_ID="$output"
    run_podman create --pod "$pod_name" $IMAGE
    container_2_ID="$output"
    run_podman create $IMAGE
    container_3_ID="$output"
    run_podman pod inspect --format "{{.InfraContainerID}}" $pod_name
    infra_ID="$output"

    # fix: original appended "$infraID" here -- an undefined variable (the
    # real one is infra_ID) that silently expanded to nothing. --all takes
    # no explicit ID, so the stray expansion is simply dropped.
    run_podman container rm --force --all
    is "$output" ".*$infra_ID.*"
    is "$output" ".*$container_1_ID.*"
    is "$output" ".*$container_2_ID.*"
    is "$output" ".*$container_3_ID.*"
}
428

429
@test "podman pod create share net" {
    # Verify .InfraConfig.HostNetwork for every combination of --share
    # and --network. (The trailing 'test' below is the positional pod name.)
    run_podman pod create --name test
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Default network sharing should be false"
    run_podman pod rm test

    run_podman pod create --share ipc  --network private test
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Private network sharing with only ipc should be false"
    run_podman pod rm test

    local name="$(random_string 10 | tr A-Z a-z)"
    run_podman pod create --name $name --share net  --network private
    run_podman pod inspect $name --format {{.InfraConfig.HostNetwork}}
    is "$output" "false" "Private network sharing with only net should be false"

    run_podman pod create --share net --network host --replace $name
    run_podman pod inspect $name --format {{.InfraConfig.HostNetwork}}
    is "$output" "true" "Host network sharing with only net should be true"
    run_podman pod rm $name

    run_podman pod create --name test --share ipc --network host
    run_podman pod inspect test --format {{.InfraConfig.HostNetwork}}
    is "$output" "true" "Host network sharing with only ipc should be true"
    run_podman pod rm test
}
455

456
@test "pod exit policies" {
    # Test setting exit policies
    run_podman pod create
    podID="$output"
    run_podman pod inspect $podID --format "{{.ExitPolicy}}"
    is "$output" "continue" "default exit policy"
    run_podman pod rm $podID

    run_podman pod create --exit-policy stop
    podID="$output"
    run_podman pod inspect $podID --format "{{.ExitPolicy}}"
    is "$output" "stop" "custom exit policy"
    run_podman pod rm $podID

    run_podman 125 pod create --exit-policy invalid
    is "$output" "Error: .*running pod create option: invalid pod exit policy: \"invalid\"" "invalid exit policy"

    # Test exit-policy behaviour
    # With 'continue', a pod whose last container exits stays up (Degraded)
    run_podman pod create --exit-policy continue
    podID="$output"
    run_podman run --pod $podID $IMAGE true
    run_podman pod inspect $podID --format "{{.State}}"
    _ensure_pod_state $podID Degraded
    run_podman pod rm $podID

    # With 'stop', the pod transitions to Exited once its containers finish
    run_podman pod create --exit-policy stop
    podID="$output"
    run_podman run --pod $podID $IMAGE true
    run_podman pod inspect $podID --format "{{.State}}"
    _ensure_pod_state $podID Exited
    run_podman pod rm -t -1 -f $podID
}
488

489
@test "pod exit policies - play kube" {
    # play-kube sets the exit policy to "stop"
    local name="$(random_string 10 | tr A-Z a-z)"

    kubeFile="apiVersion: v1
kind: Pod
metadata:
  name: $name-pod
spec:
  containers:
  - command:
    - \"true\"
    image: $IMAGE
    name: ctr
  restartPolicy: OnFailure"

    echo "$kubeFile" > $PODMAN_TMPDIR/test.yaml
    run_podman play kube $PODMAN_TMPDIR/test.yaml
    run_podman pod inspect $name-pod --format "{{.ExitPolicy}}"
    is "$output" "stop" "custom exit policy"
    # The container runs 'true' and exits, so the pod must reach Exited.
    _ensure_pod_state $name-pod Exited
    run_podman pod rm $name-pod
}
512

513
@test "pod resource limits" {
    skip_if_remote "resource limits only implemented on non-remote"
    skip_if_rootless "resource limits only work with root"
    skip_if_cgroupsv1 "resource limits only meaningful on cgroups V2"

    # create loopback device
    # LOOPDEVICE is a file-level global: teardown() detaches it if this
    # test dies before its own cleanup at the bottom runs.
    lofile=${PODMAN_TMPDIR}/disk.img
    fallocate -l 1k  ${lofile}
    LOOPDEVICE=$(losetup --show -f $lofile)

    # tr needed because losetup seems to use %2d
    lomajmin=$(losetup -l --noheadings --output MAJ:MIN $LOOPDEVICE | tr -d ' ')
    run grep -w bfq /sys/block/$(basename ${LOOPDEVICE})/queue/scheduler
    if [ $status -ne 0 ]; then
        losetup -d $LOOPDEVICE
        LOOPDEVICE=
        skip "BFQ scheduler is not supported on the system"
    fi
    echo bfq > /sys/block/$(basename ${LOOPDEVICE})/queue/scheduler

    # FIXME: #15464: blkio-weight-device not working
    # Table of cgroup files and the value each pod-create flag should yield;
    # parsed below by parse_table().
    expected_limits="
cpu.max         | 500000 100000
memory.max      | 5242880
memory.swap.max | 1068498944
io.bfq.weight   | default 50
io.max          | $lomajmin rbps=1048576 wbps=1048576 riops=max wiops=max
"

    # Run the identical checks under both cgroup managers.
    for cgm in systemd cgroupfs; do
        local name=resources-$cgm
        run_podman --cgroup-manager=$cgm pod create --name=$name --cpus=5 --memory=5m --memory-swap=1g --cpu-shares=1000 --cpuset-cpus=0 --cpuset-mems=0 --device-read-bps=${LOOPDEVICE}:1mb --device-write-bps=${LOOPDEVICE}:1mb --blkio-weight=50
        run_podman --cgroup-manager=$cgm pod start $name
        run_podman pod inspect --format '{{.CgroupPath}}' $name
        local cgroup_path="$output"

        while read unit expect; do
            local actual=$(< /sys/fs/cgroup/$cgroup_path/$unit)
            is "$actual" "$expect" "resource limit under $cgm: $unit"
        done < <(parse_table "$expected_limits")
        run_podman --cgroup-manager=$cgm pod rm -f $name
    done

    # Clean up, and prevent duplicate cleanup in teardown
    losetup -d $LOOPDEVICE
    LOOPDEVICE=
}
560

561
@test "podman pod ps doesn't race with pod rm" {
    # create a few pods
    for i in {0..10}; do
        run_podman pod create
    done

    # and delete them
    # ($PODMAN directly, not run_podman, so it runs in the background)
    $PODMAN pod rm -a &

    # pod ps should not fail while pods are deleted
    run_podman pod ps -q

    # wait for pod rm -a
    wait
}
576

577
@test "podman pod rm --force bogus" {
    # Removing a nonexistent pod fails (exit 1) without --force...
    run_podman 1 pod rm bogus
    is "$output" "Error: .*bogus.*: no such pod" "Should print error"
    # ...but succeeds silently with --force.
    run_podman pod rm -t -1 --force bogus
    is "$output" "" "Should print no output"

    # With a mix of bogus and real names, the real pod is still removed.
    run_podman pod create --name testpod
    run_podman pod rm --force bogus testpod
    assert "$output" =~ "[0-9a-f]{64}" "rm pod"
    run_podman pod ps -q
    assert "$output" = "" "no pods listed"
}
589

590
@test "podman pod create on failure" {
    podname=pod$(random_string)
    # Random network name that is guaranteed not to exist
    nwname=pod$(random_string)

    run_podman 125 pod create --network $nwname --name $podname
    # FIXME: podman and podman-remote do not return the same error message
    # but consistency would be nice
    is "$output" "Error: .*unable to find network with name or ID $nwname: network not found"

    # Make sure the pod doesn't get created on failure
    run_podman 1 pod exists $podname
}
602

603
@test "podman pod create restart tests" {
    podname=pod$(random_string)

    # A container created inside the pod inherits the pod's restart policy...
    run_podman pod create --restart=on-failure --name $podname
    run_podman create --name test-ctr --pod $podname $IMAGE
    run_podman container inspect --format '{{ .HostConfig.RestartPolicy.Name }}' test-ctr
    is "$output" "on-failure" "container inherits from pod"

    # ...unless it sets its own --restart explicitly.
    run_podman create --replace --restart=always --name test-ctr --pod $podname $IMAGE
    run_podman container inspect --format '{{ .HostConfig.RestartPolicy.Name }}' test-ctr
    is "$output" "always" "container overrides restart policy from pod"

    run_podman pod rm -f -a
}
617

618
# Helper used by pod ps --filter test. Creates one pod or container
619
# with a UNIQUE two-character CID prefix.
620
function thingy_with_unique_id() {
621
    local what="$1"; shift              # pod or container
622
    local how="$1"; shift               # e.g. "--name p1c1 --pod p1"
623

624
    while :;do
625
          local try_again=
626

627
          run_podman $what create $how
628
          # This is our return value; it propagates up to caller's namespace
629
          id="$output"
630

631
          # Make sure the first two characters aren't already used in an ID
632
          for existing_id in "$@"; do
633
              if [[ -z "$try_again" ]]; then
634
                  if [[ "${existing_id:0:2}" == "${id:0:2}" ]]; then
635
                      run_podman $what rm $id
636
                      try_again=1
637
                  fi
638
              fi
639
          done
640

641
          if [[ -z "$try_again" ]]; then
642
              # Nope! groovy! caller gets $id
643
              return
644
          fi
645
    done
646
}
647

648
@test "podman pod ps --filter" {
    # Maps: pod number -> pod ID, and "podN+ctrN" -> container ID
    local -A podid
    local -A ctrid

    # Setup: create three pods, each with three containers, all of them with
    # unique (distinct) first two characters of their pod/container ID.
    for p in 1 2 3;do
        # no infra, please! That creates an extra container with a CID
        # that may collide with our other ones, and it's too hard to fix.
        thingy_with_unique_id "pod" "--infra=false --name p${p}" \
                              ${podid[*]} ${ctrid[*]}
        podid[$p]=$id

        for c in 1 2 3; do
            thingy_with_unique_id "container" \
                                  "--pod p${p} --name p${p}c${c} $IMAGE true" \
                                  ${podid[*]} ${ctrid[*]}
            ctrid[$p$c]=$id
        done
    done

    # for debugging; without this, on test failure it's too hard to
    # associate IDs with names
    run_podman pod ps
    run_podman ps -a

    # Test: ps and filter for each pod and container, by ID
    for p in 1 2 3; do
        local pid=${podid[$p]}

        # Search by short pod ID, longer pod ID, pod ID regex, and pod name
        # ps by short ID, longer ID, regex, and name
        for filter in "id=${pid:0:2}" "id=${pid:0:10}" "id=^${pid:0:2}" "name=p$p"; do
            run_podman pod ps --filter=$filter --format '{{.Name}}:{{.Id}}'
            assert "$output" == "p$p:${pid:0:12}" "pod $p, filter=$filter"
        done

        # ps by negation (regex) of our pid, should find all other pods
        f1="^[^${pid:0:1}]"
        f2="^.[^${pid:1:1}]"
        run_podman pod ps --filter=id="$f1" --filter=id="$f2" --format '{{.Name}}'
        assert "${#lines[*]}" == "2" "filter=$f1 + $f2 finds 2 pods"
        assert "$output" !~ "p$p"    "filter=$f1 + $f2 does not find p$p"

        # Search by *container* ID
        for c in 1 2 3;do
            local cid=${ctrid[$p$c]}
            for filter in "ctr-ids=${cid:0:2}" "ctr-ids=^${cid:0:2}.*"; do
                run_podman pod ps --filter=$filter --format '{{.Name}}:{{.Id}}'
                assert "$output" == "p${p}:${pid:0:12}" \
                       "pod $p, container $c, filter=$filter"
            done
        done
    done

    # Multiple filters, multiple pods
    run_podman pod ps --filter=ctr-ids=${ctrid[12]} \
                      --filter=ctr-ids=${ctrid[23]} \
                      --filter=ctr-ids=${ctrid[31]} \
                      --format='{{.Name}}' --sort=name
    assert "$(echo $output)" == "p1 p2 p3" "multiple ctr-ids filters"

    # Clean up
    run_podman pod rm -f -a
    run_podman rm -f -a
}
714

715

716
@test "podman pod cleans cgroup and keeps limits" {
    skip_if_remote "we cannot check cgroup settings"
    skip_if_rootless_cgroupsv1 "rootless cannot use cgroups on v1"

    # The pod cgroup must exist while running, disappear on stop/rm, and
    # retain its memory limit across a stop/start cycle (issue #19175).
    for infra in true false; do
        run_podman pod create --infra=$infra --memory=256M
        podid="$output"
        run_podman run -d --pod $podid $IMAGE top -d 2

        run_podman pod inspect $podid --format "{{.CgroupPath}}"
        result="$output"
        assert "$result" =~ "/" ".CgroupPath is a valid path"

        if is_cgroupsv2; then
           cgroup_path=/sys/fs/cgroup/$result
        else
           # v1: the path is relative to each controller; check via memory
           cgroup_path=/sys/fs/cgroup/memory/$result
        fi

        if test ! -e $cgroup_path; then
            die "the cgroup $cgroup_path does not exist"
        fi

        run_podman pod stop -t 0 $podid
        if test -e $cgroup_path; then
            die "the cgroup $cgroup_path should not exist after pod stop"
        fi

        run_podman pod start $podid
        if test ! -e $cgroup_path; then
            die "the cgroup $cgroup_path does not exist"
        fi

        # validate that cgroup limits are in place after a restart
        # issue #19175
        if is_cgroupsv2; then
           memory_limit_file=$cgroup_path/memory.max
        else
           memory_limit_file=$cgroup_path/memory.limit_in_bytes
        fi
        # 268435456 == 256M from the pod create above
        assert "$(< $memory_limit_file)" = "268435456" "Contents of $memory_limit_file"

        run_podman pod rm -t 0 -f $podid
        if test -e $cgroup_path; then
            die "the cgroup $cgroup_path should not exist after pod rm"
        fi
    done
}
764

765
# vim: filetype=sh
766

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.