1
#!/usr/bin/env bats -*- bats -*-
10
# This is a long ugly way to clean up pods and remove the pause image
12
run_podman pod rm -t 0 -f -a
13
run_podman rm -t 0 -f -a
14
run_podman image list --format '{{.ID}} {{.Repository}}'
15
while read id name; do
16
if [[ "$name" =~ /podman-pause ]]; then
38
value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
50
allowPrivilegeEscalation: true
55
readOnlyRootFilesystem: false
57
- mountPath: /testdir:z
58
name: home-podman-testdir
64
name: home-podman-testdir
68
RELABEL="system_u:object_r:container_file_t:s0"
70
@test "podman kube with stdin" {
71
TESTDIR=$PODMAN_TMPDIR/testdir
73
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
75
run_podman kube play - < $PODMAN_TMPDIR/test.yaml
76
if selinux_enabled; then
78
is "$output" "${RELABEL} $TESTDIR" "selinux relabel should have happened"
81
# Make sure that the K8s pause image isn't pulled but the local podman-pause is built.
83
run_podman 1 image exists k8s.gcr.io/pause
84
run_podman 1 image exists registry.k8s.io/pause
85
run_podman image exists $(pause_image)
87
run_podman stop -a -t 0
88
run_podman pod rm -t 0 -f test_pod
92
# Testing that the "podman play" cmd still works now that
93
# "podman kube" is an option.
94
TESTDIR=$PODMAN_TMPDIR/testdir
96
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
97
run_podman play kube $PODMAN_TMPDIR/test.yaml
98
if selinux_enabled; then
100
is "$output" "${RELABEL} $TESTDIR" "selinux relabel should have happened"
103
# Now rerun twice to make sure nothing gets removed
104
run_podman 125 play kube $PODMAN_TMPDIR/test.yaml
105
is "$output" ".* is in use: pod already exists"
106
run_podman 125 play kube $PODMAN_TMPDIR/test.yaml
107
is "$output" ".* is in use: pod already exists"
109
run_podman stop -a -t 0
110
run_podman pod rm -t 0 -f test_pod
113
# helper function: writes a yaml file with customizable values
114
function _write_test_yaml() {
115
local outfile=$PODMAN_TMPDIR/test.yaml
117
# Function args must all be of the form 'keyword=value' (value may be null)
119
local labels="app: test"
120
local name="test_pod"
125
# This will error on 'foo=' (no value). That's totally OK.
126
local value=$(expr "$i" : '[^=]*=\(.*\)')
128
annotations=*) annotations="$value" ;;
129
labels=*) labels="$value" ;;
130
name=*) name="$value" ;;
131
command=*) command="$value" ;;
132
image=*) image="$value" ;;
133
ctrname=*) ctrname="$value" ;;
134
*) die "_write_test_yaml: cannot grok '$i'" ;;
138
# These three header lines are common to all yamls.
139
# Note: use >> (append), not > (overwrite), for multi-pod test
146
if [[ -n "$annotations" ]]; then
147
echo " annotations:" >>$outfile
148
echo " $annotations" >>$outfile
150
if [[ -n "$labels" ]]; then
151
echo " labels:" >>$outfile
152
echo " $labels" >>$outfile
154
if [[ -n "$name" ]]; then
155
echo " name: $name" >>$outfile
158
# We always have spec and container lines...
159
echo "spec:" >>$outfile
160
echo " containers:" >>$outfile
161
# ...but command is optional. If absent, assume our caller will fill it in.
162
if [[ -n "$command" ]]; then
174
@test "podman play --service-container" {
175
skip_if_remote "service containers only work locally"
177
# Create the YAMl file
178
yaml_source="$PODMAN_TMPDIR/test.yaml"
179
_write_test_yaml command=top
181
# Run `play kube` in the background as it will wait for the service
183
timeout --foreground -v --kill=10 60 \
184
$PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &
186
# Wait for the container to be running
187
container_a=test_pod-test
189
for i in $(seq 1 20); do
190
run_podman "?" container wait $container_a --condition="running"
191
if [[ $status == 0 ]]; then
199
if [[ -z "$container_running" ]]; then
200
die "container $container_a did not start"
203
# The name of the service container is predictable: the first 12 characters
204
# of the hash of the YAML file followed by the "-service" suffix
205
yaml_sha=$(sha256sum $yaml_source)
206
service_container="${yaml_sha:0:12}-service"
208
# Make sure that the service container exists and runs.
209
run_podman container inspect $service_container --format "{{.State.Running}}"
212
run_podman container inspect $service_container --format '{{.Config.StopTimeout}}'
213
is "$output" "10" "StopTimeout should be initialized to 10"
215
# Stop the *main* container and make sure that
216
# 1) The pod transitions to Exited
217
# 2) The service container is stopped
218
# 3) The service container is marked as a service container
219
run_podman stop test_pod-test
220
_ensure_pod_state test_pod Exited
221
_ensure_container_running $service_container false
222
run_podman container inspect $service_container --format "{{.IsService}}"
225
# Restart the pod, make sure the service is running again
226
run_podman pod restart test_pod
227
run_podman container inspect $service_container --format "{{.State.Running}}"
230
# Check for an error when trying to remove the service container
231
run_podman 125 container rm $service_container
232
is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
233
run_podman 125 container rm --force $service_container
234
is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
236
# Kill the pod and make sure the service is not running
237
run_podman pod kill test_pod
238
_ensure_container_running $service_container false
240
# Remove the pod and make sure the service is removed along with it
241
run_podman pod rm test_pod
242
run_podman 1 container exists $service_container
245
@test "podman kube --network" {
246
TESTDIR=$PODMAN_TMPDIR/testdir
248
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
250
run_podman kube play --network host $PODMAN_TMPDIR/test.yaml
251
is "$output" "Pod:.*" "podman kube play should work with --network host"
253
run_podman pod inspect --format "{{.InfraConfig.HostNetwork}}" test_pod
254
is "$output" "true" ".InfraConfig.HostNetwork"
255
run_podman stop -a -t 0
256
run_podman pod rm -t 0 -f test_pod
258
if has_slirp4netns; then
259
run_podman kube play --network slirp4netns:port_handler=slirp4netns $PODMAN_TMPDIR/test.yaml
260
run_podman pod inspect --format {{.InfraContainerID}} "${lines[1]}"
262
run_podman container inspect --format "{{.HostConfig.NetworkMode}}" $infraID
263
is "$output" "slirp4netns" "network mode slirp4netns is set for the container"
266
run_podman stop -a -t 0
267
run_podman pod rm -t 0 -f test_pod
269
run_podman kube play --network none $PODMAN_TMPDIR/test.yaml
270
run_podman pod inspect --format {{.InfraContainerID}} "${lines[1]}"
272
run_podman container inspect --format "{{.HostConfig.NetworkMode}}" $infraID
273
is "$output" "none" "network mode none is set for the container"
275
run_podman kube down $PODMAN_TMPDIR/test.yaml
276
run_podman 125 inspect test_pod-test
277
is "$output" ".*Error: no such object: \"test_pod-test\""
282
@test "podman kube play read-only" {
283
YAML=$PODMAN_TMPDIR/test.yml
285
# --restart=no is crucial: without that, the "podman wait" below
286
# will spin for indeterminate time.
287
run_podman create --pod new:pod1 --restart=no --name test1 $IMAGE touch /testrw
288
run_podman create --pod pod1 --read-only --restart=no --name test2 $IMAGE touch /testro
289
# test3: even on a --read-only container, /tmp must remain writable.
# Write a tiny script to /tmp, mark it executable, and run it.
# (Fix: chmod must target /tmp/testtmp — the file echo created and the
# path executed afterwards — not the nonexistent /tmp/test/tmp.)
run_podman create --pod pod1 --read-only --restart=no --name test3 $IMAGE sh -c "echo "#!echo hi" > /tmp/testtmp; chmod +x /tmp/testtmp; /tmp/testtmp"
291
# Generate and run from yaml. (The "cat" is for debugging failures)
292
run_podman kube generate pod1 -f $YAML
294
run_podman kube play --replace $YAML
296
# Wait for all containers and check their exit statuses
297
run_podman wait pod1-test1 pod1-test2 pod1-test3
298
is "${lines[0]}" 0 "exit status: touch /file on read/write container"
299
is "${lines[1]}" 1 "exit status: touch /file on read-only container"
300
is "${lines[2]}" 0 "exit status: touch on /tmp is always ok, even on read-only container"
302
# Confirm config settings
303
run_podman container inspect --format '{{.HostConfig.ReadonlyRootfs}}' pod1-test1 pod1-test2 pod1-test3
304
is "${lines[0]}" "false" "ReadonlyRootfs - container 1"
305
is "${lines[1]}" "true" "ReadonlyRootfs - container 2"
306
is "${lines[2]}" "true" "ReadonlyRootfs - container 3"
309
run_podman kube down - < $YAML
310
run_podman 1 container exists pod1-test1
311
run_podman 1 container exists pod1-test2
312
run_podman 1 container exists pod1-test3
315
@test "podman kube play read-only from containers.conf" {
316
containersconf=$PODMAN_TMPDIR/containers.conf
317
cat >$containersconf <<EOF
322
YAML=$PODMAN_TMPDIR/test.yml
324
# --restart=no is crucial: without that, the "podman wait" below
325
# will spin for indeterminate time.
326
CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman create --pod new:pod1 --read-only=false --restart=no --name test1 $IMAGE touch /testrw
327
CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman create --pod pod1 --restart=no --name test2 $IMAGE touch /testro
328
CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman create --pod pod1 --restart=no --name test3 $IMAGE touch /tmp/testtmp
330
# Inspect settings in created containers
331
CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman container inspect --format '{{.HostConfig.ReadonlyRootfs}}' test1 test2 test3
332
is "${lines[0]}" "false" "ReadonlyRootfs - container 1, created"
333
is "${lines[1]}" "true" "ReadonlyRootfs - container 2, created"
334
is "${lines[2]}" "true" "ReadonlyRootfs - container 3, created"
336
# Now generate and run kube.yaml on a machine without the defaults set
337
CONTAINERS_CONF_OVERRIDE="$containersconf" run_podman kube generate pod1 -f $YAML
340
run_podman kube play --replace $YAML
342
# Wait for all containers and check their exit statuses
343
run_podman wait pod1-test1 pod1-test2 pod1-test3
344
is "${lines[0]}" 0 "exit status: touch /file on read/write container"
345
is "${lines[1]}" 1 "exit status: touch /file on read-only container"
346
is "${lines[2]}" 0 "exit status: touch on /tmp is always ok, even on read-only container"
348
# Confirm settings again
349
run_podman container inspect --format '{{.HostConfig.ReadonlyRootfs}}' pod1-test1 pod1-test2 pod1-test3
350
is "${lines[0]}" "false" "ReadonlyRootfs - container 1, post-run"
351
is "${lines[1]}" "true" "ReadonlyRootfs - container 2, post-run"
352
is "${lines[2]}" "true" "ReadonlyRootfs - container 3, post-run"
355
run_podman kube down - < $YAML
356
run_podman 1 container exists pod1-test1
357
run_podman 1 container exists pod1-test2
358
run_podman 1 container exists pod1-test3
361
@test "podman play with user from image" {
362
TESTDIR=$PODMAN_TMPDIR/testdir
365
_write_test_yaml command=id image=userimage
367
cat > $PODMAN_TMPDIR/Containerfile << _EOF
372
# Unset the PATH during build and make sure that all default env variables
373
# are correctly set for the created container.
374
run_podman build --unsetenv PATH -t userimage $PODMAN_TMPDIR
375
run_podman image inspect userimage --format "{{.Config.Env}}"
376
is "$output" "\[\]" "image does not set PATH - env is empty"
378
run_podman play kube --start=false $PODMAN_TMPDIR/test.yaml
379
run_podman inspect --format "{{ .Config.User }}" test_pod-test
380
is "$output" bin "expect container within pod to run as the bin user"
381
run_podman inspect --format "{{ .Config.Env }}" test_pod-test
382
is "$output" ".*PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin.*" "expect PATH to be set"
383
is "$output" ".*container=podman.*" "expect container to be set"
385
run_podman stop -a -t 0
386
run_podman pod rm -t 0 -f test_pod
387
run_podman rmi -f userimage:latest
390
@test "podman play --build --context-dir" {
391
skip_if_remote "--build is not supported in context remote"
393
mkdir -p $PODMAN_TMPDIR/userimage
394
cat > $PODMAN_TMPDIR/userimage/Containerfile << _EOF
399
_write_test_yaml command=id image=quay.io/libpod/userimage
400
run_podman 125 play kube --build --start=false $PODMAN_TMPDIR/test.yaml
401
run_podman play kube --replace --context-dir=$PODMAN_TMPDIR --build --start=false $PODMAN_TMPDIR/test.yaml
402
run_podman inspect --format "{{ .Config.User }}" test_pod-test
403
is "$output" bin "expect container within pod to run as the bin user"
405
run_podman stop -a -t 0
406
run_podman pod rm -t 0 -f test_pod
407
run_podman rmi -f userimage:latest
410
run_podman play kube --replace --build --start=false $PODMAN_TMPDIR/test.yaml
411
run_podman inspect --format "{{ .Config.User }}" test_pod-test
412
is "$output" bin "expect container within pod to run as the bin user"
414
run_podman stop -a -t 0
415
run_podman pod rm -t 0 -f test_pod
416
run_podman rmi -f userimage:latest
419
# Occasionally a remnant storage container is left behind which causes
420
# podman play kube --replace to fail. This test creates a conflicting
421
# storage container name using buildah to make sure --replace, still
422
# functions properly by removing the storage container.
423
@test "podman kube play --replace external storage" {
424
TESTDIR=$PODMAN_TMPDIR/testdir
426
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
427
run_podman play kube $PODMAN_TMPDIR/test.yaml
428
# Force removal of container
429
run_podman rm --force -t0 test_pod-test
430
# Create external container using buildah with same name
431
buildah from --name test_pod-test $IMAGE
432
# --replace deletes the buildah container and replace it with new one
433
run_podman play kube --replace $PODMAN_TMPDIR/test.yaml
435
run_podman stop -a -t 0
436
run_podman pod rm -t 0 -f test_pod
437
run_podman rmi -f userimage:latest
440
@test "podman kube --annotation" {
441
TESTDIR=$PODMAN_TMPDIR/testdir
442
RANDOMSTRING=$(random_string 15)
443
ANNOTATION_WITH_COMMA="comma,$(random_string 5)"
445
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
446
run_podman kube play --annotation "name=$RANDOMSTRING" \
447
--annotation "anno=$ANNOTATION_WITH_COMMA" $PODMAN_TMPDIR/test.yaml
448
run_podman inspect --format "{{ .Config.Annotations }}" test_pod-test
449
is "$output" ".*name:$RANDOMSTRING" "Annotation should be added to pod"
450
is "$output" ".*anno:$ANNOTATION_WITH_COMMA" "Annotation with comma should be added to pod"
453
run_podman 125 kube play --annotation "val" $PODMAN_TMPDIR/test.yaml
454
assert "$output" == "Error: annotation \"val\" must include an '=' sign" "invalid annotation error"
456
run_podman stop -a -t 0
457
run_podman pod rm -t 0 -f test_pod
460
@test "podman play Yaml deprecated --no-trunc annotation" {
461
RANDOMSTRING=$(random_string 65)
463
_write_test_yaml "annotations=test: ${RANDOMSTRING}" command=id
464
run_podman play kube --no-trunc - < $PODMAN_TMPDIR/test.yaml
467
@test "podman kube play - default log driver" {
468
TESTDIR=$PODMAN_TMPDIR/testdir
470
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
471
# Get the default log driver
472
run_podman info --format "{{.Host.LogDriver}}"
473
default_driver=$output
475
# Make sure that the default log driver is used
476
run_podman kube play $PODMAN_TMPDIR/test.yaml
477
run_podman inspect --format "{{.HostConfig.LogConfig.Type}}" test_pod-test
478
is "$output" "$default_driver" "play kube uses default log driver"
480
run_podman kube down $PODMAN_TMPDIR/test.yaml
481
run_podman 125 inspect test_pod-test
482
is "$output" ".*Error: no such object: \"test_pod-test\""
487
@test "podman kube play - URL" {
488
TESTDIR=$PODMAN_TMPDIR/testdir
490
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
491
echo READY > $PODMAN_TMPDIR/ready
493
HOST_PORT=$(random_free_port)
494
SERVER=http://127.0.0.1:$HOST_PORT
496
run_podman run -d --name myyaml -p "$HOST_PORT:80" \
497
-v $PODMAN_TMPDIR/test.yaml:/var/www/testpod.yaml:Z \
498
-v $PODMAN_TMPDIR/ready:/var/www/ready:Z \
500
$IMAGE /bin/busybox-extras httpd -f -p 80
502
wait_for_port 127.0.0.1 $HOST_PORT
503
wait_for_command_output "curl -s -S $SERVER/ready" "READY"
505
run_podman kube play $SERVER/testpod.yaml
506
run_podman inspect test_pod-test --format "{{.State.Running}}"
508
run_podman kube down $SERVER/testpod.yaml
509
run_podman 125 inspect test_pod-test
510
is "$output" ".*Error: no such object: \"test_pod-test\""
512
run_podman pod rm -a -f
513
run_podman rm -a -f -t0
516
@test "podman play with init container" {
517
_write_test_yaml command=
518
cat >>$PODMAN_TMPDIR/test.yaml <<EOF
532
run_podman kube play $PODMAN_TMPDIR/test.yaml
533
assert "$output" !~ "level=" "init containers should not generate logrus.Error"
534
run_podman inspect --format "{{.State.ExitCode}}" test_pod-testCtr
535
is "$output" "0" "init container should have created /dev/shm/test1"
537
run_podman kube down $PODMAN_TMPDIR/test.yaml
540
@test "podman kube play - hostport" {
541
HOST_PORT=$(random_free_port)
543
cat >>$PODMAN_TMPDIR/test.yaml <<EOF
551
run_podman kube play $PODMAN_TMPDIR/test.yaml
552
run_podman pod inspect test_pod --format "{{.InfraConfig.PortBindings}}"
553
assert "$output" = "map[$HOST_PORT/tcp:[{0.0.0.0 $HOST_PORT}]]"
554
run_podman kube down $PODMAN_TMPDIR/test.yaml
556
run_podman pod rm -a -f
560
@test "podman kube play - multi-pod YAML" {
561
skip_if_remote "service containers only work locally"
562
skip_if_journald_unavailable
564
# Create the YAMl file, with two pods, each with one container
565
yaml_source="$PODMAN_TMPDIR/test.yaml"
567
_write_test_yaml labels="app: pod$n" name="pod$n" ctrname="ctr$n" command=top
569
# Separator between two yaml halves
570
if [[ $n = 1 ]]; then
571
echo "---" >>$yaml_source
575
# Run `play kube` in the background as it will wait for the service
577
timeout --foreground -v --kill=10 60 \
578
$PODMAN play kube --service-container=true --log-driver journald $yaml_source &>/dev/null &
580
# The name of the service container is predictable: the first 12 characters
581
# of the hash of the YAML file followed by the "-service" suffix
582
yaml_sha=$(sha256sum $yaml_source)
583
service_container="${yaml_sha:0:12}-service"
584
# Wait for the containers to be running
585
container_1=pod1-ctr1
586
container_2=pod2-ctr2
588
for i in $(seq 1 20); do
589
run_podman "?" container wait $container_1 $container_2 $service_container --condition="running"
590
if [[ $status == 0 ]]; then
598
if [[ -z "$containers_running" ]]; then
599
die "container $container_1, $container_2 and/or $service_container did not start"
602
# Stop the pods, make sure that no ugly error logs show up and that the
603
# service container will implicitly get stopped as well
604
run_podman pod stop pod1 pod2
605
assert "$output" !~ "Stopping"
606
_ensure_container_running $service_container false
608
run_podman kube down $yaml_source
611
@test "podman kube generate filetype" {
612
YAML=$PODMAN_TMPDIR/test.yml
613
run_podman create --pod new:pod1 --security-opt label=level:s0:c1,c2 --security-opt label=filetype:usr_t -v myvol:/myvol --name test1 $IMAGE true
614
run_podman kube generate pod1 -f $YAML
616
is "$output" ".*filetype: usr_t" "Generated YAML file should contain filetype usr_t"
617
run_podman pod rm --force pod1
618
run_podman volume rm -t -1 myvol --force
620
run_podman kube play $YAML
621
if selinux_enabled; then
622
run_podman inspect pod1-test1 --format "{{ .MountLabel }}"
623
is "$output" "system_u:object_r:usr_t:s0:c1,c2" "Generated container should use filetype usr_t"
624
run_podman volume inspect myvol --format '{{ .Mountpoint }}'
627
is "$output" "system_u:object_r:usr_t:s0 $path" "volume should be labeled with usr_t type"
629
run_podman kube down $YAML
630
run_podman volume rm myvol --force
633
# kube play --wait=true, where we clear up the created containers, pods, and volumes when a kill or sigterm is triggered
634
@test "podman kube play --wait with siginterrupt" {
635
cname=c$(random_string 15)
636
fname="/tmp/play_kube_wait_$(random_string 6).yaml"
637
run_podman container create --name $cname $IMAGE top
638
run_podman kube generate -f $fname $cname
640
# delete the container we generated from
641
run_podman rm -f $cname
643
# force a timeout to happen so that the kube play command is killed
644
# and expect the timeout code 124 to happen so that we can clean up
646
PODMAN_TIMEOUT=15 run_podman 124 kube play --wait $fname
648
local delta_t=$((t1 - t0))
649
assert $delta_t -le 20 \
650
"podman kube play did not get killed within 10 seconds"
652
# there should be no containers running or created
654
is "$output" "" "There should be no containers"
655
run_podman rmi $(pause_image)
658
@test "podman kube play --wait - wait for pod to exit" {
659
fname="/tmp/play_kube_wait_$(random_string 6).yaml"
677
run_podman kube play --wait $fname
679
# debug to see what container is being left behind after the cleanup
680
# there should be no containers running or created
681
run_podman ps -a --noheading
682
is "$output" "" "There should be no containers"
684
run_podman rmi $(pause_image)
687
@test "podman kube play with configmaps" {
688
configmap_file=${PODMAN_TMPDIR}/play_kube_configmap_configmaps$(random_string 6),withcomma.yaml
706
pod_file=${PODMAN_TMPDIR}/play_kube_configmap_pod$(random_string 6).yaml
737
run_podman kube play --configmap=$configmap_file $pod_file
738
run_podman wait test_pod-server
740
# systemd logs are unreliable; we may need to retry a few times
741
# https://github.com/systemd/systemd/issues/28650
743
while [[ $retries -gt 0 ]]; do
744
run_podman logs test_pod-server
745
test -n "$output" && break
747
retries=$((retries - 1))
749
assert "$retries" -gt 0 "Timed out waiting for podman logs"
750
assert "$output" = "foo:bar" "output from podman logs"
752
run_podman kube down $pod_file
755
@test "podman kube with --authfile=/tmp/bogus" {
756
TESTDIR=$PODMAN_TMPDIR/testdir
758
echo "$testYaml" | sed "s|TESTDIR|${TESTDIR}|g" > $PODMAN_TMPDIR/test.yaml
759
bogus=$PODMAN_TMPDIR/bogus-authfile
761
run_podman 125 kube play --authfile=$bogus - < $PODMAN_TMPDIR/test.yaml
762
is "$output" "Error: credential file is not accessible: faccessat $bogus: no such file or directory" \
763
"$command should fail with not such file"
766
@test "podman kube play with umask from containers.conf" {
767
skip_if_remote "remote does not support CONTAINERS_CONF*"
768
YAML=$PODMAN_TMPDIR/test.yaml
770
containersConf=$PODMAN_TMPDIR/containers.conf
771
touch $containersConf
772
cat >$containersConf <<EOF
778
ctrInPod="ctr-pod-ctr"
780
run_podman create --restart never --name $ctr $IMAGE sh -c "touch /umask-test;stat -c '%a' /umask-test"
781
run_podman kube generate -f $YAML $ctr
782
CONTAINERS_CONF_OVERRIDE="$containersConf" run_podman kube play $YAML
783
run_podman container inspect --format '{{ .Config.Umask }}' $ctrInPod
784
is "${output}" "0472"
785
# Confirm that umask actually takes effect. Might take a second or so.
787
while [[ $retries -gt 0 ]]; do
788
run_podman logs $ctrInPod
789
test -n "$output" && break
791
retries=$((retries - 1))
793
assert "$retries" -gt 0 "Timed out waiting for container output"
794
assert "$output" = "204" "stat() on created file"
796
run_podman kube down $YAML
801
@test "podman kube generate tmpfs on /tmp" {
802
KUBE=$PODMAN_TMPDIR/kube.yaml
803
run_podman create --name test $IMAGE sleep 100
804
run_podman kube generate test -f $KUBE
805
run_podman kube play $KUBE
806
run_podman exec test-pod-test sh -c "mount | grep /tmp"
807
assert "$output" !~ "noexec" "mounts on /tmp should not be noexec"
808
run_podman kube down $KUBE
809
run_podman pod rm -a -f -t 0
810
run_podman rm -a -f -t 0
813
@test "podman kube play - pull policy" {
814
skip_if_remote "pull debug logs only work locally"
816
yaml_source="$PODMAN_TMPDIR/test.yaml"
817
_write_test_yaml command=true
819
# Exploit a debug message to make sure the expected pull policy is used
820
run_podman --debug kube play $yaml_source
821
assert "$output" =~ "Pulling image $IMAGE \(policy\: missing\)" "default pull policy is missing"
822
run_podman kube down $yaml_source
824
local_image="localhost/name:latest"
825
run_podman tag $IMAGE $local_image
827
_write_test_yaml command=true image=$local_image
829
run_podman --debug kube play $yaml_source
830
assert "$output" =~ "Pulling image $local_image \(policy\: newer\)" "pull policy is set to newhen pulling latest tag"
831
run_podman kube down $yaml_source
833
run_podman rmi $local_image
836
@test "podman kube play healthcheck should wait initialDelaySeconds before updating status (healthy)" {
837
fname="$PODMAN_TMPDIR/play_kube_healthy_$(random_string 6).yaml"
851
- touch /tmp/healthy && sleep 100
857
initialDelaySeconds: 3
862
run_podman kube play $fname
863
ctrName="liveness-exec-liveness"
865
# Keep checking status. For the first 2 seconds it must be 'starting'
867
while [[ $SECONDS -le $((t0 + 2)) ]]; do
868
run_podman inspect $ctrName --format "1-{{.State.Health.Status}}"
869
assert "$output" == "1-starting" "Health.Status at $((SECONDS - t0))"
873
# After 3 seconds it may take another second to go healthy. Wait.
875
while [[ $SECONDS -le $((t0 + 3)) ]]; do
876
run_podman inspect $ctrName --format "2-{{.State.Health.Status}}"
877
if [[ "$output" = "2-healthy" ]]; then
882
assert $output == "2-healthy" "After 3 seconds"
884
run_podman kube down $fname
889
@test "podman kube play healthcheck should wait initialDelaySeconds before updating status (unhealthy)" {
890
fname="$PODMAN_TMPDIR/play_kube_unhealthy_$(random_string 6).yaml"
904
- touch /tmp/healthy && sleep 100
910
initialDelaySeconds: 3
915
run_podman kube play $fname
916
ctrName="liveness-exec-liveness"
918
# Keep checking status. For the first 2 seconds it must be 'starting'
920
while [[ $SECONDS -le $((t0 + 2)) ]]; do
921
run_podman inspect $ctrName --format "1-{{.State.Health.Status}}"
922
assert "$output" == "1-starting" "Health.Status at $((SECONDS - t0))"
926
# After 3 seconds it may take another second to go unhealthy. Wait.
928
while [[ $SECONDS -le $((t0 + 3)) ]]; do
929
run_podman inspect $ctrName --format "2-{{.State.Health.Status}}"
930
if [[ "$output" = "2-unhealthy" ]]; then
935
assert $output == "2-unhealthy" "After 3 seconds"
937
run_podman kube down $fname
942
@test "podman play --build private registry" {
943
skip_if_remote "--build is not supported in context remote"
945
local registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
946
local from_image=$registry/quadlet_image_test:$(random_string)
947
local authfile=$PODMAN_TMPDIR/authfile.json
949
mkdir -p $PODMAN_TMPDIR/userimage
950
cat > $PODMAN_TMPDIR/userimage/Containerfile << _EOF
955
# Start the registry and populate the authfile that we can use for the test.
957
run_podman login --authfile=$authfile \
959
--username ${PODMAN_LOGIN_USER} \
960
--password ${PODMAN_LOGIN_PASS} \
963
# Push the test image to the registry
964
run_podman image tag $IMAGE $from_image
965
run_podman image push --tls-verify=false --authfile=$authfile $from_image
967
# Remove the local image to make sure it will be pulled again
968
run_podman image rm --ignore $from_image
970
_write_test_yaml command=id image=userimage
971
run_podman 125 play kube --build --start=false $PODMAN_TMPDIR/test.yaml
972
assert "$output" "=~" \
973
"Error: short-name resolution enforced but cannot prompt without a TTY|Resolving \"userimage\" using unqualified-search registries" \
974
"The error message does match any of the expected ones"
976
run_podman play kube --replace --context-dir=$PODMAN_TMPDIR --tls-verify=false --authfile=$authfile --build --start=false $PODMAN_TMPDIR/test.yaml
977
run_podman inspect --format "{{ .Config.User }}" test_pod-test
978
is "$output" bin "expect container within pod to run as the bin user"
980
run_podman stop -a -t 0
981
run_podman pod rm -t 0 -f test_pod
982
run_podman rmi -f userimage:latest $from_image