#!/usr/bin/env bats   -*- bats -*-
#
# Tests generated configurations for systemd.
#
# Random service name, unique per test run, so concurrent runs cannot
# collide on unit names. random_string is defined in the test helpers
# (not visible in this fragment).
SERVICE_NAME="podman_test_$(random_string)"

# Systemd unit paths derived from the service name. $UNIT_DIR is set
# elsewhere by the test helpers — TODO confirm against helpers.systemd.bash.
# The '@' variant is the template-unit file used by the template tests.
UNIT_FILE="$UNIT_DIR/$SERVICE_NAME.service"
TEMPLATE_FILE="$UNIT_DIR/$SERVICE_NAME@.service"
16
skip_if_remote "systemd tests are meaningless over remote"
22
if [[ -e "$UNIT_FILE" ]]; then
23
run systemctl stop "$SERVICE_NAME"
24
if [ $status -ne 0 ]; then
25
echo "# WARNING: systemctl stop failed in teardown: $output" >&3
29
systemctl daemon-reload
35
# Helper to start a systemd service running a container
36
# Helper: generate a systemd unit for container $cname, install it as
# $UNIT_FILE, enable it (see #12438), start it, and verify that
# 'systemctl status' reports success.
# NOTE(review): this block is garbled — bare line-number artifacts are
# interleaved and some original lines appear to be missing (e.g. the
# target argument of 'generate systemd' and the closing brace); restore
# from the pristine source before running.
function service_setup() {
37
# January 2024: we can no longer do "run_podman generate systemd" followed
38
# by "echo $output >file", because generate-systemd is deprecated and now
39
# says so loudly, to stderr, with no way to silence it. Since BATS gloms
40
# stdout + stderr, that warning goes to the unit file. (Today's systemd
41
# is forgiving about that, but RHEL8 systemd chokes with EINVAL)
44
run_podman generate systemd --files --name \
45
-e http_proxy -e https_proxy -e no_proxy \
46
-e HTTP_PROXY -e HTTPS_PROXY -e NO_PROXY \
48
mv "container-$cname.service" $UNIT_FILE
52
systemctl daemon-reload
54
# Also test enabling services (see #12438).
55
run systemctl enable "$SERVICE_NAME"
56
assert $status -eq 0 "Error enabling systemd unit $SERVICE_NAME: $output"
58
systemctl_start "$SERVICE_NAME"
60
run systemctl status "$SERVICE_NAME"
61
assert $status -eq 0 "systemctl status $SERVICE_NAME: $output"
64
# Helper to stop a systemd service running a container
65
# Helper: stop the systemd service, optionally assert its post-stop
# ActiveState ($1, e.g. "inactive" — regression check for #11304), then
# disable the unit and reload systemd.
# NOTE(review): garbled block — line-number artifacts interleaved; the
# 'fi' closing the if, any unit-file removal, and the closing brace
# appear to be missing from this fragment.
function service_cleanup() {
66
run systemctl stop "$SERVICE_NAME"
67
assert $status -eq 0 "Error stopping systemd unit $SERVICE_NAME: $output"
69
# Regression test for #11304: confirm that unit stops into correct state
70
local expected_state="$1"
71
if [[ -n "$expected_state" ]]; then
72
run systemctl show --property=ActiveState "$SERVICE_NAME"
73
assert "$output" = "ActiveState=$expected_state" \
74
"state of service after systemctl stop"
77
run systemctl disable "$SERVICE_NAME"
78
assert $status -eq 0 "Error disabling systemd unit $SERVICE_NAME: $output"
81
systemctl daemon-reload
84
# These tests can fail in dev. environment because of SELinux.
85
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
86
# Basic end-to-end check: warn on --restart=always without --new
# (#15284), then run a container under a generated unit, verify its log
# output, and clean up into the 'inactive' state (#11304).
# NOTE(review): garbled block — line-number artifacts interleaved and
# some lines (e.g. the service_setup call and closing brace) appear
# missing.
@test "podman generate - systemd - basic" {
87
# Warn when a custom restart policy is used without --new (see #15284)
88
run_podman create --restart=always $IMAGE
90
run_podman 0+w generate systemd $cid
91
require_warning "Container $cid has restart policy .*always.* which can lead to issues on shutdown" \
92
"generate systemd emits warning"
95
cname=$(random_string)
96
# See #7407 for --pull=always.
97
run_podman create --pull=always --name $cname --label "io.containers.autoupdate=registry" $IMAGE \
98
sh -c "trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done"
100
# Start systemd service to run this container
103
# Give container time to start; make sure output looks top-like
105
run_podman logs $cname
106
is "$output" ".*WAITING.*" "running is waiting for signal"
108
# All good. Stop service, clean up.
109
# Also make sure the service is in the `inactive` state (see #11304).
110
service_cleanup inactive
113
# Verify 'podman auto-update' with the "local" policy: after the local
# image is rebuilt (via commit), auto-update restarts the container's
# systemd service.
# NOTE(review): garbled block — line-number artifacts interleaved and
# some lines (e.g. service_setup/service_cleanup calls and closing
# brace) appear missing.
@test "podman autoupdate local" {
114
# Note that the entrypoint may be a JSON string which requires preserving the quotes (see #12477)
115
cname=$(random_string)
117
# Create a scratch image (copy of our regular one)
118
image_copy=base$(random_string | tr A-Z a-z)
119
run_podman tag $IMAGE $image_copy
121
# Create a container based on that
122
run_podman create --name $cname --label "io.containers.autoupdate=local" --entrypoint '["top"]' $image_copy
124
# Start systemd service to run this container
127
# Give container time to start; make sure output looks top-like
128
wait_for_output 'Load average' $cname
130
# Run auto-update and check that it restarted the container
131
run_podman commit --change "CMD=/bin/bash" $cname $image_copy
132
run_podman auto-update
133
is "$output" ".*$SERVICE_NAME.*" "autoupdate local restarted container"
135
# All good. Stop service, clean up.
137
run_podman rmi $image_copy
140
# These tests can fail in dev. environment because of SELinux.
141
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
142
# Verify that environment variables (passthrough -e FOO and explicit
# --env MYVAR=myval, including a literal '%s' value) survive into a
# container run under a generated systemd unit.
# NOTE(review): garbled block — line-number artifacts interleaved; the
# service_setup/service_cleanup calls and closing brace appear missing.
@test "podman generate systemd - envar" {
143
cname=$(random_string)
144
FOO=value BAR=%s run_podman create --name $cname --env FOO -e BAR --env MYVAR=myval \
145
$IMAGE sh -c 'printenv && sleep 100'
147
# Start systemd service to run this container
150
# Give container time to start; make sure output looks top-like
152
run_podman logs $cname
153
is "$output" ".*FOO=value.*" "FOO environment variable set"
154
is "$output" ".*BAR=%s.*" "BAR environment variable set"
155
is "$output" ".*MYVAR=myval.*" "MYVAL environment variable set"
157
# All good. Stop service, clean up.
161
# Regression test for #11438
162
# Regression test for #11438: 'generate systemd --new' must honor the
# container's restart policy, allow overriding via --restart-policy,
# translate unless-stopped to Restart=always, and map on-failure:N to
# StartLimitBurst=N.
# NOTE(review): garbled block — line-number artifacts interleaved; the
# closing brace appears missing.
@test "podman generate systemd - restart policy & timeouts" {
163
cname=$(random_string)
164
run_podman create --restart=always --name $cname $IMAGE
165
run_podman generate systemd --new $cname
166
is "$output" ".*Restart=always.*" "Use container's restart policy if set"
167
run_podman generate systemd --new --restart-policy=on-failure $cname
168
is "$output" ".*Restart=on-failure.*" "Override container's restart policy"
170
cname2=$(random_string)
171
run_podman create --restart=unless-stopped --name $cname2 $IMAGE
172
run_podman generate systemd --new $cname2
173
is "$output" ".*Restart=always.*" "unless-stopped translated to always"
175
cname3=$(random_string)
176
run_podman create --restart=on-failure:42 --name $cname3 $IMAGE
177
run_podman generate systemd --new $cname3
178
is "$output" ".*Restart=on-failure.*" "on-failure:xx is parsed correctly"
179
is "$output" ".*StartLimitBurst=42.*" "on-failure:xx is parsed correctly"
181
run_podman rm -t 0 -f $cname $cname2 $cname3
184
# Pretend we were started via systemd socket activation: export the
# three LISTEN_* variables such a service would inherit. LISTEN_PID is
# a dummy value; the tests only check that podman passes the variables
# through to the container.
function set_listen_env() {
    export LISTEN_PID="100" LISTEN_FDS="1" LISTEN_FDNAMES="listen_fdnames"
}
188
# Undo set_listen_env: remove all LISTEN_* socket-activation variables
# from the environment.
function unset_listen_env() {
    unset LISTEN_PID LISTEN_FDS LISTEN_FDNAMES
}
192
# Helper: compare the LISTEN_* environment reported in $output against
# an expectation; call sites pass a baseline env string and a context
# label for failure messages.
# NOTE(review): heavily garbled fragment — the lines that set $stdenv,
# $context and the multi-line expected string $std are truncated, and
# the closing brace is missing; restore from the pristine source.
function check_listen_env() {
196
is "$output" "$stdenv" "LISTEN Environment did not pass: $context"
198
out=$(for o in $output; do echo $o; done| sort)
202
LISTEN_FDNAMES=listen_fdnames" | sort)
205
is "$out" "$std" "LISTEN Environment passed: $context"
209
# Verify pass-through of systemd socket-activation variables for both
# 'podman run' and 'podman create/start --attach'.
# NOTE(review): garbled block — line-number artifacts interleaved; the
# set_listen_env/unset_listen_env calls, $stdenv/$cid capture lines and
# the closing brace are missing from this fragment.
@test "podman pass LISTEN environment " {
210
# Note that `--hostname=host1` makes sure that all containers have the same
212
run_podman run --hostname=host1 --rm $IMAGE printenv
217
run_podman run --hostname=host1 --rm $IMAGE printenv
219
check_listen_env "$stdenv" "podman run"
222
run_podman create --hostname=host1 --rm $IMAGE printenv
225
run_podman start --attach $cid
227
check_listen_env "$stdenv" "podman start"
230
# Verify 'generate systemd --template': install the generated template
# unit as $TEMPLATE_FILE, start instance @1, check its status, stop it,
# and reload systemd.
# NOTE(review): garbled block — line-number artifacts interleaved; a few
# lines (e.g. removal of the template file and the closing brace) appear
# missing.
@test "podman generate - systemd template" {
231
cname=$(random_string)
232
run_podman create --name $cname $IMAGE top
234
# See note in service_setup() above re: using --files
237
run_podman generate systemd --template --files -n $cname
238
mv "container-$cname.service" $TEMPLATE_FILE
240
run_podman rm -f $cname
242
systemctl daemon-reload
244
INSTANCE="$SERVICE_NAME@1.service"
245
systemctl_start "$INSTANCE"
247
run systemctl status "$INSTANCE"
248
assert $status -eq 0 "systemctl status $INSTANCE: $output"
250
run systemctl stop "$INSTANCE"
251
assert $status -eq 0 "Error stopping systemd unit $INSTANCE: $output"
254
systemctl daemon-reload
257
# 'generate systemd --template' must fail (exit 125) when pointed at a
# pod; verify the error message, then clean up container, pod, and the
# pause image.
# NOTE(review): garbled block — line-number artifacts interleaved; the
# closing brace appears missing.
@test "podman generate - systemd template no support for pod" {
258
cname=$(random_string)
259
podname=$(random_string)
260
run_podman pod create --name $podname
261
run_podman run --pod $podname -dt --name $cname $IMAGE top
263
run_podman 125 generate systemd --new --template -n $podname
264
is "$output" ".*--template is not supported for pods.*" "Error message contains 'not supported'"
266
run_podman rm -f $cname
267
run_podman pod rm -f $podname
268
run_podman rmi $(pause_image)
271
# '--template' requires --new: with --new=false podman must exit 125
# with an explanatory error message.
# NOTE(review): garbled fragment — container cleanup and the closing
# brace appear missing.
@test "podman generate - systemd template only used on --new" {
272
cname=$(random_string)
273
run_podman create --name $cname $IMAGE top
274
run_podman 125 generate systemd --new=false --template -n $cname
275
is "$output" ".*--template cannot be set" "Error message should be '--template requires --new'"
278
# With --cgroup-manager=cgroupfs and no DBus session address, 'podman
# info' at warning log level must produce no output at all.
# NOTE(review): garbled fragment — the closing brace appears missing.
@test "podman --cgroup=cgroupfs doesn't show systemd warning" {
279
DBUS_SESSION_BUS_ADDRESS= run_podman --log-level warning --cgroup-manager=cgroupfs info -f ''
280
is "$output" "" "output should be empty"
283
# --systemd=always must set $container_uuid inside the container to the
# first 32 characters of the container ID.
# NOTE(review): garbled fragment — container cleanup and the closing
# brace appear missing.
@test "podman --systemd sets container_uuid" {
284
run_podman run --systemd=always --name test $IMAGE printenv container_uuid
285
container_uuid=$output
286
run_podman inspect test --format '{{ .ID }}'
287
is "${container_uuid}" "${output:0:32}" "UUID should be first 32 chars of Container id"
290
# On cgroup v1, --systemd=always combined with --cgroupns=private must
# fail (exit 126) with a clear error message.
# NOTE(review): garbled fragment — the cgroup-version guard line(s) and
# the closing brace appear missing.
@test "podman --systemd fails on cgroup v1 with a private cgroupns" {
293
run_podman 126 run --systemd=always --cgroupns=private $IMAGE true
294
assert "$output" =~ ".*cgroup namespace is not supported with cgroup v1 and systemd mode"
297
# https://github.com/containers/podman/issues/13153
298
# Regression test for containers/podman#13153: the rootless netns
# (pasta) must keep working for other containers on the same network
# after a systemd-managed container stops.
# NOTE(review): garbled block — line-number artifacts interleaved and
# several lines (service setup/stop calls, network/unit cleanup, closing
# brace) are missing from this fragment.
@test "podman rootless-netns pasta processes should be in different cgroup" {
299
is_rootless || skip "only meaningful for rootless"
301
cname=$(random_string)
302
local netname=testnet-$(random_string 10)
304
# create network and container with network
305
run_podman network create $netname
306
run_podman create --name $cname --network $netname $IMAGE top
308
# run container in systemd unit
311
# run second container with network
312
cname2=$(random_string)
313
run_podman run -d --name $cname2 --network $netname $IMAGE top
315
# stop systemd container
318
pasta_iface=$(default_ifname)
320
# now check that the rootless netns slirp4netns process is still alive and working
321
run_podman unshare --rootless-netns ip addr
322
is "$output" ".*$pasta_iface.*" "pasta interface exists in the netns"
323
run_podman exec $cname2 nslookup google.com
325
run_podman rm -f -t0 $cname2
326
run_podman network rm -f $netname
329
# --health-on-failure=kill with --restart=on-failure: a failing
# healthcheck kills the container and systemd restarts the service, so
# the container comes back with a new ID and without the /uh-oh file.
# NOTE(review): garbled block — line-number artifacts interleaved and
# several lines are missing (service setup, $oldID capture, the loop's
# break/sleep/done, timeout init, and final cleanup); restore from the
# pristine source before running.
@test "podman create --health-on-failure=kill" {
330
cname=c_$(random_string)
331
run_podman create --name $cname \
332
--health-cmd /home/podman/healthcheck \
333
--health-on-failure=kill \
335
--restart=on-failure \
336
$IMAGE /home/podman/pause
338
# run container in systemd unit
341
run_podman container inspect $cname --format "{{.ID}}"
344
run_podman healthcheck run $cname
346
# Now cause the healthcheck to fail
347
run_podman exec $cname touch /uh-oh
349
# healthcheck should now fail, with exit status 1 and 'unhealthy' output
350
run_podman 1 healthcheck run $cname
351
is "$output" "unhealthy" "output from 'podman healthcheck run'"
353
# What is expected to happen now:
354
# 1) The container gets killed as the health check has failed
355
# 2) Systemd restarts the service as the restart policy is set to "on-failure"
356
# 3) The /uh-oh file is gone and $cname has another ID
358
# Wait at most 10 seconds for the service to be restarted
360
while [[ $timeout -gt 1 ]]; do
362
# - status 0, old container is still terminating: sleep and retry
363
# - status 0, new CID: yay, break
364
# - status 1, container not found: sleep and retry
365
run_podman '?' container inspect $cname --format '{{.ID}}'
366
if [[ $status == 0 ]]; then
367
if [[ "$output" != "$oldID" ]]; then
372
let timeout=$timeout-1
375
run_podman healthcheck run $cname
377
# stop systemd container
381
# End-to-end test of the podman-kube@.service template: dispatch a
# two-container pod from a YAML manifest, then verify MainPID/conmon,
# the predictable service-container name, per-container log drivers and
# logs, auto-update policies (local vs registry), pod-kill behavior,
# and a stop/start cycle, with full cleanup at the end.
# NOTE(review): heavily garbled block — the YAML here-document is
# truncated (most of the manifest and its EOF terminator are missing),
# as are the pod-inspect checks, the timeout loop around is-active, and
# the closing brace; restore from the pristine source before running.
@test "podman-kube@.service template" {
382
install_kube_template
383
# Create the YAMl file
384
yaml_source="$PODMAN_TMPDIR/test.yaml"
385
cat >$yaml_source <<EOF
390
io.containers.autoupdate: "local"
391
io.containers.autoupdate/b: "registry"
400
- echo a stdout; echo a stderr 1>&2; sleep inf
406
- echo b stdout; echo b stderr 1>&2; sleep inf
411
# Dispatch the YAML file
412
service_name="podman-kube@$(systemd-escape $yaml_source).service"
413
systemctl_start $service_name
414
systemctl is-active $service_name
416
# Make sure that Podman is the service's MainPID
417
run systemctl show --property=MainPID --value $service_name
418
is "$(</proc/$output/comm)" "conmon" "podman is the service mainPID"
420
# The name of the service container is predictable: the first 12 characters
421
# of the hash of the YAML file followed by the "-service" suffix
422
yaml_sha=$(sha256sum $yaml_source)
423
service_container="${yaml_sha:0:12}-service"
425
# Make sure that the service container exists and runs.
426
run_podman container inspect $service_container --format "{{.State.Running}}"
429
# Check for an error when trying to remove the service container
430
run_podman 125 container rm $service_container
431
is "$output" "Error: container .* is the service container of pod(s) .* and cannot be removed without removing the pod(s)"
433
# containers/podman/issues/17482: verify that the log-driver for the Pod's containers is NOT passthrough
434
for name in "a" "b"; do
435
run_podman container inspect test_pod-${name} --format "{{.HostConfig.LogConfig.Type}}"
436
assert $output != "passthrough"
437
# check that we can get the logs with passthrough when we run in a systemd unit
438
run_podman logs test_pod-$name
439
assert "$output" == "$name stdout
440
$name stderr" "logs work with passthrough"
443
# we cannot assume the ordering between a b, this depends on timing and would flake in CI
444
# use --names so we do not have to get the ID
445
run_podman pod logs --names test_pod
446
assert "$output" =~ ".*^test_pod-a a stdout.*" "logs from container a shown"
447
assert "$output" =~ ".*^test_pod-b b stdout.*" "logs from container b shown"
449
# Add a simple `auto-update --dry-run` test here to avoid too much redundancy
450
# with 255-auto-update.bats
451
run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
452
is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,local.*" "global auto-update policy gets applied"
453
is "$output" ".*$service_name,.* (test_pod-b),$IMAGE,false,registry.*" "container-specified auto-update policy gets applied"
455
# Kill the pod and make sure the service is not running.
456
run_podman pod kill test_pod
458
# echos are for debugging test flakes
459
echo "$_LOG_PROMPT systemctl is-active $service_name"
460
run systemctl is-active $service_name
462
if [[ "$output" == "inactive" ]]; then
467
is "$output" "inactive" "systemd service transitioned to 'inactive' state: $service_name"
469
# Now stop and start the service again.
470
systemctl stop $service_name
471
systemctl_start $service_name
472
systemctl is-active $service_name
473
run_podman container inspect $service_container --format "{{.State.Running}}"
477
systemctl stop $service_name
478
run_podman 1 container exists $service_container
479
run_podman 1 pod exists test_pod
480
run_podman rmi $(pause_image)
481
rm -f $UNIT_DIR/$unit_name
484
@test "podman generate - systemd - DEPRECATED" {
485
run_podman generate systemd --help
486
is "$output" ".*[DEPRECATED] command:"
487
is "$output" ".*\[DEPRECATED\] Generate systemd units.*"
488
run_podman create --name test $IMAGE
489
run_podman generate systemd test >/dev/null
490
is "$output" ".*[DEPRECATED] command:"
491
run_podman generate --help
492
is "$output" ".*\[DEPRECATED\] Generate systemd units"