podman

Форк
0
/
255-auto-update.bats 
697 строк · 25.1 Кб
1
#!/usr/bin/env bats   -*- bats -*-
2
#
3
# Tests for automatically update images for containerized services
4
#
5

6
load helpers
7
load helpers.network
8
load helpers.registry
9
load helpers.systemd
10

11
# File tracking the systemd units created by the tests; consumed by
# teardown() below to clean them up.
export SNAME_FILE

function setup() {
    skip_if_remote "systemd tests are meaningless over remote"
    basic_setup

    # One unit-list file per test, under the per-test temporary directory.
    SNAME_FILE="${PODMAN_TMPDIR}/services"
}
19

20
function teardown() {
    # Stop and remove every systemd unit recorded in $SNAME_FILE by the
    # tests.  Timer-driven units need their .timer stopped and disabled,
    # too.  Use 'read -r' and quoted expansions so unit names survive
    # word splitting and backslash mangling.
    if [[ -e $SNAME_FILE ]]; then
        while read -r line; do
            if [[ "$line" =~ "podman-auto-update" ]]; then
                echo "Stop timer: $line.timer"
                systemctl stop "$line.timer"
                systemctl disable "$line.timer"
            else
                systemctl stop "$line"
            fi
            rm -f "$UNIT_DIR/$line".{service,timer}
        done < "$SNAME_FILE"

        rm -f "$SNAME_FILE"
    fi
    SNAME_FILE=

    # Remove all images the tests may have tagged or pulled.
    run_podman rmi -f                              \
            quay.io/libpod/alpine:latest           \
            quay.io/libpod/busybox:latest          \
            quay.io/libpod/localtest:latest        \
            quay.io/libpod/autoupdatebroken:latest \
            quay.io/libpod/test:latest

    # The rollback tests may leave some dangling images behind, so let's prune
    # them to leave a clean state.
    run_podman image prune -f
    basic_teardown
}
49

50
# This functions is used for handle the basic step in auto-update related
51
# tests. Including following steps:
52
#   1. Generate a random container name and echo it to output.
53
#   2. Tag the fake image before test
54
#   3. Start a container with io.containers.autoupdate
55
#   4. Generate the service file from the container
56
#   5. Remove the origin container
57
#   6. Start the container from service
58
#   7. Use this fully-qualified image instead of 2)
59
function generate_service() {
60
    local target_img_basename=$1
61
    local autoupdate=$2
62
    local command=$3
63
    local extraArgs=$4
64
    local noTag=$5
65
    local requires=$6
66

67
    # Unless specified, set a default command.
68
    if [[ -z "$command" ]]; then
69
        command="top -d 120"
70
    fi
71

72
    # Container name. Include the autoupdate type, to make debugging easier.
73
    # IMPORTANT: variable 'cname' is passed (out of scope) up to caller!
74
    cname=c_${autoupdate//\'/}_$(random_string)
75
    target_img="quay.io/libpod/$target_img_basename:latest"
76
    if [[ -n "$7" ]]; then
77
        target_img="$7"
78
    fi
79

80
    if [[ -z "$noTag" ]]; then
81
        run_podman tag $IMAGE $target_img
82
    fi
83

84
    if [[ -n "$autoupdate" ]]; then
85
        label="--label io.containers.autoupdate=$autoupdate"
86
    else
87
        label=""
88
    fi
89

90
    if [[ -n "$requires" ]]; then
91
        requires="--requires=$requires"
92
    fi
93

94
    run_podman create $extraArgs --name $cname $label $target_img $command
95

96
    (cd $UNIT_DIR; run_podman generate systemd --new --files --name $requires $cname)
97
    echo "container-$cname" >> $SNAME_FILE
98
    run_podman rm -t 0 -f $cname
99

100
    systemctl daemon-reload
101
    systemctl_start container-$cname
102
    systemctl status container-$cname
103

104
    # Original image ID.
105
    # IMPORTANT: variable 'ori_image' is passed (out of scope) up to caller!
106
    run_podman inspect --format "{{.Image}}" $cname
107
    ori_image=$output
108
}
109

110
# Poll for up to ~5 seconds until the given systemd unit is active.
# Arguments:
#   $1 - systemd unit name, e.g. "container-foo.service"
# Dies (fails the test) if the unit never becomes active.
function _wait_service_ready() {
    local sname=$1

    local timeout=6
    while [[ $timeout -gt 1 ]]; do
        if systemctl -q is-active "$sname"; then
            return
        fi
        sleep 1
        # Same decrement idiom as _confirm_update, for consistency.
        timeout=$((timeout - 1))
    done

    # Print service status as debug information before failing the test.
    systemctl status "$sname"
    die "Timed out waiting for $sname to start"
}
126

127
# Wait for container to update, as confirmed by its image ID changing
128
function _confirm_update() {
129
    local cname=$1
130
    local old_iid=$2
131

132
    # Image has already been pulled, so this shouldn't take too long
133
    local timeout=10
134
    while [[ $timeout -gt 0 ]]; do
135
        sleep 1
136
        run_podman '?' inspect --format "{{.Image}}" $cname
137
        if [[ $status != 0 ]]; then
138
            if [[ $output =~ (no such object|does not exist in database): ]]; then
139
                # this is ok, it just means the container is being restarted
140
                :
141
            else
142
                die "podman inspect $cname failed unexpectedly"
143
            fi
144
        elif [[ $output != $old_iid ]]; then
145
            return
146
        fi
147
        timeout=$((timeout - 1))
148
    done
149

150
    die "Timed out waiting for $cname to update; old IID=$old_iid"
151
}
152

153
@test "podman auto-update - validate input" {
154
    # Fully-qualified image reference is required
155
    run_podman create --label io.containers.autoupdate=registry $IMAGE
156
    run_podman rm -f "$output"
157

158
    # Short name does not work
159
    shortname="shortname:latest"
160
    run_podman image tag $IMAGE $shortname
161
    run_podman 125 create --label io.containers.autoupdate=registry $shortname
162
    is "$output" "Error: short name: auto updates require fully-qualified image reference: \"$shortname\""
163

164
    # Requires docker (or no) transport
165
    archive=$PODMAN_TMPDIR/archive.tar
166
    run_podman save -o $archive $IMAGE
167
    run_podman 125 create --label io.containers.autoupdate=registry docker-archive:$archive
168
    is "$output" ".*Error: auto updates require the docker image transport but image is of transport \"docker-archive\""
169

170
    run_podman rmi $shortname
171
}
172

173
# This test can fail in dev. environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman auto-update - label io.containers.autoupdate=image" {
    # With no auto-update containers, auto-update is a no-op and
    # must emit no system event.
    since=$(date --iso-8601=seconds)
    run_podman auto-update
    is "$output" ""
    run_podman events --filter type=system --since $since --stream=false
    is "$output" ""

    # Generate two units.  The first "parent" to be auto updated, the second
    # "child" depends on/requires the "parent" and is expected to get restarted
    # as well on auto updates (regression test for #18926).
    generate_service alpine image
    ctr_parent=$cname
    _wait_service_ready container-$ctr_parent.service

    generate_service alpine image "" "" "" "container-$ctr_parent.service"
    ctr_child=$cname
    _wait_service_ready container-$ctr_child.service
    run_podman container inspect --format "{{.ID}}" $ctr_child
    old_child_id=$output

    # A dry run must report the pending update and emit a system event.
    since=$(date --iso-8601=seconds)
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$ctr_parent.service,quay.io/libpod/alpine:latest,pending,registry.*" "Image update is pending."
    run_podman events --filter type=system --since $since --stream=false
    is "$output" ".* system auto-update"

    # The real update must pull and report success, and emit an event.
    since=$(date --iso-8601=seconds)
    run_podman auto-update --rollback=false --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" "Trying to pull.*" "Image is updated."
    is "$output" ".*container-$ctr_parent.service,quay.io/libpod/alpine:latest,true,registry.*" "Image is updated."
    run_podman events --filter type=system --since $since --stream=false
    is "$output" ".* system auto-update"

    # Confirm that the update was successful and that the child container/unit
    # has been restarted as well.
    _confirm_update $ctr_parent $ori_image
    run_podman container inspect --format "{{.ID}}" $ctr_child
    assert "$output" != "$old_child_id" \
        "child container/unit has not been restarted during update"
    run_podman container inspect --format "{{.State.Status}}" $ctr_child
    is "$output" "running" "child container is in running state"
}
218

219
@test "podman auto-update - label io.containers.autoupdate=image with rollback" {
220
    # FIXME: this test should exercise the authfile label to have a regression
221
    # test for #11171.
222

223
    # Note: the autoupdatebroken image is empty on purpose so it cannot be
224
    # executed and force a rollback.  The rollback test for the local policy
225
    # is exercising the case where the container doesn't send a ready message.
226
    image=quay.io/libpod/autoupdatebroken
227

228
    run_podman tag $IMAGE $image
229
    generate_service autoupdatebroken image
230

231
    _wait_service_ready container-$cname.service
232
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
233
    is "$output" ".*container-$cname.service,$image:latest,pending,registry.*" "Image update is pending."
234

235
    run_podman container inspect --format "{{.Image}}" $cname
236
    oldID="$output"
237

238
    run_podman inspect --format "{{.ID}}" $cname
239
    containerID="$output"
240

241
    run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
242
    is "$output" "Trying to pull.*" "Image is updated."
243
    is "$output" ".*container-$cname.service,$image:latest,rolled back,registry.*" "Image has been rolled back."
244

245
    run_podman container inspect --format "{{.Image}}" $cname
246
    is "$output" "$oldID" "container rolled back to previous image"
247

248
    run_podman container inspect --format "{{.ID}}" $cname
249
    assert "$output" != "$containerID" \
250
           "container has not been restarted during rollback"
251
}
252

253
@test "podman auto-update - label io.containers.autoupdate=disabled" {
254
    generate_service alpine disabled
255

256
    _wait_service_ready container-$cname.service
257
    run_podman auto-update
258
    is "$output" "" "Image is not updated when autoupdate=disabled."
259

260
    run_podman inspect --format "{{.Image}}" $cname
261
    is "$output" "$ori_image" "Image ID should not change"
262
}
263

264
@test "podman auto-update - label io.containers.autoupdate=fakevalue" {
265
    fakevalue=fake_$(random_string)
266
    generate_service alpine $fakevalue
267

268
    _wait_service_ready container-$cname.service
269
    run_podman 125 auto-update
270
    is "$output" ".*invalid auto-update policy.*" "invalid policy setup"
271

272
    run_podman inspect --format "{{.Image}}" $cname
273
    is "$output" "$ori_image" "Image ID should not change"
274
}
275

276
@test "podman auto-update - label io.containers.autoupdate=local" {
277
    generate_service localtest local
278
    _wait_service_ready container-$cname.service
279

280
    image=quay.io/libpod/localtest:latest
281
    run_podman commit --change CMD=/bin/bash $cname $image
282
    run_podman image inspect --format "{{.ID}}" $image
283

284
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
285
    is "$output" ".*container-$cname.service,quay.io/libpod/localtest:latest,pending,local.*" "Image update is pending."
286

287
    run_podman auto-update --rollback=false --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
288
    is "$output" ".*container-$cname.service,quay.io/libpod/localtest:latest,true,local.*" "Image is updated."
289

290
    _confirm_update $cname $ori_image
291
}
292

293
# This test can fail in dev. environment because of SELinux.
# quick fix: chcon -t container_runtime_exec_t ./bin/podman
@test "podman auto-update - label io.containers.autoupdate=local with rollback" {
    # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
    # assume that we work only with crun, nothing else.
    # [copied from 260-sdnotify.bats]
    runtime=$(podman_runtime)
    if [[ "$runtime" != "crun" ]]; then
        skip "this test only works with crun, not $runtime"
    fi

    _prefetch $SYSTEMD_IMAGE

    # Dockerfile.1: healthy image whose entrypoint notifies systemd
    # readiness and then waits for SIGTERM.
    dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
    cat >$dockerfile1 <<EOF
FROM $SYSTEMD_IMAGE
RUN echo -e "#!/bin/sh\n\
printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
>> /runme
RUN chmod +x /runme
EOF

    # Dockerfile.2: broken image whose entrypoint exits immediately,
    # so it never signals readiness -> forces a rollback.
    dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
    cat >$dockerfile2 <<EOF
FROM $SYSTEMD_IMAGE
RUN echo -e "#!/bin/sh\n\
exit 1" >> /runme
RUN chmod +x /runme
EOF
    image=test

    # Generate a healthy image that will run correctly.
    run_podman build -t quay.io/libpod/$image -f $dockerfile1

    generate_service $image local /runme --sdnotify=container noTag
    _wait_service_ready container-$cname.service

    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,false,local.*" "No update available"

    # Generate an unhealthy image that will fail.
    run_podman build -t quay.io/libpod/$image -f $dockerfile2
    run_podman image inspect --format "{{.ID}}" $image
    newID="$output"

    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,pending,local.*" "Image updated is pending"

    # Note: we rollback automatically by default.
    run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
    is "$output" ".*container-$cname.service,quay.io/libpod/$image:latest,rolled back,local.*" "Rolled back to old image"

    # Make sure that new container is not using the new image ID anymore.
    _confirm_update $cname $newID
}
349

350
@test "podman auto-update with multiple services" {
351
    # Preserve original image ID, to confirm that it changes (or not)
352
    run_podman inspect --format "{{.Id}}" $IMAGE
353
    local img_id="$output"
354

355
    local cnames=()
356
    local -A expect_update
357
    local -A will_update=([image]=1 [registry]=1 [local]=1)
358

359
    local fakevalue=fake_$(random_string)
360
    for auto_update in image registry "" disabled "''" $fakevalue local
361
    do
362
        local img_base="alpine"
363
        if [[ $auto_update == "registry" ]]; then
364
            img_base="busybox"
365
        elif [[ $auto_update == "local" ]]; then
366
            img_base="localtest"
367
        fi
368
        generate_service $img_base $auto_update
369
        cnames+=($cname)
370
        if [[ $auto_update == "local" ]]; then
371
            local_cname=$cname
372
        fi
373

374
        if [[ -n "$auto_update" && -n "${will_update[$auto_update]}" ]]; then
375
            expect_update[$cname]=1
376
        fi
377
    done
378

379
    # Make sure all services are ready.
380
    for cname in "${cnames[@]}"; do
381
        _wait_service_ready container-$cname.service
382
    done
383
    run_podman commit --change CMD=/bin/bash $local_cname quay.io/libpod/localtest:latest
384
    # Exit code is expected, due to invalid 'fakevalue'
385
    run_podman 125 auto-update --rollback=false
386
    update_log=$output
387
    is "$update_log" ".*invalid auto-update policy.*" "invalid policy setup"
388
    is "$update_log" ".*Error: invalid auto-update policy.*" "invalid policy setup"
389

390
    local n_updated=$(grep -c 'Trying to pull' <<<"$update_log")
391
    is "$n_updated" "2" "Number of images updated from registry."
392

393
    for cname in "${!expect_update[@]}"; do
394
        is "$update_log" ".*$cname.*" "container with auto-update policy image updated"
395
        # Just because podman says it fetched, doesn't mean it actually updated
396
        _confirm_update $cname $img_id
397
    done
398

399
    # Final confirmation that all image IDs have/haven't changed
400
    for cname in "${cnames[@]}"; do
401
        run_podman inspect --format "{{.Image}}" $cname
402
        if [[ -n "${expect_update[$cname]}" ]]; then
403
            assert "$output" != "$img_id" "$cname: image ID did not change"
404
        else
405
            assert "$output" = "$img_id" "Image ID should not be changed."
406
        fi
407
    done
408
}
409

410
@test "podman auto-update using systemd" {
411
    skip_if_journald_unavailable
412

413
    generate_service alpine image
414

415
    cat >$UNIT_DIR/podman-auto-update-$cname.timer <<EOF
416
[Unit]
417
Description=Podman auto-update testing timer
418

419
[Timer]
420
OnCalendar=*-*-* *:*:0/2
421
Persistent=true
422

423
[Install]
424
WantedBy=timers.target
425
EOF
426
    cat >$UNIT_DIR/podman-auto-update-$cname.service <<EOF
427
[Unit]
428
Description=Podman auto-update testing service
429
Documentation=man:podman-auto-update(1)
430
Wants=network-online.target
431
After=network-online.target
432

433
[Service]
434
Type=oneshot
435
ExecStart=$PODMAN auto-update
436
Environment="http_proxy=${http_proxy}"
437
Environment="HTTP_PROXY=${HTTP_PROXY}"
438
Environment="https_proxy=${https_proxy}"
439
Environment="HTTPS_PROXY=${HTTPS_PROXY}"
440
Environment="no_proxy=${no_proxy}"
441
Environment="NO_PROXY=${NO_PROXY}"
442

443
[Install]
444
WantedBy=default.target
445
EOF
446

447
    echo "podman-auto-update-$cname" >> $SNAME_FILE
448
    systemctl enable --now podman-auto-update-$cname.timer
449
    systemctl list-timers --all
450

451
    # systemd       <245 displays 'Started Podman auto-update ...'
452
    # systemd 245 - <250 displays 'Finished Podman auto-update ...'
453
    # systemd 250 - ???? displays 'Finished <unit name> - Podman auto-...'
454
    local expect='(Started|Finished.*) Podman auto-update testing service'
455
    local failed_start=failed
456
    local count=0
457
    while [ $count -lt 120 ]; do
458
        run journalctl -n 15 -u podman-auto-update-$cname.service
459
        if [[ "$output" =~ $expect ]]; then
460
            failed_start=
461
            break
462
        fi
463
        ((count+=1))
464
        sleep 1
465
    done
466

467
    if [[ -n "$failed_start" ]]; then
468
        echo "journalctl output:"
469
        sed -e 's/^/  /' <<<"$output"
470
        die "Did not find expected string '$expect' in journalctl output for $cname"
471
    fi
472

473
    _confirm_update $cname $ori_image
474
}
475

476
@test "podman-kube@.service template with rollback" {
477
    # sdnotify fails with runc 1.0.0-3-dev2 on Ubuntu. Let's just
478
    # assume that we work only with crun, nothing else.
479
    # [copied from 260-sdnotify.bats]
480
    runtime=$(podman_runtime)
481
    if [[ "$runtime" != "crun" ]]; then
482
        skip "this test only works with crun, not $runtime"
483
    fi
484

485
    _prefetch $SYSTEMD_IMAGE
486
    install_kube_template
487

488
    dockerfile1=$PODMAN_TMPDIR/Dockerfile.1
489
    cat >$dockerfile1 <<EOF
490
FROM $SYSTEMD_IMAGE
491
RUN echo -e "#!/bin/sh\n\
492
printenv NOTIFY_SOCKET; echo READY; systemd-notify --ready;\n\
493
trap 'echo Received SIGTERM, finishing; exit' SIGTERM; echo WAITING; while :; do sleep 0.1; done" \
494
>> /runme
495
RUN chmod +x /runme
496
EOF
497

498
    dockerfile2=$PODMAN_TMPDIR/Dockerfile.2
499
    cat >$dockerfile2 <<EOF
500
FROM $SYSTEMD_IMAGE
501
RUN echo -e "#!/bin/sh\n\
502
exit 1" >> /runme
503
RUN chmod +x /runme
504
EOF
505
    local_image=localhost/image:$(random_string 10)
506

507
    # Generate a healthy image that will run correctly.
508
    run_podman build -t $local_image -f $dockerfile1
509
    run_podman image inspect --format "{{.ID}}" $local_image
510
    oldID="$output"
511

512
    # Create the YAMl file
513
    yaml_source="$PODMAN_TMPDIR/test.yaml"
514
    cat >$yaml_source <<EOF
515
apiVersion: v1
516
kind: Pod
517
metadata:
518
  annotations:
519
      io.containers.autoupdate: "registry"
520
      io.containers.autoupdate/b: "local"
521
      io.containers.sdnotify/b: "container"
522
  labels:
523
    app: test
524
  name: test_pod
525
spec:
526
  containers:
527
  - command:
528
    - top
529
    image: $IMAGE
530
    name: a
531
  - command:
532
    - /runme
533
    image: $local_image
534
    name: b
535
EOF
536

537
    # Dispatch the YAML file
538
    service_name="podman-kube@$(systemd-escape $yaml_source).service"
539
    systemctl_start $service_name
540
    systemctl is-active $service_name
541

542
    # Make sure the containers are properly configured
543
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
544
    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
545
    is "$output" ".*$service_name,.* (test_pod-b),$local_image,false,local.*" "container-specified auto-update policy gets applied"
546

547
    # Generate a broken image that will fail.
548
    run_podman build -t $local_image -f $dockerfile2
549
    run_podman image inspect --format "{{.ID}}" $local_image
550
    newID="$output"
551

552
    assert "$oldID" != "$newID" "broken image really is a new one"
553

554
    # Make sure container b sees the new image
555
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
556
    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,false,registry.*" "global auto-update policy gets applied"
557
    is "$output" ".*$service_name,.* (test_pod-b),$local_image,pending,local.*" "container b sees the new image"
558

559
    # Now update and check for the rollback
560
    run_podman auto-update --format "{{.Unit}},{{.Container}},{{.Image}},{{.Updated}},{{.Policy}}"
561
    is "$output" ".*$service_name,.* (test_pod-a),$IMAGE,rolled back,registry.*" "container a was rolled back as the update of b failed"
562
    is "$output" ".*$service_name,.* (test_pod-b),$local_image,rolled back,local.*" "container b was rolled back as its update has failed"
563

564
    # Clean up
565
    systemctl stop $service_name
566
    run_podman rmi -f $(pause_image) $local_image $newID $oldID
567
    rm -f $UNIT_DIR/$unit_name
568
}
569

570
@test "podman auto-update - pod" {
571
    dockerfile=$PODMAN_TMPDIR/Dockerfile
572
    cat >$dockerfile <<EOF
573
FROM $IMAGE
574
RUN touch /123
575
EOF
576

577
    podname=$(random_string)
578
    ctrname=$(random_string)
579
    podunit="$UNIT_DIR/pod-$podname.service.*"
580
    ctrunit="$UNIT_DIR/container-$ctrname.service.*"
581
    local_image=localhost/image:$(random_string 10)
582

583
    run_podman tag $IMAGE $local_image
584

585
    run_podman pod create --name=$podname
586
    run_podman create --label "io.containers.autoupdate=local" --pod=$podname --name=$ctrname $local_image top
587

588
    # cd into the unit dir to generate the two files.
589
    pushd "$UNIT_DIR"
590
    run_podman generate systemd --name --new --files $podname
591
    is "$output" ".*$podunit.*"
592
    is "$output" ".*$ctrunit.*"
593
    popd
594

595
    systemctl daemon-reload
596

597
    systemctl_start pod-$podname.service
598
    _wait_service_ready container-$ctrname.service
599

600
    run_podman pod inspect --format "{{.State}}" $podname
601
    is "$output" "Running" "pod is in running state"
602
    run_podman container inspect --format "{{.State.Status}}" $ctrname
603
    is "$output" "running" "container is in running state"
604

605
    run_podman pod inspect --format "{{.ID}}" $podname
606
    podid="$output"
607
    run_podman container inspect --format "{{.ID}}" $ctrname
608
    ctrid="$output"
609

610
    # Note that the pod's unit is listed below, not the one of the container.
611
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
612
    is "$output" ".*pod-$podname.service,$local_image,false,local.*" "No update available"
613

614
    run_podman build -t $local_image -f $dockerfile
615

616
    run_podman auto-update --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
617
    is "$output" ".*pod-$podname.service,$local_image,pending,local.*" "Image updated is pending"
618

619
    run_podman auto-update --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
620
    is "$output" ".*pod-$podname.service,$local_image,true,local.*" "Service has been restarted"
621
    _wait_service_ready container-$ctrname.service
622

623
    run_podman pod inspect --format "{{.ID}}" $podname
624
    assert "$output" != "$podid" "pod has been recreated"
625
    run_podman container inspect --format "{{.ID}}" $ctrname
626
    assert "$output" != "$ctrid" "container has been recreated"
627

628
    run systemctl stop pod-$podname.service
629
    assert $status -eq 0 "Error stopping pod systemd unit: $output"
630

631
    run_podman pod rm -f $podname
632
    run_podman rmi $local_image $(pause_image)
633
    rm -f $podunit $ctrunit
634
    systemctl daemon-reload
635
}
636

637
@test "podman-auto-update --authfile"  {
638
    # Test the three supported ways of using authfiles with auto updates
639
    # 1) Passed via --authfile CLI flag
640
    # 2) Passed via the REGISTRY_AUTH_FILE env variable
641
    # 3) Via a label at container creation where 1) and 2) will be ignored
642

643
    registry=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
644
    image_on_local_registry=$registry/name:tag
645
    authfile=$PODMAN_TMPDIR/authfile.json
646

647
    # First, start the registry and populate the authfile that we can use for the test.
648
    start_registry
649
    run_podman login --authfile=$authfile \
650
        --tls-verify=false \
651
        --username ${PODMAN_LOGIN_USER} \
652
        --password ${PODMAN_LOGIN_PASS} \
653
        $registry
654

655
    # Push the image to the registry and pull it down again to make sure we
656
    # have the identical digest in the local storage
657
    run_podman push --tls-verify=false --creds "${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS}" $IMAGE $image_on_local_registry
658
    run_podman pull --tls-verify=false --creds "${PODMAN_LOGIN_USER}:${PODMAN_LOGIN_PASS}" $image_on_local_registry
659

660
    # Generate a systemd service with the "registry" auto-update policy running
661
    # "top" inside the image we just pushed to the local registry.
662
    generate_service "" registry top "" "" "" $image_on_local_registry
663
    ctr=$cname
664
    _wait_service_ready container-$ctr.service
665

666
    run_podman 125 auto-update
667
    is "$output" \
668
       ".*Error: checking image updates for container .*: x509: .*"
669

670
    run_podman 125 auto-update --tls-verify=false
671
    is "$output" \
672
       ".*Error: checking image updates for container .*: authentication required"
673

674
    # Test 1)
675
    run_podman auto-update --authfile=$authfile --tls-verify=false --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
676
    is "$output" "container-$ctr.service,$image_on_local_registry,false,registry" "auto-update works with authfile"
677

678
    # Test 2)
679
    REGISTRY_AUTH_FILE=$authfile run_podman auto-update --tls-verify=false --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
680
    is "$output" "container-$ctr.service,$image_on_local_registry,false,registry" "auto-update works with env var"
681
    systemctl stop container-$ctr.service
682
    run_podman rm -f -t0 --ignore $ctr
683

684
    # Create a container with the auth-file label
685
    generate_service "" registry top "--label io.containers.autoupdate.authfile=$authfile" "" "" $image_on_local_registry
686
    ctr=$cname
687
    _wait_service_ready container-$ctr.service
688

689
    # Test 3)
690
    # Also make sure that the label takes precedence over the CLI flag.
691
    run_podman auto-update --authfile=/dev/null --tls-verify=false --dry-run --format "{{.Unit}},{{.Image}},{{.Updated}},{{.Policy}}"
692
    is "$output" "container-$ctr.service,$image_on_local_registry,false,registry" "auto-update works with authfile container label"
693
    run_podman rm -f -t0 --ignore $ctr
694
    run_podman rmi $image_on_local_registry
695
}
696

697
# vim: filetype=sh
698

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.