	"github.com/containers/common/pkg/resize"
	"github.com/containers/podman/v5/libpod/define"
	"github.com/containers/podman/v5/libpod/events"
	"github.com/containers/podman/v5/pkg/signal"
	"github.com/containers/storage/pkg/archive"
	spec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
// Init creates a container in the OCI runtime, moving a container from
// ContainerStateConfigured, ContainerStateStopped, or ContainerStateExited to
// ContainerStateCreated. Once in Created state, Conmon will be running, which
// allows the container to be attached to. The container can subsequently
// transition to ContainerStateRunning via Start(), or be transitioned back to
// ContainerStateConfigured by Cleanup() (which will stop conmon and unmount the
// container).
// Init requires that all dependency containers be started (e.g. pod infra
// containers). The `recursive` parameter will, if set to true, start these
// dependency containers before initializing this container.
func (c *Container) Init(ctx context.Context, recursive bool) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) {
		return fmt.Errorf("container %s has already been created in runtime: %w", c.ID(), define.ErrCtrStateInvalid)
	}

	if !recursive {
		if err := c.checkDependenciesAndHandleError(); err != nil {
			return err
		}
	} else {
		if err := c.startDependencies(ctx); err != nil {
			return err
		}
	}

	if err := c.prepare(); err != nil {
		if err2 := c.cleanup(ctx); err2 != nil {
			logrus.Errorf("Cleaning up container %s: %v", c.ID(), err2)
		}
		return err
	}

	if c.state.State == define.ContainerStateStopped {
		// Reinitialize the container
		return c.reinit(ctx, false)
	}

	// Initialize the container for the first time
	return c.init(ctx, false)
}

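// A minimal usage sketch (illustrative only, not part of this file; `rt`,
// `ctx`, and the error handling are assumed to exist in the caller): create
// the container in the OCI runtime without starting it, initializing its
// dependency containers first.
//
//	ctr, err := rt.LookupContainer("myctr")
//	if err != nil {
//		return err
//	}
//	if err := ctr.Init(ctx, true); err != nil {
//		return err
//	}
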
// Start starts the given container.
// Start will accept containers in ContainerStateConfigured,
// ContainerStateCreated, ContainerStateStopped, and ContainerStateExited, and
// transition them to ContainerStateRunning (all containers not in
// ContainerStateCreated will make an intermediate stop there via the Init API).
// Once in ContainerStateRunning, the container can be transitioned to
// ContainerStatePaused via Pause(), or to ContainerStateStopped by the process
// stopping (either due to exit, or being forced to stop by the Kill or Stop API
// calls).
// Start requires that all dependency containers (e.g. pod infra containers) are
// running before starting the container. The recursive parameter, if set, will
// start all dependencies before starting this container.
func (c *Container) Start(ctx context.Context, recursive bool) (finalErr error) {
	defer func() {
		if finalErr != nil {
			// As this is the first defer, it's the last thing to
			// happen in the function - so `defer c.lock.Unlock()`
			// below already fired. Re-lock before saving the error.
			if !c.batched {
				c.lock.Lock()
				defer c.lock.Unlock()
			}
			if err := saveContainerError(c, finalErr); err != nil {
				logrus.Debug(err)
			}
		}
	}()

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if err := c.prepareToStart(ctx, recursive); err != nil {
		return err
	}

	// Start the container
	return c.start(ctx)
}

// Update updates the given container.
// Either resource limits or restart policy can be updated.
// At least one of resources and restartPolicy must not be nil.
// If restartRetries is not nil, restartPolicy must be set and must be "on-failure".
func (c *Container) Update(resources *spec.LinuxResources, restartPolicy *string, restartRetries *uint) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.ensureState(define.ContainerStateRemoving) {
		return fmt.Errorf("container %s is being removed, cannot update: %w", c.ID(), define.ErrCtrStateInvalid)
	}

	return c.update(resources, restartPolicy, restartRetries)
}

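// A usage sketch (illustrative only; `ctr` is assumed): raise the memory
// limit in place via the OCI runtime-spec resources struct, leaving the
// restart policy untouched.
//
//	limit := int64(512 * 1024 * 1024) // 512 MiB
//	res := &spec.LinuxResources{
//		Memory: &spec.LinuxMemory{Limit: &limit},
//	}
//	if err := ctr.Update(res, nil, nil); err != nil {
//		return err
//	}
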
// StartAndAttach starts a container and attaches to it.
// This acts as a combination of the Start and Attach APIs, ensuring proper
// ordering of the two such that no output from the container is lost (e.g. the
// Attach call occurs before Start).
// In overall functionality, it is identical to the Start call, with the added
// side effect that an attach session will also be started.
func (c *Container) StartAndAttach(ctx context.Context, streams *define.AttachStreams, keys string, resize <-chan resize.TerminalSize, recursive bool) (retChan <-chan error, finalErr error) {
	defer func() {
		if finalErr != nil {
			// As this is the first defer, it's the last thing to
			// happen in the function - so `defer c.lock.Unlock()`
			// below already fired. Re-lock before saving the error.
			if !c.batched {
				c.lock.Lock()
				defer c.lock.Unlock()
			}
			if err := saveContainerError(c, finalErr); err != nil {
				logrus.Debug(err)
			}
		}
	}()

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	if err := c.prepareToStart(ctx, recursive); err != nil {
		return nil, err
	}

	attachChan := make(chan error)

	// We need to ensure that we don't return until start() fired in attach.
	// Use a channel to sync
	startedChan := make(chan bool)

	// Attach to the container before starting it
	go func() {
		if c.LogDriver() != define.PassthroughLogging && c.LogDriver() != define.PassthroughTTYLogging {
			registerResizeFunc(resize, c.bundlePath())
		}

		opts := new(AttachOptions)
		opts.Streams = streams
		opts.DetachKeys = &keys
		opts.Start = true
		opts.Started = startedChan

		if err := c.ociRuntime.Attach(c, opts); err != nil {
			attachChan <- err
		}
		close(attachChan)
	}()

	select {
	case err := <-attachChan:
		return nil, err
	case <-startedChan:
		c.newContainerEvent(events.Attach)
	}

	return attachChan, nil
}

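// A usage sketch (illustrative only; `streams` and `resizeChan` are assumed
// to be set up by the caller): the returned channel reports the attach
// session's terminal error and should be drained so the attach goroutine can
// finish.
//
//	errChan, err := ctr.StartAndAttach(ctx, streams, "ctrl-p,ctrl-q", resizeChan, false)
//	if err != nil {
//		return err
//	}
//	// Blocks until the attach session ends (detach or container exit).
//	if err := <-errChan; err != nil {
//		logrus.Errorf("Attach to container %s failed: %v", ctr.ID(), err)
//	}
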
// RestartWithTimeout restarts a running container with a given timeout, in seconds
func (c *Container) RestartWithTimeout(ctx context.Context, timeout uint) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if err := c.checkDependenciesAndHandleError(); err != nil {
		return err
	}

	return c.restartWithTimeout(ctx, timeout)
}

// Stop uses the container's stop signal (or SIGTERM if no signal was specified)
// to stop the container, and if it has not stopped after the container's stop
// timeout, SIGKILL is used to attempt to forcibly stop the container.
// The default stop timeout is 10 seconds, but can be overridden when the
// container is created.
func (c *Container) Stop() error {
	// Stop with the container's given timeout
	return c.StopWithTimeout(c.config.StopTimeout)
}

// StopWithTimeout is a version of Stop that allows a timeout to be specified
// manually. If timeout is 0, SIGKILL will be used immediately to kill the
// container.
func (c *Container) StopWithTimeout(timeout uint) (finalErr error) {
	defer func() {
		if finalErr != nil {
			// As this is the first defer, it's the last thing to
			// happen in the function - so `defer c.lock.Unlock()`
			// below already fired. Re-lock before saving the error.
			if !c.batched {
				c.lock.Lock()
				defer c.lock.Unlock()
			}
			if err := saveContainerError(c, finalErr); err != nil {
				logrus.Debug(err)
			}
		}
	}()

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	return c.stop(timeout)
}

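// A usage sketch (illustrative only; `ctr` is assumed): Stop honors the
// timeout recorded at creation time, while StopWithTimeout overrides it; a
// timeout of 0 skips the stop signal entirely and goes straight to SIGKILL.
//
//	if err := ctr.StopWithTimeout(0); err != nil {
//		return err
//	}
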
// Kill sends a signal to a container
func (c *Container) Kill(signal uint) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	switch c.state.State {
	case define.ContainerStateRunning, define.ContainerStateStopping, define.ContainerStatePaused:
		// Note that killing containers in "stopping" state is okay.
		// In that state, Podman is waiting for the runtime to
		// stop the container and if that is taking too long, a user
		// may have decided to kill the container after all.
	default:
		return fmt.Errorf("can only kill running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
	}

	// Hardcode all = false, we only use all when removing.
	if err := c.ociRuntime.KillContainer(c, signal, false); err != nil {
		return err
	}

	c.state.StoppedByUser = true

	c.newContainerEvent(events.Kill)

	// Make sure to wait for the container to exit in case of SIGKILL.
	if signal == uint(unix.SIGKILL) {
		return c.waitForConmonToExitAndSave()
	}

	return c.save()
}

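// A usage sketch (illustrative only; `ctr` is assumed): ask the container's
// main process to reload its configuration by sending SIGHUP, using the
// signal constants from golang.org/x/sys/unix.
//
//	if err := ctr.Kill(uint(unix.SIGHUP)); err != nil {
//		return err
//	}
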
// Attach attaches to a container.
// This function returns when the attach finishes. It does not hold the lock for
// the duration of its runtime, only using it at the beginning to verify state.
func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan resize.TerminalSize) error {
	if c.LogDriver() == define.PassthroughLogging {
		return fmt.Errorf("this container is using the 'passthrough' log driver, cannot attach: %w", define.ErrNoLogs)
	}
	if c.LogDriver() == define.PassthroughTTYLogging {
		return fmt.Errorf("this container is using the 'passthrough-tty' log driver, cannot attach: %w", define.ErrNoLogs)
	}

	if !c.batched {
		c.lock.Lock()
		if err := c.syncContainer(); err != nil {
			c.lock.Unlock()
			return err
		}
		// We are NOT holding the lock for the duration of the function.
		c.lock.Unlock()
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
	}

	// HACK: This is really gross, but there isn't a better way without
	// splitting attach into separate versions for StartAndAttach and normal
	// attaching, and I really do not want to do that right now.
	// Send a SIGWINCH after attach succeeds so that most programs will
	// redraw the screen for the new attach session.
	attachRdy := make(chan bool, 1)
	go func() {
		<-attachRdy
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.ociRuntime.KillContainer(c, uint(signal.SIGWINCH), false); err != nil {
			logrus.Warnf("Unable to send SIGWINCH to container %s after attach: %v", c.ID(), err)
		}
	}()

	if c.LogDriver() != define.PassthroughLogging && c.LogDriver() != define.PassthroughTTYLogging {
		registerResizeFunc(resize, c.bundlePath())
	}

	opts := new(AttachOptions)
	opts.Streams = streams
	opts.DetachKeys = &keys
	opts.AttachReady = attachRdy

	c.newContainerEvent(events.Attach)
	return c.ociRuntime.Attach(c, opts)
}

// HTTPAttach forwards an attach session over a hijacked HTTP session.
// HTTPAttach will consume and close the included httpCon, which is expected to
// be sourced from a hijacked HTTP connection.
// The cancel channel is optional, and can be used to asynchronously cancel the
// attach session.
// The streams variable is only supported if the container was not a terminal,
// and allows specifying which of the container's standard streams will be
// forwarded to the client.
// This function returns when the attach finishes. It does not hold the lock for
// the duration of its runtime, only using it at the beginning to verify state.
// The streamLogs parameter indicates that all the container's logs until present
// will be streamed at the beginning of the attach.
// The streamAttach parameter indicates that the attach itself will be streamed
// over the socket; if this is not set, but streamLogs is, only the logs will be
// sent.
// At least one of streamAttach and streamLogs must be set.
func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *HTTPAttachStreams, detachKeys *string, cancel <-chan bool, streamAttach, streamLogs bool, hijackDone chan<- bool) error {
	// Ensure we don't leak a goroutine if we exit before hijack completes.
	defer func() {
		close(hijackDone)
	}()

	if !c.batched {
		c.lock.Lock()
		if err := c.syncContainer(); err != nil {
			c.lock.Unlock()
			return err
		}
		// We are NOT holding the lock for the duration of the function.
		c.lock.Unlock()
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
	}

	if !streamAttach && !streamLogs {
		return fmt.Errorf("must specify at least one of stream or logs: %w", define.ErrInvalidArg)
	}

	logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())

	c.newContainerEvent(events.Attach)
	return c.ociRuntime.HTTPAttach(c, r, w, streams, detachKeys, cancel, hijackDone, streamAttach, streamLogs)
}

// AttachResize resizes the container's terminal, which is displayed by Attach
// and HTTPAttach.
func (c *Container) AttachResize(newSize resize.TerminalSize) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return fmt.Errorf("can only resize created or running containers: %w", define.ErrCtrStateInvalid)
	}

	logrus.Infof("Resizing TTY of container %s", c.ID())

	return c.ociRuntime.AttachResize(c, newSize)
}

// Mount mounts a container's filesystem on the host
// The path where the container has been mounted is returned
func (c *Container) Mount() (string, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return "", err
		}
	}

	defer c.newContainerEvent(events.Mount)
	return c.mount()
}

// Unmount unmounts a container's filesystem on the host
func (c *Container) Unmount(force bool) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	mounted, err := c.runtime.storageService.MountedContainerImage(c.ID())
	if err != nil {
		return fmt.Errorf("can't determine how many times %s is mounted, refusing to unmount: %w", c.ID(), err)
	}
	if mounted == 1 {
		if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
			return fmt.Errorf("cannot unmount storage for container %s as it is running or paused: %w", c.ID(), define.ErrCtrStateInvalid)
		}
		execSessions, err := c.getActiveExecSessions()
		if err != nil {
			return err
		}
		if len(execSessions) != 0 {
			return fmt.Errorf("container %s has active exec sessions, refusing to unmount: %w", c.ID(), define.ErrCtrStateInvalid)
		}
		return fmt.Errorf("can't unmount %s last mount, it is still in use: %w", c.ID(), define.ErrInternal)
	}

	defer c.newContainerEvent(events.Unmount)
	return c.unmount(force)
}

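// A usage sketch (illustrative only; `ctr` is assumed): Mount and Unmount
// are paired, and force=false refuses to unmount while the mount is still in
// use elsewhere.
//
//	mountPoint, err := ctr.Mount()
//	if err != nil {
//		return err
//	}
//	defer func() {
//		if err := ctr.Unmount(false); err != nil {
//			logrus.Errorf("Unmounting container %s: %v", ctr.ID(), err)
//		}
//	}()
//	// Inspect files under mountPoint while the mount is held.
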
// Pause pauses a container
func (c *Container) Pause() error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.state.State == define.ContainerStatePaused {
		return fmt.Errorf("%q is already paused: %w", c.ID(), define.ErrCtrStateInvalid)
	}
	if c.state.State != define.ContainerStateRunning {
		return fmt.Errorf("%q is not running, can't pause: %w", c.state.State, define.ErrCtrStateInvalid)
	}

	defer c.newContainerEvent(events.Pause)
	return c.pause()
}

// Unpause unpauses a container
func (c *Container) Unpause() error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.state.State != define.ContainerStatePaused {
		return fmt.Errorf("%q is not paused, can't unpause: %w", c.ID(), define.ErrCtrStateInvalid)
	}

	defer c.newContainerEvent(events.Unpause)
	return c.unpause()
}

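// A usage sketch (illustrative only; `ctr` is assumed): briefly pause a
// running container around a host-side operation, then resume it.
//
//	if err := ctr.Pause(); err != nil {
//		return err
//	}
//	defer func() {
//		if err := ctr.Unpause(); err != nil {
//			logrus.Errorf("Unpausing container %s: %v", ctr.ID(), err)
//		}
//	}()
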
// Export exports a container's root filesystem as a tar archive.
// The archive is written to the given io.Writer.
func (c *Container) Export(out io.Writer) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if c.state.State == define.ContainerStateRemoving {
		return fmt.Errorf("cannot mount container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
	}

	defer c.newContainerEvent(events.Mount)
	return c.export(out)
}

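// A usage sketch (illustrative only; `ctr` is assumed): export the root
// filesystem to a tar file on disk.
//
//	f, err := os.Create("/tmp/rootfs.tar")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	if err := ctr.Export(f); err != nil {
//		return err
//	}
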
// AddArtifact creates and writes to an artifact file for the container
func (c *Container) AddArtifact(name string, data []byte) error {
	if !c.valid {
		return define.ErrCtrRemoved
	}

	return os.WriteFile(c.getArtifactPath(name), data, 0o740)
}

// GetArtifact reads the specified artifact file from the container
func (c *Container) GetArtifact(name string) ([]byte, error) {
	if !c.valid {
		return nil, define.ErrCtrRemoved
	}

	return os.ReadFile(c.getArtifactPath(name))
}

// RemoveArtifact deletes the specified artifact file
func (c *Container) RemoveArtifact(name string) error {
	if !c.valid {
		return define.ErrCtrRemoved
	}

	return os.Remove(c.getArtifactPath(name))
}

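// A usage sketch (illustrative only; `ctr` and the artifact name are
// assumed): store a small piece of metadata alongside the container and read
// it back later.
//
//	if err := ctr.AddArtifact("created-by", []byte("my-tool")); err != nil {
//		return err
//	}
//	data, err := ctr.GetArtifact("created-by")
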
// Wait blocks until the container exits and returns its exit code.
func (c *Container) Wait(ctx context.Context) (int32, error) {
	return c.WaitForExit(ctx, DefaultWaitInterval)
}

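// A usage sketch (illustrative only; `ctr` is assumed): block until the
// container exits and act on its exit code.
//
//	exitCode, err := ctr.Wait(ctx)
//	if err != nil {
//		return err
//	}
//	if exitCode != 0 {
//		logrus.Errorf("Container %s exited with code %d", ctr.ID(), exitCode)
//	}
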
// WaitForExit blocks until the container exits and returns its exit code. The
// second argument is the interval at which the container's status is polled.
func (c *Container) WaitForExit(ctx context.Context, pollInterval time.Duration) (int32, error) {
	if !c.valid {
		return -1, define.ErrCtrRemoved
	}

	id := c.ID()
	var conmonTimer time.Timer
	conmonTimerSet := false

	conmonPidFd := c.getConmonPidFd()
	if conmonPidFd != -1 {
		defer unix.Close(conmonPidFd)
	}
	conmonPidFdTriggered := false

	getExitCode := func() (bool, int32, error) {
		containerRemoved := false
		if !c.batched {
			c.lock.Lock()
			defer c.lock.Unlock()
		}

		if err := c.syncContainer(); err != nil {
			if !errors.Is(err, define.ErrNoSuchCtr) {
				return false, -1, err
			}
			containerRemoved = true
		}

		// If conmon is no longer alive, set a timer to make sure
		// we're returning even if conmon has forcefully been killed.
		if !conmonTimerSet && !containerRemoved {
			conmonAlive, err := c.ociRuntime.CheckConmonRunning(c)
			switch {
			case errors.Is(err, define.ErrNoSuchCtr):
				// Container has been removed, so we assume the
				// exit code is present in the DB.
				containerRemoved = true
			case err != nil:
				return false, -1, err
			case !conmonAlive:
				// Give the exit code at most 20 seconds to
				// show up in the DB. That should largely be
				// enough for the cleanup process.
				timerDuration := time.Second * 20
				conmonTimer = *time.NewTimer(timerDuration)
				conmonTimerSet = true
			default:
				// Continue waiting if conmon's still running.
				return false, -1, nil
			}
		}

		timedout := ""
		if !containerRemoved {
			// If conmon is dead for more than $timerDuration or if the
			// container has exited properly, try to look up the exit code.
			select {
			case <-conmonTimer.C:
				logrus.Debugf("Exceeded conmon timeout waiting for container %s to exit", id)
				timedout = " [exceeded conmon timeout waiting for container to exit]"
			default:
				switch c.state.State {
				case define.ContainerStateExited, define.ContainerStateConfigured:
					// Container exited, so we can look up the exit code.
				case define.ContainerStateStopped:
					// Continue looping unless the restart policy is always.
					// In this case, the container would never transition to
					// the exited state, so we need to look up the exit code.
					if c.config.RestartPolicy != define.RestartPolicyAlways {
						return false, -1, nil
					}
				default:
					// Continue looping.
					return false, -1, nil
				}
			}
		}

		exitCode, err := c.runtime.state.GetContainerExitCode(id)
		if err != nil {
			if errors.Is(err, define.ErrNoSuchExitCode) {
				// If the container is configured or created, we must assume it never ran.
				if c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated) {
					return true, 0, nil
				}
			}
			return true, -1, fmt.Errorf("%w (container in state %s)%s", err, c.state.State, timedout)
		}

		return true, exitCode, nil
	}

	for {
		hasExited, exitCode, err := getExitCode()
		if hasExited {
			return exitCode, err
		}
		if err != nil {
			return -1, err
		}
		select {
		case <-ctx.Done():
			return -1, fmt.Errorf("waiting for exit code of container %s canceled", id)
		default:
			if conmonPidFd != -1 && !conmonPidFdTriggered {
				// If possible (pidfd works), block on the first cycle until
				// conmon dies, and fall back to the old poll delay afterwards.
				// There is a deadlock in the cleanup code for "play kube" which causes
				// conmon to not exit, so unfortunately we have to use the poll interval
				// timeout here to avoid hanging.
				fds := []unix.PollFd{{Fd: int32(conmonPidFd), Events: unix.POLLIN}}
				_, _ = unix.Poll(fds, int(pollInterval.Milliseconds()))
				conmonPidFdTriggered = true
			} else {
				time.Sleep(pollInterval)
			}
		}
	}
}

type waitResult struct {
	code int32
	err  error
}

func (c *Container) WaitForConditionWithInterval(ctx context.Context, waitTimeout time.Duration, conditions ...string) (int32, error) {
	if !c.valid {
		return -1, define.ErrCtrRemoved
	}

	if len(conditions) == 0 {
		panic("at least one condition should be passed")
	}

	ctx, cancelFn := context.WithCancel(ctx)
	defer cancelFn()

	resultChan := make(chan waitResult)
	waitForExit := false
	wantedStates := make(map[define.ContainerStatus]bool, len(conditions))
	wantedHealthStates := make(map[string]bool)

	for _, rawCondition := range conditions {
		switch rawCondition {
		case define.HealthCheckHealthy, define.HealthCheckUnhealthy:
			if !c.HasHealthCheck() {
				return -1, fmt.Errorf("cannot use condition %q: container %s has no healthcheck", rawCondition, c.ID())
			}
			wantedHealthStates[rawCondition] = true
		default:
			condition, err := define.StringToContainerStatus(rawCondition)
			if err != nil {
				return -1, err
			}
			switch condition {
			case define.ContainerStateExited, define.ContainerStateStopped:
				waitForExit = true
			default:
				wantedStates[condition] = true
			}
		}
	}

	trySend := func(code int32, err error) {
		select {
		case resultChan <- waitResult{code, err}:
		case <-ctx.Done():
		}
	}

	var wg sync.WaitGroup

	if waitForExit {
		wg.Add(1)
		go func() {
			defer wg.Done()
			code, err := c.WaitForExit(ctx, waitTimeout)
			trySend(code, err)
		}()
	}

	if len(wantedStates) > 0 || len(wantedHealthStates) > 0 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				if len(wantedStates) > 0 {
					state, err := c.State()
					if err != nil {
						trySend(-1, err)
						return
					}
					if _, found := wantedStates[state]; found {
						trySend(-1, nil)
						return
					}
				}
				if len(wantedHealthStates) > 0 {
					status, err := c.HealthCheckStatus()
					if err != nil {
						trySend(-1, err)
						return
					}
					if _, found := wantedHealthStates[status]; found {
						trySend(-1, nil)
						return
					}
				}
				select {
				case <-ctx.Done():
					return
				case <-time.After(waitTimeout):
				}
			}
		}()
	}

	var result waitResult
	select {
	case result = <-resultChan:
	case <-ctx.Done():
		result = waitResult{-1, define.ErrCanceled}
	}
	cancelFn()
	wg.Wait()
	return result.code, result.err
}

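// A usage sketch (illustrative only; `ctr` is assumed): block until the
// container is either running or reports a healthy healthcheck, polling at
// one-second intervals.
//
//	code, err := ctr.WaitForConditionWithInterval(ctx, time.Second,
//		define.ContainerStateRunning.String(), define.HealthCheckHealthy)
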
// Cleanup unmounts all mount points in container and cleans up container storage
// It also cleans up the network stack
func (c *Container) Cleanup(ctx context.Context) error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
	}

	if err := c.syncContainer(); err != nil {
		// When the container has already been removed, the OCI runtime directory remains.
		if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
			if err := c.cleanupRuntime(ctx); err != nil {
				return fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err)
			}
			return nil
		}
		logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
		return err
	}

	// Check if state is good
	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
		return fmt.Errorf("container %s is running or paused, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
	}

	// if the container was not created in the oci runtime or was already cleaned up, then do nothing
	if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
		return nil
	}

	// Handle restart policy.
	// Returns a bool indicating whether we actually restarted.
	// If we did, don't proceed to cleanup - just exit.
	didRestart, err := c.handleRestartPolicy(ctx)
	if err != nil {
		return err
	}
	if didRestart {
		return nil
	}

	// If we didn't restart, we perform a normal cleanup

	// make sure all the container processes are terminated if we are running without a pid namespace.
	hasPidNs := false
	if c.config.Spec.Linux != nil {
		for _, i := range c.config.Spec.Linux.Namespaces {
			if i.Type == spec.PIDNamespace {
				hasPidNs = true
				break
			}
		}
	}
	if !hasPidNs {
		// do not fail on errors
		_ = c.ociRuntime.KillContainer(c, uint(unix.SIGKILL), true)
	}

	// Check for running exec sessions
	sessions, err := c.getActiveExecSessions()
	if err != nil {
		return err
	}
	if len(sessions) > 0 {
		return fmt.Errorf("container %s has active exec sessions, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
	}

	defer c.newContainerEvent(events.Cleanup)
	return c.cleanup(ctx)
}

// Batch starts a batch operation on the given container
// All commands in the passed function will execute under the same lock and
// without synchronizing state after each operation
// This will result in substantial performance benefits when running numerous
// commands on the same container
// Note that the container passed into the Batch function cannot be removed
// during batched operations. runtime.RemoveContainer can only be called outside
// of Batch.
// Any error returned by the given batch function will be returned unmodified by
// Batch.
// As Batch normally disables updating the current state of the container, the
// Sync() function is provided to enable container state to be updated and
// checked within Batch.
func (c *Container) Batch(batchFunc func(*Container) error) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	newCtr := new(Container)
	newCtr.config = c.config
	newCtr.state = c.state
	newCtr.runtime = c.runtime
	newCtr.ociRuntime = c.ociRuntime
	newCtr.lock = c.lock
	newCtr.valid = true

	newCtr.batched = true
	err := batchFunc(newCtr)
	newCtr.batched = false

	return err
}

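// A usage sketch (illustrative only; `ctr` is assumed): read state under a
// single lock; the batched container is only valid inside the callback, and
// Sync() refreshes its state from the OCI runtime.
//
//	var state define.ContainerStatus
//	err := ctr.Batch(func(b *Container) error {
//		if err := b.Sync(); err != nil {
//			return err
//		}
//		state = b.state.State
//		return nil
//	})
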
// Sync updates the status of a container by querying the OCI runtime.
// If the container has not been created inside the OCI runtime, nothing will be
// done.
// Most of the time, Podman does not explicitly query the OCI runtime for
// container status, and instead relies upon exit files created by conmon.
// This can cause a disconnect between running state and what Podman sees in
// cases where Conmon was killed unexpectedly, or runc was upgraded.
// Running a manual Sync() ensures that container state will be correct in
// such cases.
func (c *Container) Sync() error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
	}

	if err := c.syncContainer(); err != nil {
		return err
	}

	defer c.newContainerEvent(events.Sync)
	return nil
}

// ReloadNetwork reconfigures the container's network.
// Technically speaking, it will tear down and then reconfigure the container's
// network namespace, which will result in all firewall rules being recreated.
// It is mostly intended to be used in cases where the system firewall has been
// reloaded, and existing rules have been wiped out. It is expected that some
// downtime will result, as the rules are destroyed as part of this process.
// At present, this only works on root containers; it may be expanded to restart
// slirp4netns in the future to work with rootless containers as well.
// Requires that the container must be running or created.
func (c *Container) ReloadNetwork() error {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}

	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
		return fmt.Errorf("cannot reload network unless container network has been configured: %w", define.ErrCtrStateInvalid)
	}

	return c.reloadNetwork()
}

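// A usage sketch (illustrative only; `ctr` is assumed): after the host
// firewall has been reloaded and its rules wiped, rebuild the container's
// network configuration.
//
//	if err := ctr.ReloadNetwork(); err != nil {
//		return err
//	}
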
// Refresh is DEPRECATED and REMOVED.
func (c *Container) Refresh(ctx context.Context) error {
	// This has been deprecated for a long while, and is in the process of
	// being removed.
	return define.ErrNotImplemented
}

// ContainerCheckpointOptions is a struct used to pass the parameters
// for checkpointing (and restoring) to the corresponding functions
type ContainerCheckpointOptions struct {
	// Keep tells the API to not delete checkpoint artifacts
	Keep bool
	// KeepRunning tells the API to keep the container running
	// after writing the checkpoint to disk
	KeepRunning bool
	// TCPEstablished tells the API to checkpoint a container
	// even if it contains established TCP connections
	TCPEstablished bool
	// TargetFile tells the API to read (or write) the checkpoint image
	// from (or to) the filename set in TargetFile
	TargetFile string
	// CheckpointImageID tells the API to restore the container from
	// checkpoint image with ID set in CheckpointImageID
	CheckpointImageID string
	// Name tells the API that during restore from an exported
	// checkpoint archive a new name should be used for the
	// restored container
	Name string
	// IgnoreRootfs tells the API to not export changes to
	// the container's root file-system (or to not import)
	IgnoreRootfs bool
	// IgnoreStaticIP tells the API to ignore the IP set
	// during 'podman run' with '--ip'. This is especially
	// important to be able to restore a container multiple
	// times with '--import --name'.
	IgnoreStaticIP bool
	// IgnoreStaticMAC tells the API to ignore the MAC set
	// during 'podman run' with '--mac-address'. This is especially
	// important to be able to restore a container multiple
	// times with '--import --name'.
	IgnoreStaticMAC bool
	// IgnoreVolumes tells the API to not export or not to import
	// the content of volumes associated with the container
	IgnoreVolumes bool
	// PreCheckPoint tells the API to pre-checkpoint the container
	// and leave it running
	PreCheckPoint bool
	// WithPrevious tells the API to dump the container using the
	// pre-checkpoint images
	WithPrevious bool
	// ImportPrevious tells the API to restore container with two
	// images. One is TargetFile, the other is ImportPrevious.
	ImportPrevious string
	// CreateImage tells Podman to create an OCI image from container
	// checkpoint in the local image store.
	CreateImage string
	// Compression tells the API which compression to use for
	// the exported checkpoint archive.
	Compression archive.Compression
	// If Pod is set the container should be restored into the
	// given Pod. If Pod is empty it is a restore without a Pod.
	// Restoring a non Pod container into a Pod or a Pod container
	// without a Pod is theoretically possible, but will
	// probably not work if a PID namespace is shared.
	// A shared PID namespace means that a Pod container has PID 1
	// in the infrastructure container, but without the infrastructure
	// container no PID 1 will be in the namespace and that is not
	// possible.
	Pod string
	// PrintStats tells the API to fill out the statistics about
	// how much time each component in the stack requires to
	// checkpoint a container.
	PrintStats bool
	// FileLocks tells the API to checkpoint/restore a container
	// with file locks.
	FileLocks bool
}

// Checkpoint checkpoints a container
// The return values *define.CRIUCheckpointRestoreStatistics and int64 (time
// the runtime needs to checkpoint the container) are only set if
// options.PrintStats is set to true. Not setting options.PrintStats to true
// will return nil and 0.
func (c *Container) Checkpoint(ctx context.Context, options ContainerCheckpointOptions) (*define.CRIUCheckpointRestoreStatistics, int64, error) {
	logrus.Debugf("Trying to checkpoint container %s", c.ID())

	if options.TargetFile != "" {
		if err := c.prepareCheckpointExport(); err != nil {
			return nil, 0, err
		}
	}

	if options.WithPrevious {
		if err := c.canWithPrevious(); err != nil {
			return nil, 0, err
		}
	}

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, 0, err
		}
	}

	return c.checkpoint(ctx, options)
}

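// A usage sketch (illustrative only; `ctr` is assumed, and the field values
// are arbitrary): checkpoint a container to a compressed archive while
// keeping it running.
//
//	opts := ContainerCheckpointOptions{
//		Keep:        true,
//		KeepRunning: true,
//		TargetFile:  "/tmp/ctr-checkpoint.tar.gz",
//		Compression: archive.Gzip,
//	}
//	stats, checkpointTime, err := ctr.Checkpoint(ctx, opts)
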
// Restore restores a container
// The return values *define.CRIUCheckpointRestoreStatistics and int64 (time
// the runtime needs to restore the container) are only set if
// options.PrintStats is set to true. Not setting options.PrintStats to true
// will return nil and 0.
func (c *Container) Restore(ctx context.Context, options ContainerCheckpointOptions) (*define.CRIUCheckpointRestoreStatistics, int64, error) {
	if options.Pod == "" {
		logrus.Debugf("Trying to restore container %s", c.ID())
	} else {
		logrus.Debugf("Trying to restore container %s into pod %s", c.ID(), options.Pod)
	}

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, 0, err
		}
	}

	defer c.newContainerEvent(events.Restore)
	return c.restore(ctx, options)
}

// ShouldRestart indicates whether the container should restart
func (c *Container) ShouldRestart(ctx context.Context) bool {
	logrus.Debugf("Checking if container %s should restart", c.ID())

	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return false
		}
	}

	return c.shouldRestart()
}

// CopyFromArchive copies the contents from the specified tarStream to path
// *inside* the container.
func (c *Container) CopyFromArchive(_ context.Context, containerPath string, chown, noOverwriteDirNonDir bool, rename map[string]string, tarStream io.Reader) (func() error, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	return c.copyFromArchive(containerPath, chown, noOverwriteDirNonDir, rename, tarStream)
}

// CopyToArchive copies the contents from the specified path *inside* the
// container to the tarStream.
func (c *Container) CopyToArchive(ctx context.Context, containerPath string, tarStream io.Writer) (func() error, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	return c.copyToArchive(containerPath, tarStream)
}

// Stat the specified path *inside* the container and return a file info.
func (c *Container) Stat(ctx context.Context, containerPath string) (*define.FileInfo, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	var mountPoint string
	var err error
	if c.state.Mounted {
		mountPoint = c.state.Mountpoint
	} else {
		mountPoint, err = c.mount()
		if err != nil {
			return nil, err
		}
		defer func() {
			if err := c.unmount(false); err != nil {
				logrus.Errorf("Unmounting container %s: %v", c.ID(), err)
			}
		}()
	}

	info, _, _, err := c.stat(mountPoint, containerPath)
	return info, err
}

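// A usage sketch (illustrative only; `ctr` is assumed): stat a path inside
// the container without attaching to or starting it.
//
//	info, err := ctr.Stat(ctx, "/etc/hostname")
//	if err != nil {
//		return err
//	}
//	logrus.Infof("Stat of /etc/hostname in %s: %+v", ctr.ID(), info)
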
func saveContainerError(c *Container, err error) error {
	c.state.Error = err.Error()
	return c.save()
}