//go:build !remote

package libpod

import (
	"context"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/containers/buildah"
	"github.com/containers/common/libnetwork/types"
	"github.com/containers/common/pkg/cgroups"
	"github.com/containers/common/pkg/config"
	"github.com/containers/podman/v5/libpod/define"
	"github.com/containers/podman/v5/libpod/events"
	"github.com/containers/podman/v5/libpod/shutdown"
	"github.com/containers/podman/v5/pkg/domain/entities/reports"
	"github.com/containers/podman/v5/pkg/rootless"
	"github.com/containers/podman/v5/pkg/specgen"
	"github.com/containers/podman/v5/pkg/util"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/stringid"
	"github.com/docker/go-units"
	spec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/opencontainers/runtime-tools/generate"
	"github.com/sirupsen/logrus"
	"golang.org/x/exp/slices"
)

// Contains the public Runtime API for containers

// A CtrCreateOption is a functional option which alters the Container created
// by NewContainer
type CtrCreateOption func(*Container) error

// ContainerFilter is a function to determine whether a container is included
// in command output. Each candidate container is tested using the function.
// A true return will include the container, a false return will exclude it.
type ContainerFilter func(*Container) bool

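// Example (illustrative sketch): a ContainerFilter that keeps only running
// containers, mirroring the filter built inside GetRunningContainers further
// below. Assumes an initialized *Runtime r.
//
//	var isRunning ContainerFilter = func(c *Container) bool {
//		state, _ := c.State()
//		return state == define.ContainerStateRunning
//	}
//	// Multiple filters passed to GetContainers are ANDed together.
//	ctrs, err := r.GetContainers(false, isRunning)
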
// NewContainer creates a new container from a given OCI config.
48
func (r *Runtime) NewContainer(ctx context.Context, rSpec *spec.Spec, spec *specgen.SpecGenerator, infra bool, options ...CtrCreateOption) (*Container, error) {
49
	if !r.valid {
50
		return nil, define.ErrRuntimeStopped
51
	}
52
	if infra {
53
		options = append(options, withIsInfra())
54
	}
55
	return r.newContainer(ctx, rSpec, options...)
56
}
57

58
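// Example (illustrative sketch): creating a non-infra container, assuming an
// initialized *Runtime r, a context ctx, a runtime spec rSpec, and a
// *specgen.SpecGenerator sg supplied by the caller.
//
//	ctr, err := r.NewContainer(ctx, rSpec, sg, false)
//	if err != nil {
//		return fmt.Errorf("creating container: %w", err)
//	}
//	logrus.Infof("created container %s", ctr.ID())
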
func (r *Runtime) PrepareVolumeOnCreateContainer(ctx context.Context, ctr *Container) error {
59
	// Copy the content from the underlying image into the newly created
60
	// volume if configured to do so.
61
	if !r.config.Containers.PrepareVolumeOnCreate {
62
		return nil
63
	}
64

65
	defer func() {
66
		if err := ctr.cleanupStorage(); err != nil {
67
			logrus.Errorf("Cleaning up container storage %s: %v", ctr.ID(), err)
68
		}
69
	}()
70

71
	mountPoint, err := ctr.mountStorage()
72
	if err == nil {
73
		// Finish up mountStorage
74
		ctr.state.Mounted = true
75
		ctr.state.Mountpoint = mountPoint
76
		if err = ctr.save(); err != nil {
77
			logrus.Errorf("Saving container %s state: %v", ctr.ID(), err)
78
		}
79
	}
80

81
	return err
82
}
83

84
// RestoreContainer re-creates a container from an imported checkpoint
85
func (r *Runtime) RestoreContainer(ctx context.Context, rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
86
	if !r.valid {
87
		return nil, define.ErrRuntimeStopped
88
	}
89

90
	ctr, err := r.initContainerVariables(rSpec, config)
91
	if err != nil {
92
		return nil, fmt.Errorf("initializing container variables: %w", err)
93
	}
94
	// For an imported checkpoint no one has ever set the StartedTime. Set it now.
95
	ctr.state.StartedTime = time.Now()
96

97
	// If the path to ConmonPidFile starts with the default value (RunRoot), then
98
	// the user has not specified '--conmon-pidfile' during run or create (probably).
99
	// In that case reset ConmonPidFile to be set to the default value later.
100
	if strings.HasPrefix(ctr.config.ConmonPidFile, r.storageConfig.RunRoot) {
101
		ctr.config.ConmonPidFile = ""
102
	}
103

104
	// If the path to PidFile starts with the default value (RunRoot), then
105
	// the user has not specified '--pidfile' during run or create (probably).
106
	// In that case reset PidFile to be set to the default value later.
107
	if strings.HasPrefix(ctr.config.PidFile, r.storageConfig.RunRoot) {
108
		ctr.config.PidFile = ""
109
	}
110

111
	return r.setupContainer(ctx, ctr)
112
}
113

114
// RenameContainer renames the given container.
115
// Returns a copy of the container that has been renamed if successful.
116
func (r *Runtime) RenameContainer(ctx context.Context, ctr *Container, newName string) (*Container, error) {
117
	ctr.lock.Lock()
118
	defer ctr.lock.Unlock()
119

120
	if err := ctr.syncContainer(); err != nil {
121
		return nil, err
122
	}
123

124
	newName = strings.TrimPrefix(newName, "/")
125
	if newName == "" || !define.NameRegex.MatchString(newName) {
126
		return nil, define.RegexError
127
	}
128

129
	// We need to pull an updated config, in case another rename fired and
130
	// the config was re-written.
131
	newConf, err := r.state.GetContainerConfig(ctr.ID())
132
	if err != nil {
133
		return nil, fmt.Errorf("retrieving container %s configuration from DB to remove: %w", ctr.ID(), err)
134
	}
135
	ctr.config = newConf
136

137
	logrus.Infof("Going to rename container %s from %q to %q", ctr.ID(), ctr.Name(), newName)
138

139
	// Step 1: Alter the config. Save the old name, we need it to rewrite
140
	// the config.
141
	oldName := ctr.config.Name
142
	ctr.config.Name = newName
143

144
	// Step 2: rewrite the old container's config in the DB.
145
	if err := r.state.SafeRewriteContainerConfig(ctr, oldName, ctr.config.Name, ctr.config); err != nil {
146
		// Assume the rename failed.
147
		// Set config back to the old name so reflect what is actually
148
		// present in the DB.
149
		ctr.config.Name = oldName
150
		return nil, fmt.Errorf("renaming container %s: %w", ctr.ID(), err)
151
	}
152

153
	// Step 3: rename the container in c/storage.
154
	// This can fail if the name is already in use by a non-Podman
155
	// container. This puts us in a bad spot - we've already renamed the
156
	// container in Podman. We can swap the order, but then we have the
157
	// opposite problem. Atomicity is a real problem here, with no easy
158
	// solution.
159
	if err := r.store.SetNames(ctr.ID(), []string{ctr.Name()}); err != nil {
160
		return nil, err
161
	}
162

163
	ctr.newContainerEvent(events.Rename)
164
	return ctr, nil
165
}
166

167
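// Example (illustrative sketch): renaming a container looked up by name,
// assuming an initialized *Runtime r and a context ctx; "old-name" and
// "new-name" are hypothetical.
//
//	ctr, err := r.LookupContainer("old-name")
//	if err != nil {
//		return err
//	}
//	renamed, err := r.RenameContainer(ctx, ctr, "new-name")
//	if err != nil {
//		return err
//	}
//	logrus.Infof("container is now %s", renamed.Name())
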
func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConfig) (*Container, error) {
168
	if rSpec == nil {
169
		return nil, fmt.Errorf("must provide a valid runtime spec to create container: %w", define.ErrInvalidArg)
170
	}
171
	ctr := new(Container)
172
	ctr.config = new(ContainerConfig)
173
	ctr.state = new(ContainerState)
174

175
	if config == nil {
176
		ctr.config.ID = stringid.GenerateRandomID()
177
		size, err := units.FromHumanSize(r.config.Containers.ShmSize)
178
		if useDevShm {
179
			if err != nil {
180
				return nil, fmt.Errorf("converting containers.conf ShmSize %s to an int: %w", r.config.Containers.ShmSize, err)
181
			}
182
			ctr.config.ShmSize = size
183
			ctr.config.NoShm = false
184
			ctr.config.NoShmShare = false
185
		} else {
186
			ctr.config.NoShm = true
187
			ctr.config.NoShmShare = true
188
		}
189
		ctr.config.StopSignal = 15
190

191
		ctr.config.StopTimeout = r.config.Engine.StopTimeout
192
	} else {
193
		// This is a restore from an imported checkpoint
194
		ctr.restoreFromCheckpoint = true
195
		if err := JSONDeepCopy(config, ctr.config); err != nil {
196
			return nil, fmt.Errorf("copying container config for restore: %w", err)
197
		}
198
		// If the ID is empty a new name for the restored container was requested
199
		if ctr.config.ID == "" {
200
			ctr.config.ID = stringid.GenerateRandomID()
201
		}
202
		// Reset the log path to point to the default
203
		ctr.config.LogPath = ""
204
		// Later in validate() the check is for nil. JSONDeepCopy sets it to an empty
205
		// object. Resetting it to nil if it was nil before.
206
		if config.StaticMAC == nil {
207
			ctr.config.StaticMAC = nil
208
		}
209
	}
210

211
	ctr.config.Spec = rSpec
212
	ctr.config.CreatedTime = time.Now()
213

214
	ctr.state.BindMounts = make(map[string]string)
215

216
	ctr.config.OCIRuntime = r.defaultOCIRuntime.Name()
217

218
	// Set namespace based on current runtime namespace
219
	// Do so before options run so they can override it
220
	if r.config.Engine.Namespace != "" {
221
		ctr.config.Namespace = r.config.Engine.Namespace
222
	}
223

224
	ctr.runtime = r
225

226
	return ctr, nil
227
}
228

229
func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ...CtrCreateOption) (*Container, error) {
230
	var ctr *Container
231
	var err error
232

233
	ctr, err = r.initContainerVariables(rSpec, nil)
234

235
	if err != nil {
236
		return nil, fmt.Errorf("initializing container variables: %w", err)
237
	}
238

239
	for _, option := range options {
240
		if err := option(ctr); err != nil {
241
			return nil, fmt.Errorf("running container create option: %w", err)
242
		}
243
	}
244

245
	return r.setupContainer(ctx, ctr)
246
}
247

248
func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Container, retErr error) {
249
	// normalize the networks to names
250
	// the db backend only knows about network names so we have to make
251
	// sure we do not use ids internally
252
	if len(ctr.config.Networks) > 0 {
253
		normalizeNetworks := make(map[string]types.PerNetworkOptions, len(ctr.config.Networks))
254
		// first get the already used interface names so we do not conflict
255
		usedIfNames := make([]string, 0, len(ctr.config.Networks))
256
		for _, opts := range ctr.config.Networks {
257
			if opts.InterfaceName != "" {
258
				// check that no name is assigned to more than network
259
				if slices.Contains(usedIfNames, opts.InterfaceName) {
260
					return nil, fmt.Errorf("network interface name %q is already assigned to another network", opts.InterfaceName)
261
				}
262
				usedIfNames = append(usedIfNames, opts.InterfaceName)
263
			}
264
		}
265
		i := 0
266
		for nameOrID, opts := range ctr.config.Networks {
267
			netName, nicName, err := r.normalizeNetworkName(nameOrID)
268
			if err != nil {
269
				return nil, err
270
			}
271

272
			// check whether interface is to be named as the network_interface
273
			// when name left unspecified
274
			if opts.InterfaceName == "" {
275
				opts.InterfaceName = nicName
276
			}
277

278
			// assign default interface name if empty
279
			if opts.InterfaceName == "" {
280
				for i < 100000 {
281
					ifName := fmt.Sprintf("eth%d", i)
282
					if !slices.Contains(usedIfNames, ifName) {
283
						opts.InterfaceName = ifName
284
						usedIfNames = append(usedIfNames, ifName)
285
						break
286
					}
287
					i++
288
				}
289
				// if still empty we did not find a free name
290
				if opts.InterfaceName == "" {
291
					return nil, errors.New("failed to find free network interface name")
292
				}
293
			}
294
			opts.Aliases = append(opts.Aliases, getExtraNetworkAliases(ctr)...)
295

296
			normalizeNetworks[netName] = opts
297
		}
298
		ctr.config.Networks = normalizeNetworks
299
	}
300

301
	// Validate the container
302
	if err := ctr.validate(); err != nil {
303
		return nil, err
304
	}
305
	if ctr.config.IsInfra {
306
		ctr.config.StopTimeout = 10
307
	}
308

309
	// Inhibit shutdown until creation succeeds
310
	shutdown.Inhibit()
311
	defer shutdown.Uninhibit()
312

313
	// Allocate a lock for the container
314
	lock, err := r.lockManager.AllocateLock()
315
	if err != nil {
316
		return nil, fmt.Errorf("allocating lock for new container: %w", err)
317
	}
318
	ctr.lock = lock
319
	ctr.config.LockID = ctr.lock.ID()
320
	logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
321

322
	defer func() {
323
		if retErr != nil {
324
			if err := ctr.lock.Free(); err != nil {
325
				logrus.Errorf("Freeing lock for container after creation failed: %v", err)
326
			}
327
		}
328
	}()
329

330
	ctr.valid = true
331
	ctr.state.State = define.ContainerStateConfigured
332
	ctr.runtime = r
333

334
	if ctr.config.OCIRuntime == "" {
335
		ctr.ociRuntime = r.defaultOCIRuntime
336
	} else {
337
		ociRuntime, ok := r.ociRuntimes[ctr.config.OCIRuntime]
338
		if !ok {
339
			return nil, fmt.Errorf("requested OCI runtime %s is not available: %w", ctr.config.OCIRuntime, define.ErrInvalidArg)
340
		}
341
		ctr.ociRuntime = ociRuntime
342
	}
343

344
	// Check NoCgroups support
345
	if ctr.config.NoCgroups {
346
		if !ctr.ociRuntime.SupportsNoCgroups() {
347
			return nil, fmt.Errorf("requested OCI runtime %s is not compatible with NoCgroups: %w", ctr.ociRuntime.Name(), define.ErrInvalidArg)
348
		}
349
	}
350

351
	var pod *Pod
352
	if ctr.config.Pod != "" {
353
		// Get the pod from state
354
		pod, err = r.state.Pod(ctr.config.Pod)
355
		if err != nil {
356
			return nil, fmt.Errorf("cannot add container %s to pod %s: %w", ctr.ID(), ctr.config.Pod, err)
357
		}
358
	}
359

360
	// Check Cgroup parent sanity, and set it if it was not set.
361
	// Only if we're actually configuring Cgroups.
362
	if !ctr.config.NoCgroups {
363
		ctr.config.CgroupManager = r.config.Engine.CgroupManager
364
		switch r.config.Engine.CgroupManager {
365
		case config.CgroupfsCgroupsManager:
366
			if ctr.config.CgroupParent == "" {
367
				if pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra() {
368
					podCgroup, err := pod.CgroupPath()
369
					if err != nil {
370
						return nil, fmt.Errorf("retrieving pod %s cgroup: %w", pod.ID(), err)
371
					}
372
					expectPodCgroup, err := ctr.expectPodCgroup()
373
					if err != nil {
374
						return nil, err
375
					}
376
					if expectPodCgroup && podCgroup == "" {
377
						return nil, fmt.Errorf("pod %s cgroup is not set: %w", pod.ID(), define.ErrInternal)
378
					}
379
					canUseCgroup := !rootless.IsRootless() || isRootlessCgroupSet(podCgroup)
380
					if canUseCgroup {
381
						ctr.config.CgroupParent = podCgroup
382
					}
383
				} else if !rootless.IsRootless() {
384
					ctr.config.CgroupParent = CgroupfsDefaultCgroupParent
385
				}
386
			} else if strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
387
				return nil, fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg)
388
			}
389
		case config.SystemdCgroupsManager:
390
			if ctr.config.CgroupParent == "" {
391
				switch {
392
				case pod != nil && pod.config.UsePodCgroup && !ctr.IsInfra():
393
					podCgroup, err := pod.CgroupPath()
394
					if err != nil {
395
						return nil, fmt.Errorf("retrieving pod %s cgroup: %w", pod.ID(), err)
396
					}
397
					expectPodCgroup, err := ctr.expectPodCgroup()
398
					if err != nil {
399
						return nil, err
400
					}
401
					if expectPodCgroup && podCgroup == "" {
402
						return nil, fmt.Errorf("pod %s cgroup is not set: %w", pod.ID(), define.ErrInternal)
403
					}
404
					ctr.config.CgroupParent = podCgroup
405
				case rootless.IsRootless() && ctr.config.CgroupsMode != cgroupSplit:
406
					ctr.config.CgroupParent = SystemdDefaultRootlessCgroupParent
407
				case ctr.config.CgroupsMode != cgroupSplit:
408
					ctr.config.CgroupParent = SystemdDefaultCgroupParent
409
				}
410
			} else if len(ctr.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(ctr.config.CgroupParent), ".slice") {
411
				return nil, fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg)
412
			}
413
		default:
414
			return nil, fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg)
415
		}
416
	}
417

418
	if ctr.config.Timezone == "" {
419
		ctr.config.Timezone = r.config.Containers.TZ
420
	}
421

422
	if ctr.restoreFromCheckpoint {
423
		// Remove information about bind mount
424
		// for new container from imported checkpoint
425

426
		// NewFromSpec() is deprecated according to its comment
427
		// however the recommended replace just causes a nil map panic
428
		g := generate.NewFromSpec(ctr.config.Spec)
429
		g.RemoveMount("/dev/shm")
430
		ctr.config.ShmDir = ""
431
		g.RemoveMount("/etc/resolv.conf")
432
		g.RemoveMount("/etc/hostname")
433
		g.RemoveMount("/etc/hosts")
434
		g.RemoveMount("/run/.containerenv")
435
		g.RemoveMount("/run/secrets")
436
		g.RemoveMount("/var/run/.containerenv")
437
		g.RemoveMount("/var/run/secrets")
438

439
		// Regenerate Cgroup paths so they don't point to the old
440
		// container ID.
441
		cgroupPath, err := ctr.getOCICgroupPath()
442
		if err != nil {
443
			return nil, err
444
		}
445
		g.SetLinuxCgroupsPath(cgroupPath)
446
	}
447

448
	// Set up storage for the container
449
	if err := ctr.setupStorage(ctx); err != nil {
450
		return nil, err
451
	}
452
	defer func() {
453
		if retErr != nil {
454
			if err := ctr.teardownStorage(); err != nil {
455
				logrus.Errorf("Removing partially-created container root filesystem: %v", err)
456
			}
457
		}
458
	}()
459

460
	ctr.config.SecretsPath = filepath.Join(ctr.config.StaticDir, "secrets")
461
	err = os.MkdirAll(ctr.config.SecretsPath, 0755)
462
	if err != nil {
463
		return nil, err
464
	}
465
	for _, secr := range ctr.config.Secrets {
466
		err = ctr.extractSecretToCtrStorage(secr)
467
		if err != nil {
468
			return nil, err
469
		}
470
	}
471

472
	if ctr.config.ConmonPidFile == "" {
473
		ctr.config.ConmonPidFile = filepath.Join(ctr.state.RunDir, "conmon.pid")
474
	}
475

476
	if ctr.config.PidFile == "" {
477
		ctr.config.PidFile = filepath.Join(ctr.state.RunDir, "pidfile")
478
	}
479

480
	// Go through named volumes and add them.
481
	// If they don't exist they will be created using basic options.
482
	for _, vol := range ctr.config.NamedVolumes {
483
		isAnonymous := false
484
		if vol.Name == "" {
485
			// Anonymous volume. We'll need to create it.
486
			// It needs a name first.
487
			vol.Name = stringid.GenerateRandomID()
488
			isAnonymous = true
489
		} else {
490
			// Check if it already exists
491
			_, err := r.state.Volume(vol.Name)
492
			if err == nil {
493
				// The volume exists, we're good
494
				continue
495
			} else if !errors.Is(err, define.ErrNoSuchVolume) {
496
				return nil, fmt.Errorf("retrieving named volume %s for new container: %w", vol.Name, err)
497
			}
498
		}
499
		if vol.IsAnonymous {
500
			// If SetAnonymous is true, make this an anonymous volume
501
			// this is needed for emptyDir volumes from kube yamls
502
			isAnonymous = true
503
		}
504

505
		logrus.Debugf("Creating new volume %s for container", vol.Name)
506

507
		// The volume does not exist, so we need to create it.
508
		volOptions := []VolumeCreateOption{
509
			WithVolumeName(vol.Name),
510
			WithVolumeMountLabel(ctr.MountLabel()),
511
		}
512
		if isAnonymous {
513
			volOptions = append(volOptions, withSetAnon())
514
		}
515

516
		needsChown := true
517

518
		// If volume-opts are set, parse and add driver opts.
519
		if len(vol.Options) > 0 {
520
			isDriverOpts := false
521
			driverOpts := make(map[string]string)
522
			for _, opts := range vol.Options {
523
				if opts == "idmap" {
524
					needsChown = false
525
				}
526
				if strings.HasPrefix(opts, "volume-opt") {
527
					isDriverOpts = true
528
					driverOptKey, driverOptValue, err := util.ParseDriverOpts(opts)
529
					if err != nil {
530
						return nil, err
531
					}
532
					driverOpts[driverOptKey] = driverOptValue
533
				}
534
			}
535
			if isDriverOpts {
536
				parsedOptions := []VolumeCreateOption{WithVolumeOptions(driverOpts)}
537
				volOptions = append(volOptions, parsedOptions...)
538
			}
539
		}
540

541
		if needsChown {
542
			volOptions = append(volOptions, WithVolumeUID(ctr.RootUID()), WithVolumeGID(ctr.RootGID()))
543
		} else {
544
			volOptions = append(volOptions, WithVolumeNoChown())
545
		}
546

547
		_, err = r.newVolume(ctx, false, volOptions...)
548
		if err != nil {
549
			return nil, fmt.Errorf("creating named volume %q: %w", vol.Name, err)
550
		}
551
	}
552

553
	switch ctr.config.LogDriver {
554
	case define.NoLogging, define.PassthroughLogging, define.JournaldLogging:
555
		break
556
	default:
557
		if ctr.config.LogPath == "" {
558
			ctr.config.LogPath = filepath.Join(ctr.config.StaticDir, "ctr.log")
559
		}
560
	}
561

562
	if useDevShm && !MountExists(ctr.config.Spec.Mounts, "/dev/shm") && ctr.config.ShmDir == "" && !ctr.config.NoShm {
563
		ctr.config.ShmDir = filepath.Join(ctr.bundlePath(), "shm")
564
		if err := os.MkdirAll(ctr.config.ShmDir, 0700); err != nil {
565
			if !os.IsExist(err) {
566
				return nil, fmt.Errorf("unable to create shm dir: %w", err)
567
			}
568
		}
569
		ctr.config.Mounts = append(ctr.config.Mounts, ctr.config.ShmDir)
570
	}
571

572
	// Add the container to the state
573
	// TODO: May be worth looking into recovering from name/ID collisions here
574
	if ctr.config.Pod != "" {
575
		// Lock the pod to ensure we can't add containers to pods
576
		// being removed
577
		pod.lock.Lock()
578
		defer pod.lock.Unlock()
579

580
		if err := r.state.AddContainerToPod(pod, ctr); err != nil {
581
			return nil, err
582
		}
583
	} else if err := r.state.AddContainer(ctr); err != nil {
584
		return nil, err
585
	}
586

587
	if ctr.runtime.config.Engine.EventsContainerCreateInspectData {
588
		if err := ctr.newContainerEventWithInspectData(events.Create, "", true); err != nil {
589
			return nil, err
590
		}
591
	} else {
592
		ctr.newContainerEvent(events.Create)
593
	}
594
	return ctr, nil
595
}
596

597
// RemoveContainer removes the given container. If force is true, the container
598
// will be stopped first (otherwise, an error will be returned if the container
599
// is running). If removeVolume is specified, anonymous named volumes used by the
600
// container will be removed also (iff the container is the sole user of the
601
// volumes). Timeout sets the stop timeout for the container if it is running.
602
func (r *Runtime) RemoveContainer(ctx context.Context, c *Container, force bool, removeVolume bool, timeout *uint) error {
603
	opts := ctrRmOpts{
604
		Force:        force,
605
		RemoveVolume: removeVolume,
606
		Timeout:      timeout,
607
	}
608

609
	// NOTE: container will be locked down the road. There is no unlocked
610
	// version of removeContainer.
611
	_, _, err := r.removeContainer(ctx, c, opts)
612
	return err
613
}
614

615
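// Example (illustrative sketch): force-removing a container and its anonymous
// volumes with a 10-second stop timeout, assuming an initialized *Runtime r
// and a context ctx.
//
//	timeout := uint(10)
//	if err := r.RemoveContainer(ctx, ctr, true, true, &timeout); err != nil {
//		return fmt.Errorf("removing container %s: %w", ctr.ID(), err)
//	}
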
// RemoveContainerAndDependencies removes the given container and all its
616
// dependencies. This may include pods (if the container or any of its
617
// dependencies is an infra or service container, the associated pod(s) will also
618
// be removed). Otherwise, it functions identically to RemoveContainer.
619
// Returns two arrays: containers removed, and pods removed. These arrays are
620
// always returned, even if error is set, and indicate any containers that were
621
// successfully removed prior to the error.
622
func (r *Runtime) RemoveContainerAndDependencies(ctx context.Context, c *Container, force bool, removeVolume bool, timeout *uint) (map[string]error, map[string]error, error) {
623
	opts := ctrRmOpts{
624
		Force:        force,
625
		RemoveVolume: removeVolume,
626
		RemoveDeps:   true,
627
		Timeout:      timeout,
628
	}
629

630
	// NOTE: container will be locked down the road. There is no unlocked
631
	// version of removeContainer.
632
	return r.removeContainer(ctx, c, opts)
633
}
634

635
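// Example (illustrative sketch): removing a container with its dependencies
// and reporting per-ID results from the returned maps.
//
//	removedCtrs, removedPods, err := r.RemoveContainerAndDependencies(ctx, ctr, true, false, nil)
//	for id, rmErr := range removedCtrs {
//		if rmErr != nil {
//			logrus.Errorf("removing container %s: %v", id, rmErr)
//		}
//	}
//	for id, rmErr := range removedPods {
//		if rmErr != nil {
//			logrus.Errorf("removing pod %s: %v", id, rmErr)
//		}
//	}
//	if err != nil {
//		return err
//	}
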
// Options for removeContainer
636
type ctrRmOpts struct {
637
	// Whether to stop running container(s)
638
	Force bool
639
	// Whether to remove anonymous volumes used by removing the container
640
	RemoveVolume bool
641
	// Only set by `removePod` as `removeContainer` is being called as part
642
	// of removing a whole pod.
643
	RemovePod bool
644
	// Whether to ignore dependencies of the container when removing
645
	// (This is *DANGEROUS* and should not be used outside of non-graph
646
	// traversal pod removal code).
647
	IgnoreDeps bool
648
	// Remove all the dependencies associated with the container. Can cause
649
	// multiple containers, and possibly one or more pods, to be removed.
650
	RemoveDeps bool
651
	// Do not lock the pod that the container is part of (used only by
652
	// recursive calls of removeContainer, used when removing dependencies)
653
	NoLockPod bool
654
	// Timeout to use when stopping the container. Only used if `Force` is
655
	// true.
656
	Timeout *uint
657
}
658

659
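// Illustrative sketch: a literal equivalent of the options that
// RemoveContainerAndDependencies above builds when called with force and
// removeVolume set.
//
//	opts := ctrRmOpts{
//		Force:        true,
//		RemoveVolume: true,
//		RemoveDeps:   true,
//		Timeout:      nil, // nil means use each container's stop timeout
//	}
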
// Internal function to remove a container.
660
// Locks the container, but does not lock the runtime.
661
// removePod is used only when removing pods. It instructs Podman to ignore
662
// infra container protections, and *not* remove from the database (as pod
663
// remove will handle that).
664
// ignoreDeps is *DANGEROUS* and should not be used outside of a very specific
665
// context (alternate pod removal code, where graph traversal is not possible).
666
// removeDeps instructs Podman to remove dependency containers (and possible
667
// a dependency pod if an infra container is involved). removeDeps conflicts
668
// with removePod - pods have their own dependency management.
669
// noLockPod is used for recursive removeContainer calls when the pod is already
670
// locked.
671
// TODO: At this point we should just start accepting an options struct
672
func (r *Runtime) removeContainer(ctx context.Context, c *Container, opts ctrRmOpts) (removedCtrs map[string]error, removedPods map[string]error, retErr error) {
673
	removedCtrs = make(map[string]error)
674
	removedPods = make(map[string]error)
675

676
	if !c.valid {
677
		if ok, _ := r.state.HasContainer(c.ID()); !ok {
678
			// Container probably already removed
679
			// Or was never in the runtime to begin with
680
			removedCtrs[c.ID()] = nil
681
			return
682
		}
683
	}
684

685
	if opts.RemovePod && opts.RemoveDeps {
686
		retErr = fmt.Errorf("cannot remove dependencies while also removing a pod: %w", define.ErrInvalidArg)
687
		return
688
	}
689

690
	// We need to refresh container config from the DB, to ensure that any
691
	// changes (e.g. a rename) are picked up before we start removing.
692
	// Since HasContainer above succeeded, we can safely assume the
693
	// container exists.
694
	// This is *very iffy* but it should be OK because the container won't
695
	// exist once we're done.
696
	newConf, err := r.state.GetContainerConfig(c.ID())
697
	if err != nil {
698
		retErr = fmt.Errorf("retrieving container %s configuration from DB to remove: %w", c.ID(), err)
699
		return
700
	}
701
	c.config = newConf
702

703
	logrus.Debugf("Removing container %s", c.ID())
704

705
	// We need to lock the pod before we lock the container.
706
	// To avoid races around removing a container and the pod it is in.
707
	// Don't need to do this in pod removal case - we're evicting the entire
708
	// pod.
709
	var pod *Pod
710
	runtime := c.runtime
711
	if c.config.Pod != "" {
712
		pod, err = r.state.Pod(c.config.Pod)
713
		if err != nil {
714
			// There's a potential race here where the pod we are in
715
			// was already removed.
716
			// If so, this container is also removed, as pods take
717
			// all their containers with them.
718
			// So if it's already gone, check if we are too.
719
			if errors.Is(err, define.ErrNoSuchPod) {
720
				// We could check the DB to see if we still
721
				// exist, but that would be a serious violation
722
				// of DB integrity.
723
				// Mark this container as removed so there's no
724
				// confusion, though.
725
				removedCtrs[c.ID()] = nil
726
				return
727
			}
728

729
			retErr = err
730
			return
731
		}
732

733
		if !opts.RemovePod {
734
			// Lock the pod while we're removing container
735
			if pod.config.LockID == c.config.LockID {
736
				retErr = fmt.Errorf("container %s and pod %s share lock ID %d: %w", c.ID(), pod.ID(), c.config.LockID, define.ErrWillDeadlock)
737
				return
738
			}
739
			if !opts.NoLockPod {
740
				pod.lock.Lock()
741
				defer pod.lock.Unlock()
742
			}
743
			if err := pod.updatePod(); err != nil {
744
				// As above, there's a chance the pod was
745
				// already removed.
746
				if errors.Is(err, define.ErrNoSuchPod) {
747
					removedCtrs[c.ID()] = nil
748
					return
749
				}
750

751
				retErr = err
752
				return
753
			}
754

755
			infraID := pod.state.InfraContainerID
756
			if c.ID() == infraID && !opts.RemoveDeps {
757
				retErr = fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
758
				return
759
			}
760
		}
761
	}
762

763
	// For pod removal, the container is already locked by the caller
764
	locked := false
765
	if !opts.RemovePod {
766
		c.lock.Lock()
767
		defer func() {
768
			if locked {
769
				c.lock.Unlock()
770
			}
771
		}()
772
		locked = true
773
	}
774

775
	if !r.valid {
776
		retErr = define.ErrRuntimeStopped
777
		return
778
	}
779

780
	// Update the container to get current state
781
	if err := c.syncContainer(); err != nil {
782
		retErr = err
783
		return
784
	}
785

786
	serviceForPod := false
787
	if c.IsService() {
788
		for _, id := range c.state.Service.Pods {
789
			depPod, err := c.runtime.LookupPod(id)
790
			if err != nil {
791
				if errors.Is(err, define.ErrNoSuchPod) {
792
					continue
793
				}
794
				retErr = err
795
				return
796
			}
797
			if !opts.RemoveDeps {
798
				retErr = fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
799
				return
800
			}
801
			// If we are the service container for the pod we are a
802
			// member of: we need to remove that pod last, since
803
			// this container is part of it.
804
			if pod != nil && pod.ID() == depPod.ID() {
805
				serviceForPod = true
806
				continue
807
			}
808
			logrus.Infof("Removing pod %s as container %s is its service container", depPod.ID(), c.ID())
809
			podRemovedCtrs, err := r.RemovePod(ctx, depPod, true, opts.Force, opts.Timeout)
810
			for ctr, err := range podRemovedCtrs {
811
				removedCtrs[ctr] = err
812
			}
813
			if err != nil && !errors.Is(err, define.ErrNoSuchPod) && !errors.Is(err, define.ErrPodRemoved) {
814
				removedPods[depPod.ID()] = err
815
				retErr = fmt.Errorf("error removing container %s dependency pods: %w", c.ID(), err)
816
				return
817
			}
818
			removedPods[depPod.ID()] = nil
819
		}
820
	}
821
	if (serviceForPod || c.config.IsInfra) && !opts.RemovePod {
822
		// We're going to remove the pod we are a part of.
823
		// This will get rid of us as well, so we can just return
824
		// immediately after.
825
		if locked {
826
			locked = false
827
			c.lock.Unlock()
828
		}
829

830
		logrus.Infof("Removing pod %s (dependency of container %s)", pod.ID(), c.ID())
831
		podRemovedCtrs, err := r.removePod(ctx, pod, true, opts.Force, opts.Timeout)
832
		for ctr, err := range podRemovedCtrs {
833
			removedCtrs[ctr] = err
834
		}
835
		if err != nil && !errors.Is(err, define.ErrNoSuchPod) && !errors.Is(err, define.ErrPodRemoved) {
836
			removedPods[pod.ID()] = err
837
			retErr = fmt.Errorf("error removing container %s pod: %w", c.ID(), err)
838
			return
839
		}
840
		removedPods[pod.ID()] = nil
841
		return
842
	}
843

844
	// If we're not force-removing, we need to check if we're in a good
845
	// state to remove.
846
	if !opts.Force {
847
		if err := c.checkReadyForRemoval(); err != nil {
848
			retErr = err
849
			return
850
		}
851
	}
852

853
	if c.state.State == define.ContainerStatePaused {
854
		isV2, err := cgroups.IsCgroup2UnifiedMode()
855
		if err != nil {
856
			retErr = err
857
			return
858
		}
859
		// cgroups v1 and v2 handle signals on paused processes differently
860
		if !isV2 {
861
			if err := c.unpause(); err != nil {
862
				retErr = err
863
				return
864
			}
865
		}
866
		if err := c.ociRuntime.KillContainer(c, 9, false); err != nil {
867
			retErr = err
868
			return
869
		}
870
		// Need to update container state to make sure we know it's stopped
871
		if err := c.waitForExitFileAndSync(); err != nil {
872
			retErr = err
873
			return
874
		}
875
	}
876

877
	// Check that no other containers depend on the container.
878
	// Only used if not removing a pod - pods guarantee that all
879
	// deps will be evicted at the same time.
880
	if !opts.IgnoreDeps {
881
		deps, err := r.state.ContainerInUse(c)
882
		if err != nil {
883
			retErr = err
884
			return
885
		}
886
		if !opts.RemoveDeps {
887
			if len(deps) != 0 {
888
				depsStr := strings.Join(deps, ", ")
889
				retErr = fmt.Errorf("container %s has dependent containers which must be removed before it: %s: %w", c.ID(), depsStr, define.ErrCtrExists)
890
				return
891
			}
892
		}
893
		for _, depCtr := range deps {
894
			dep, err := r.GetContainer(depCtr)
895
			if err != nil {
896
				retErr = err
897
				return
898
			}
899
			logrus.Infof("Removing container %s (dependency of container %s)", dep.ID(), c.ID())
900
			recursiveOpts := ctrRmOpts{
901
				Force:        opts.Force,
902
				RemoveVolume: opts.RemoveVolume,
903
				RemoveDeps:   true,
904
				NoLockPod:    true,
905
				Timeout:      opts.Timeout,
906
			}
907
			ctrs, pods, err := r.removeContainer(ctx, dep, recursiveOpts)
908
			for rmCtr, err := range ctrs {
909
				if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
910
					removedCtrs[rmCtr] = nil
911
				} else {
912
					removedCtrs[rmCtr] = err
913
				}
914
			}
915
			for rmPod, err := range pods {
916
				removedPods[rmPod] = err
917
			}
918
			if err != nil && !errors.Is(err, define.ErrNoSuchCtr) && !errors.Is(err, define.ErrCtrRemoved) {
919
				retErr = err
920
				return
921
			}
922
		}
923
	}
924

925
	// Check that the container's in a good state to be removed.
926
	if c.ensureState(define.ContainerStateRunning, define.ContainerStateStopping) {
927
		time := c.StopTimeout()
928
		if opts.Timeout != nil {
929
			time = *opts.Timeout
930
		}
931
		// Ignore ErrConmonDead - we couldn't retrieve the container's
932
		// exit code properly, but it's still stopped.
933
		if err := c.stop(time); err != nil && !errors.Is(err, define.ErrConmonDead) {
934
			retErr = fmt.Errorf("cannot remove container %s as it could not be stopped: %w", c.ID(), err)
935
			return
936
		}
937

938
		// We unlocked as part of stop() above - there's a chance someone
939
		// else got in and removed the container before we reacquired the
940
		// lock.
941
		// Do a quick ping of the database to check if the container
942
		// still exists.
943
		if ok, _ := r.state.HasContainer(c.ID()); !ok {
944
			// When the container has already been removed, the OCI runtime directory remains.
945
			if err := c.cleanupRuntime(ctx); err != nil {
946
				retErr = fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err)
947
				return
948
			}
949
			// Do not add to removed containers, someone else
950
			// removed it.
951
			return
952
		}
953
	}
954

955
	reportErrorf := func(msg string, args ...any) {
956
		err := fmt.Errorf(msg, args...) // Always use fmt.Errorf instead of just logrus.Errorf(…) because the format string probably contains %w
957
		if retErr == nil {
958
			retErr = err
959
		} else {
960
			logrus.Errorf("%s", err.Error())
961
		}
962
	}
963

964
	// Clean up network namespace, cgroups, mounts.
965
	// Do this before we set ContainerStateRemoving, to ensure that we can
966
	// actually remove from the OCI runtime.
967
	if err := c.cleanup(ctx); err != nil {
968
		reportErrorf("cleaning up container %s: %w", c.ID(), err)
969
	}
970

971
	// Remove all active exec sessions
972
	// removing the exec sessions might temporarily unlock the container's lock.  Using it
973
	// after setting the state to ContainerStateRemoving will prevent that the container is
974
	// restarted
975
	if err := c.removeAllExecSessions(); err != nil {
976
		reportErrorf("removing exec sessions: %w", err)
977
	}
978

979
	// Set ContainerStateRemoving as an intermediate state (we may get
980
	// killed at any time) and save the container.
981
	c.state.State = define.ContainerStateRemoving
982

983
	if err := c.save(); err != nil {
984
		if !errors.Is(err, define.ErrCtrRemoved) {
985
			reportErrorf("saving container: %w", err)
986
		}
987
	}
988

989
	// Stop the container's storage
990
	if err := c.teardownStorage(); err != nil {
991
		reportErrorf("cleaning up storage: %w", err)
992
	}
993

994
	// Remove the container's CID file on container removal.
995
	if cidFile, ok := c.config.Spec.Annotations[define.InspectAnnotationCIDFile]; ok {
996
		if err := os.Remove(cidFile); err != nil && !errors.Is(err, os.ErrNotExist) {
997
			reportErrorf("cleaning up CID file: %w", err)
998
		}
999
	}
1000
	// Remove the container from the state
1001
	if c.config.Pod != "" {
1002
		// If we're removing the pod, the container will be evicted
1003
		// from the state elsewhere
1004
		if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
1005
			reportErrorf("removing container %s from database: %w", c.ID(), err)
1006
		}
1007
	} else {
1008
		if err := r.state.RemoveContainer(c); err != nil {
1009
			reportErrorf("removing container %s from database: %w", c.ID(), err)
1010
		}
1011
	}
1012
	removedCtrs[c.ID()] = nil
1013

1014
	// Deallocate the container's lock
1015
	if err := c.lock.Free(); err != nil && !errors.Is(err, fs.ErrNotExist) {
1016
		reportErrorf("freeing lock for container %s: %w", c.ID(), err)
1017
	}
1018

1019
	// Set container as invalid so it can no longer be used
1020
	c.valid = false
1021

1022
	c.newContainerEvent(events.Remove)
1023

1024
	if !opts.RemoveVolume {
1025
		return
1026
	}
1027

1028
	for _, v := range c.config.NamedVolumes {
1029
		if volume, err := runtime.state.Volume(v.Name); err == nil {
1030
			if !volume.Anonymous() {
1031
				continue
1032
			}
1033
			if err := runtime.removeVolume(ctx, volume, false, opts.Timeout, false); err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
1034
				if errors.Is(err, define.ErrVolumeBeingUsed) {
1035
					// Ignore error, since podman will report original error
1036
					volumesFrom, _ := c.volumesFrom()
1037
					if len(volumesFrom) > 0 {
1038
						logrus.Debugf("Cleaning up volume not possible since volume is in use (%s)", v.Name)
1039
						continue
1040
					}
1041
				}
1042
				logrus.Errorf("Cleaning up volume (%s): %v", v.Name, err)
1043
			}
1044
		}
1045
	}
1046

1047
	//nolint:nakedret
1048
	return
1049
}
1050

1051
// EvictContainer removes the given container partial or full ID or name, and
1052
// returns the full ID of the evicted container and any error encountered.
1053
// It should be used to remove a container when obtaining a Container struct
1054
// pointer has failed.
1055
// Running container will not be stopped.
1056
// If removeVolume is specified, named volumes used by the container will
1057
// be removed also if and only if the container is the sole user.
1058
func (r *Runtime) EvictContainer(ctx context.Context, idOrName string, removeVolume bool) (string, error) {
1059
	return r.evictContainer(ctx, idOrName, removeVolume)
1060
}
1061

1062
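// Example (illustrative sketch): evicting a container by name after normal
// removal has failed, assuming an initialized *Runtime r and a context ctx;
// "broken-ctr" is hypothetical.
//
//	id, err := r.EvictContainer(ctx, "broken-ctr", true)
//	if err != nil {
//		return fmt.Errorf("evicting container %s: %w", id, err)
//	}
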
// evictContainer is the internal function to handle container eviction based
1063
// on its partial or full ID or name.
1064
// It returns the full ID of the evicted container and any error encountered.
1065
// This does not lock the runtime nor the container.
1066
// removePod is used only when removing pods. It instructs Podman to ignore
1067
// infra container protections, and *not* remove from the database (as pod
1068
// remove will handle that).
1069
func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVolume bool) (string, error) {
1070
	var err error
1071
	var timeout *uint
1072

1073
	if !r.valid {
1074
		return "", define.ErrRuntimeStopped
1075
	}
1076

1077
	id, err := r.state.LookupContainerID(idOrName)
1078
	if err != nil {
1079
		return "", err
1080
	}
1081

1082
	// Begin by trying a normal removal. Valid containers will be removed normally.
1083
	tmpCtr, err := r.state.Container(id)
1084
	if err == nil {
1085
		logrus.Infof("Container %s successfully retrieved from state, attempting normal removal", id)
1086
		// Assume force = true for the evict case
1087
		opts := ctrRmOpts{
1088
			Force:        true,
1089
			RemoveVolume: removeVolume,
1090
			Timeout:      timeout,
1091
		}
1092
		_, _, err = r.removeContainer(ctx, tmpCtr, opts)
1093
		if !tmpCtr.valid {
1094
			// If the container is marked invalid, remove succeeded
1095
			// in kicking it out of the state - no need to continue.
1096
			return id, err
1097
		}
1098

1099
		if err == nil {
1100
			// Something has gone seriously wrong - no error but
1101
			// container was not removed.
1102
			logrus.Errorf("Container %s not removed with no error", id)
1103
		} else {
1104
			logrus.Warnf("Failed to removal container %s normally, proceeding with evict: %v", id, err)
1105
		}
1106
	}
1107

1108
	// Error out if the container does not exist in libpod
1109
	exists, err := r.state.HasContainer(id)
1110
	if err != nil {
1111
		return id, err
1112
	}
1113
	if !exists {
1114
		return id, err
1115
	}
1116

1117
	// Re-create a container struct for removal purposes
1118
	c := new(Container)
1119
	c.config, err = r.state.GetContainerConfig(id)
1120
	if err != nil {
1121
		return id, fmt.Errorf("failed to retrieve config for ctr ID %q: %w", id, err)
1122
	}
1123
	c.state = new(ContainerState)
1124

1125
	// We need to lock the pod before we lock the container.
1126
	// To avoid races around removing a container and the pod it is in.
1127
	// Don't need to do this in pod removal case - we're evicting the entire
1128
	// pod.
1129
	var pod *Pod
1130
	if c.config.Pod != "" {
1131
		pod, err = r.state.Pod(c.config.Pod)
1132
		if err != nil {
1133
			return id, fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), pod.ID(), err)
1134
		}
1135

1136
		// Lock the pod while we're removing container
1137
		pod.lock.Lock()
1138
		defer pod.lock.Unlock()
1139
		if err := pod.updatePod(); err != nil {
1140
			return id, err
1141
		}
1142

1143
		infraID, err := pod.infraContainerID()
1144
		if err != nil {
1145
			return "", err
1146
		}
1147
		if c.ID() == infraID {
1148
			return id, fmt.Errorf("container %s is the infra container of pod %s and cannot be removed without removing the pod", c.ID(), pod.ID())
1149
		}
1150
	}
1151

1152
	if c.IsService() {
1153
		report, err := c.canStopServiceContainer()
1154
		if err != nil {
1155
			return id, err
1156
		}
1157
		if !report.canBeStopped {
1158
			return id, fmt.Errorf("container %s is the service container of pod(s) %s and cannot be removed without removing the pod(s)", c.ID(), strings.Join(c.state.Service.Pods, ","))
1159
		}
1160
	}
1161

1162
	var cleanupErr error
1163
	// Remove the container from the state
1164
	if c.config.Pod != "" {
1165
		// If we're removing the pod, the container will be evicted
1166
		// from the state elsewhere
1167
		if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
1168
			cleanupErr = err
1169
		}
1170
	} else {
1171
		if err := r.state.RemoveContainer(c); err != nil {
1172
			cleanupErr = err
1173
		}
1174
	}
1175

1176
	// Unmount container mount points
1177
	for _, mount := range c.config.Mounts {
1178
		Unmount(mount)
1179
	}
1180

1181
	// Remove container from c/storage
1182
	if err := r.RemoveStorageContainer(id, true); err != nil {
1183
		if cleanupErr == nil {
1184
			cleanupErr = err
1185
		}
1186
	}
1187

1188
	if !removeVolume {
1189
		return id, cleanupErr
1190
	}
1191

1192
	for _, v := range c.config.NamedVolumes {
1193
		if volume, err := r.state.Volume(v.Name); err == nil {
1194
			if !volume.Anonymous() {
1195
				continue
1196
			}
1197
			if err := r.removeVolume(ctx, volume, false, timeout, false); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
1198
				logrus.Errorf("Cleaning up volume (%s): %v", v.Name, err)
1199
			}
1200
		}
1201
	}
1202

1203
	return id, cleanupErr
1204
}
1205

1206
// GetContainer retrieves a container by its ID
1207
func (r *Runtime) GetContainer(id string) (*Container, error) {
1208
	if !r.valid {
1209
		return nil, define.ErrRuntimeStopped
1210
	}
1211

1212
	return r.state.Container(id)
1213
}
1214

1215
// HasContainer checks if a container with the given ID is present
1216
func (r *Runtime) HasContainer(id string) (bool, error) {
1217
	if !r.valid {
1218
		return false, define.ErrRuntimeStopped
1219
	}
1220

1221
	return r.state.HasContainer(id)
1222
}
1223

1224
// LookupContainer looks up a container by its name or a partial ID
1225
// If a partial ID is not unique, an error will be returned
1226
func (r *Runtime) LookupContainer(idOrName string) (*Container, error) {
1227
	if !r.valid {
1228
		return nil, define.ErrRuntimeStopped
1229
	}
1230
	return r.state.LookupContainer(idOrName)
1231
}
1232

1233
// LookupContainerId looks up a container id by its name or a partial ID
1234
// If a partial ID is not unique, an error will be returned
1235
func (r *Runtime) LookupContainerID(idOrName string) (string, error) {
1236
	if !r.valid {
1237
		return "", define.ErrRuntimeStopped
1238
	}
1239
	return r.state.LookupContainerID(idOrName)
1240
}
1241

1242
// GetContainers retrieves all containers from the state.
1243
// If `loadState` is set, the containers' state will be loaded as well.
1244
// Filters can be provided which will determine what containers are included in
1245
// the output. Multiple filters are handled by ANDing their output, so only
1246
// containers matching all filters are returned
1247
func (r *Runtime) GetContainers(loadState bool, filters ...ContainerFilter) ([]*Container, error) {
1248
	if !r.valid {
1249
		return nil, define.ErrRuntimeStopped
1250
	}
1251

1252
	ctrs, err := r.state.AllContainers(loadState)
1253
	if err != nil {
1254
		return nil, err
1255
	}
1256

1257
	ctrsFiltered := make([]*Container, 0, len(ctrs))
1258

1259
	for _, ctr := range ctrs {
1260
		include := true
1261
		for _, filter := range filters {
1262
			include = include && filter(ctr)
1263
		}
1264

1265
		if include {
1266
			ctrsFiltered = append(ctrsFiltered, ctr)
1267
		}
1268
	}
1269

1270
	return ctrsFiltered, nil
1271
}
1272

1273
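// Example (illustrative sketch): combining filters; a container must match
// both, since GetContainers ANDs them. The podID value and the "app" label
// key are hypothetical.
//
//	inPod := func(c *Container) bool { return c.PodID() == podID }
//	labeled := func(c *Container) bool { return c.Labels()["app"] == "web" }
//	ctrs, err := r.GetContainers(false, inPod, labeled)
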
// GetAllContainers is a helper function for GetContainers
1274
func (r *Runtime) GetAllContainers() ([]*Container, error) {
1275
	return r.state.AllContainers(false)
1276
}
1277

1278
// GetRunningContainers is a helper function for GetContainers
1279
func (r *Runtime) GetRunningContainers() ([]*Container, error) {
1280
	running := func(c *Container) bool {
1281
		state, _ := c.State()
1282
		return state == define.ContainerStateRunning
1283
	}
1284
	return r.GetContainers(false, running)
1285
}
1286

1287
// GetContainersByList is a helper function for GetContainers
1288
// which takes a []string of container IDs or names
1289
func (r *Runtime) GetContainersByList(containers []string) ([]*Container, error) {
1290
	ctrs := make([]*Container, 0, len(containers))
1291
	for _, inputContainer := range containers {
1292
		ctr, err := r.LookupContainer(inputContainer)
1293
		if err != nil {
1294
			return ctrs, fmt.Errorf("unable to look up container %s: %w", inputContainer, err)
1295
		}
1296
		ctrs = append(ctrs, ctr)
1297
	}
1298
	return ctrs, nil
1299
}
1300

1301
// GetLatestContainer returns a container object of the latest created container.
1302
func (r *Runtime) GetLatestContainer() (*Container, error) {
1303
	lastCreatedIndex := -1
1304
	var lastCreatedTime time.Time
1305
	ctrs, err := r.GetAllContainers()
1306
	if err != nil {
1307
		return nil, fmt.Errorf("unable to find latest container: %w", err)
1308
	}
1309
	if len(ctrs) == 0 {
1310
		return nil, define.ErrNoSuchCtr
1311
	}
1312
	for containerIndex, ctr := range ctrs {
1313
		createdTime := ctr.config.CreatedTime
1314
		if createdTime.After(lastCreatedTime) {
1315
			lastCreatedTime = createdTime
1316
			lastCreatedIndex = containerIndex
1317
		}
1318
	}
1319
	return ctrs[lastCreatedIndex], nil
1320
}
1321

1322
// GetExecSessionContainer gets the container that a given exec session ID is
1323
// attached to.
1324
func (r *Runtime) GetExecSessionContainer(id string) (*Container, error) {
1325
	if !r.valid {
1326
		return nil, define.ErrRuntimeStopped
1327
	}
1328

1329
	ctrID, err := r.state.GetExecSession(id)
1330
	if err != nil {
1331
		return nil, err
1332
	}
1333

1334
	return r.state.Container(ctrID)
1335
}
1336

1337
// PruneContainers removes stopped and exited containers from localstorage.  A set of optional filters
1338
// can be provided to be more granular.
1339
func (r *Runtime) PruneContainers(filterFuncs []ContainerFilter) ([]*reports.PruneReport, error) {
1340
	preports := make([]*reports.PruneReport, 0)
1341
	// We add getting the exited and stopped containers via a filter
1342
	containerStateFilter := func(c *Container) bool {
1343
		if c.PodID() != "" {
1344
			return false
1345
		}
1346
		state, err := c.State()
1347
		if err != nil {
1348
			logrus.Error(err)
1349
			return false
1350
		}
1351
		if state == define.ContainerStateStopped || state == define.ContainerStateExited ||
1352
			state == define.ContainerStateCreated || state == define.ContainerStateConfigured {
1353
			return true
1354
		}
1355
		return false
1356
	}
1357
	filterFuncs = append(filterFuncs, containerStateFilter)
1358
	delContainers, err := r.GetContainers(false, filterFuncs...)
1359
	if err != nil {
1360
		return nil, err
1361
	}
1362
	for _, c := range delContainers {
1363
		report := new(reports.PruneReport)
1364
		report.Id = c.ID()
1365
		report.Err = nil
1366
		report.Size = 0
1367
		size, err := c.RWSize()
1368
		if err != nil {
1369
			report.Err = err
1370
			preports = append(preports, report)
1371
			continue
1372
		}
1373
		var time *uint
1374
		err = r.RemoveContainer(context.Background(), c, false, false, time)
1375
		if err != nil {
1376
			report.Err = err
1377
		} else {
1378
			report.Size = (uint64)(size)
1379
		}
1380
		preports = append(preports, report)
1381
	}
1382
	return preports, nil
1383
}
1384

1385
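// Example (illustrative sketch): pruning with no extra filters and summing
// the reclaimed space from the returned reports.
//
//	reps, err := r.PruneContainers(nil)
//	if err != nil {
//		return err
//	}
//	var reclaimed uint64
//	for _, rep := range reps {
//		if rep.Err == nil {
//			reclaimed += rep.Size
//		}
//	}
//	logrus.Infof("reclaimed %d bytes", reclaimed)
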
// MountStorageContainer mounts the storage container's root filesystem
1386
func (r *Runtime) MountStorageContainer(id string) (string, error) {
1387
	if _, err := r.GetContainer(id); err == nil {
1388
		return "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
1389
	}
1390
	container, err := r.store.Container(id)
1391
	if err != nil {
1392
		return "", err
1393
	}
1394
	mountPoint, err := r.store.Mount(container.ID, "")
1395
	if err != nil {
1396
		return "", fmt.Errorf("mounting storage for container %s: %w", id, err)
1397
	}
1398
	return mountPoint, nil
1399
}
1400

1401
// UnmountStorageContainer unmounts the storage container's root filesystem
1402
func (r *Runtime) UnmountStorageContainer(id string, force bool) (bool, error) {
1403
	if _, err := r.GetContainer(id); err == nil {
1404
		return false, fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
1405
	}
1406
	container, err := r.store.Container(id)
1407
	if err != nil {
1408
		return false, err
1409
	}
1410
	return r.store.Unmount(container.ID, force)
1411
}
1412

1413
// MountedStorageContainer returns whether a storage container is mounted
1414
// along with the mount path
1415
func (r *Runtime) IsStorageContainerMounted(id string) (bool, string, error) {
1416
	var path string
1417
	if _, err := r.GetContainer(id); err == nil {
1418
		return false, "", fmt.Errorf("ctr %s is a libpod container: %w", id, define.ErrCtrExists)
1419
	}
1420

1421
	mountCnt, err := r.storageService.MountedContainerImage(id)
1422
	if err != nil {
1423
		return false, "", err
1424
	}
1425
	mounted := mountCnt > 0
1426
	if mounted {
1427
		path, err = r.storageService.GetMountpoint(id)
1428
		if err != nil {
1429
			return false, "", err
1430
		}
1431
	}
1432
	return mounted, path, nil
1433
}
1434

1435
// StorageContainers returns a list of containers from containers/storage that
1436
// are not currently known to Podman.
1437
func (r *Runtime) StorageContainers() ([]storage.Container, error) {
1438
	if r.store == nil {
1439
		return nil, define.ErrStoreNotInitialized
1440
	}
1441

1442
	storeContainers, err := r.store.Containers()
1443
	if err != nil {
1444
		return nil, fmt.Errorf("reading list of all storage containers: %w", err)
1445
	}
1446
	retCtrs := []storage.Container{}
1447
	for _, container := range storeContainers {
1448
		exists, err := r.state.HasContainer(container.ID)
1449
		if err != nil && err != define.ErrNoSuchCtr {
1450
			return nil, fmt.Errorf("failed to check if %s container exists in database: %w", container.ID, err)
1451
		}
1452
		if exists {
1453
			continue
1454
		}
1455
		retCtrs = append(retCtrs, container)
1456
	}
1457

1458
	return retCtrs, nil
1459
}
1460

1461
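// IsBuildahContainer returns whether the container with the given ID is a
// Buildah working container in the shared storage store (a sketch of a doc
// comment added here for clarity; the function below had none).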
func (r *Runtime) IsBuildahContainer(id string) (bool, error) {
	return buildah.IsContainer(id, r.store)
}