//go:build !remote

package libpod

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"strings"
	"time"

	"github.com/containers/common/libnetwork/pasta"
	"github.com/containers/common/libnetwork/types"
	"github.com/containers/common/pkg/config"
	"github.com/containers/common/pkg/secrets"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/podman/v5/libpod/define"
	"github.com/containers/podman/v5/libpod/lock"
	"github.com/containers/storage"
	spec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
)

// CgroupfsDefaultCgroupParent is the cgroup parent for CgroupFS in libpod
const CgroupfsDefaultCgroupParent = "/libpod_parent"

// SystemdDefaultCgroupParent is the cgroup parent for the systemd cgroup
// manager in libpod
const SystemdDefaultCgroupParent = "machine.slice"

// SystemdDefaultRootlessCgroupParent is the cgroup parent for the systemd cgroup
// manager in libpod when running as rootless
const SystemdDefaultRootlessCgroupParent = "user.slice"

// DefaultWaitInterval is the default interval between container status checks
// while waiting.
const DefaultWaitInterval = 250 * time.Millisecond

// LinuxNS represents a Linux namespace
type LinuxNS int

const (
	// InvalidNS is an invalid namespace
	InvalidNS LinuxNS = iota
	// IPCNS is the IPC namespace
	IPCNS LinuxNS = iota
	// MountNS is the mount namespace
	MountNS LinuxNS = iota
	// NetNS is the network namespace
	NetNS LinuxNS = iota
	// PIDNS is the PID namespace
	PIDNS LinuxNS = iota
	// UserNS is the user namespace
	UserNS LinuxNS = iota
	// UTSNS is the UTS namespace
	UTSNS LinuxNS = iota
	// CgroupNS is the Cgroup namespace
	CgroupNS LinuxNS = iota
)

// String returns a string representation of a Linux namespace
// It is guaranteed to be the name of the namespace in /proc for valid ns types
func (ns LinuxNS) String() string {
	switch ns {
	case InvalidNS:
		return "invalid"
	case IPCNS:
		return "ipc"
	case MountNS:
		return "mnt"
	case NetNS:
		return "net"
	case PIDNS:
		return "pid"
	case UserNS:
		return "user"
	case UTSNS:
		return "uts"
	case CgroupNS:
		return "cgroup"
	default:
		return "unknown"
	}
}
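
// Illustrative sketch (not part of upstream): the names returned by String()
// line up with the kernel's /proc entries, so a namespace path can be built
// directly from them, mirroring namespacePath() further below.
//
//	nsPath := fmt.Sprintf("/proc/%d/ns/%s", pid, NetNS.String())
//	// => "/proc/1234/ns/net" when pid == 1234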

// Container is a single OCI container.
// All operations on a Container that access state must begin with a call to
// syncContainer().
// There is no guarantee that the state is readable before syncContainer() is
// run, and even if it is, its contents may be out of date and must be
// refreshed from the database.
// Generally, this requirement applies only to top-level functions; helpers
// can assume their callers have handled it. As a rule, if a function takes
// the container lock and accesses any part of state, it should call
// syncContainer() immediately after locking.
type Container struct {
	config *ContainerConfig

	state *ContainerState

	// batched indicates that a container has been locked as part of a
	// Batch() operation.
	// Functions called on a batched container will not lock or sync.
	batched bool

	valid      bool
	lock       lock.Locker
	runtime    *Runtime
	ociRuntime OCIRuntime

	rootlessSlirpSyncR *os.File
	rootlessSlirpSyncW *os.File

	rootlessPortSyncR *os.File
	rootlessPortSyncW *os.File

	// perNetworkOpts should be set when you want to use special network
	// options when calling network setup/teardown. This should be used for
	// container restore or network reload for example. Leave this nil if
	// the settings from the container config should be used.
	perNetworkOpts map[string]types.PerNetworkOptions

	// This is true if a container is restored from a checkpoint.
	restoreFromCheckpoint bool

	slirp4netnsSubnet *net.IPNet
	pastaResult       *pasta.SetupResult
}
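
// Illustrative sketch (not part of upstream): the lock-then-sync discipline
// described on Container above, as a typical state accessor would follow it.
// The helper name is hypothetical; compare the real State() accessor below.
//
//	func (c *Container) exampleStatus() (define.ContainerStatus, error) {
//		if !c.batched {
//			c.lock.Lock()
//			defer c.lock.Unlock()
//			if err := c.syncContainer(); err != nil {
//				return define.ContainerStateUnknown, err
//			}
//		}
//		return c.state.State, nil
//	}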

// ContainerState contains the current state of the container.
// It is stored on disk in a tmpfs and recreated on reboot.
type ContainerState struct {
	// The current state of the running container
	State define.ContainerStatus `json:"state"`
	// The path to the JSON OCI runtime spec for this container
	ConfigPath string `json:"configPath,omitempty"`
	// RunDir is a per-boot directory for container content
	RunDir string `json:"runDir,omitempty"`
	// Mounted indicates whether the container's storage has been mounted
	// for use
	Mounted bool `json:"mounted,omitempty"`
	// Mountpoint contains the path to the container's mounted storage as given
	// by containers/storage.
	Mountpoint string `json:"mountPoint,omitempty"`
	// StartedTime is the time the container was started
	StartedTime time.Time `json:"startedTime,omitempty"`
	// FinishedTime is the time the container finished executing
	FinishedTime time.Time `json:"finishedTime,omitempty"`
	// ExitCode is the exit code returned when the container stopped
	ExitCode int32 `json:"exitCode,omitempty"`
	// Exited is whether the container has exited
	Exited bool `json:"exited,omitempty"`
	// Error holds the last known error message during start, stop, or remove
	Error string `json:"error,omitempty"`
	// OOMKilled indicates that the container was killed as it ran out of
	// memory
	OOMKilled bool `json:"oomKilled,omitempty"`
	// Checkpointed indicates that the container was stopped by a checkpoint
	// operation.
	Checkpointed bool `json:"checkpointed,omitempty"`
	// PID is the PID of a running container
	PID int `json:"pid,omitempty"`
	// ConmonPID is the PID of the container's conmon
	ConmonPID int `json:"conmonPid,omitempty"`
	// ExecSessions contains all exec sessions that are associated with this
	// container.
	ExecSessions map[string]*ExecSession `json:"newExecSessions,omitempty"`
	// LegacyExecSessions are legacy exec sessions from older versions of
	// Podman.
	// These are DEPRECATED and will be removed in a future release.
	LegacyExecSessions map[string]*legacyExecSession `json:"execSessions,omitempty"`
	// NetNS is the path or name of the NetNS
	NetNS string `json:"netns,omitempty"`
	// NetworkStatus contains the network Status for all networks
	// the container is attached to. Only populated if we created a network
	// namespace for the container, and the network namespace is currently
	// active.
	// To read this field use container.getNetworkStatus() instead; it will
	// take care of migrating the old DEPRECATED network status to the new
	// format.
	NetworkStatus map[string]types.StatusBlock `json:"networkStatus,omitempty"`
	// BindMounts contains files that will be bind-mounted into the
	// container when it is mounted.
	// These include /etc/hosts and /etc/resolv.conf.
	// This maps the path the file will be mounted to in the container to
	// the path of the file on disk outside the container.
	BindMounts map[string]string `json:"bindMounts,omitempty"`
	// StoppedByUser indicates whether the container was stopped by an
	// explicit call to the Stop() API.
	StoppedByUser bool `json:"stoppedByUser,omitempty"`
	// RestartPolicyMatch indicates whether the conditions for restart
	// policy have been met.
	RestartPolicyMatch bool `json:"restartPolicyMatch,omitempty"`
	// RestartCount is how many times the container was restarted by its
	// restart policy. This is NOT incremented by normal container restarts
	// (only by restart policy).
	RestartCount uint `json:"restartCount,omitempty"`
	// StartupHCPassed indicates that the startup healthcheck has
	// succeeded and the main healthcheck can begin.
	StartupHCPassed bool `json:"startupHCPassed,omitempty"`
	// StartupHCSuccessCount indicates the number of successes of the
	// startup healthcheck. A startup HC can require more than one success
	// to be marked as passed.
	StartupHCSuccessCount int `json:"startupHCSuccessCount,omitempty"`
	// StartupHCFailureCount indicates the number of failures of the startup
	// healthcheck. The container will be restarted if this exceeds a set
	// number in the startup HC config.
	StartupHCFailureCount int `json:"startupHCFailureCount,omitempty"`

	// ExtensionStageHooks holds hooks which will be executed by libpod
	// and not delegated to the OCI runtime.
	ExtensionStageHooks map[string][]spec.Hook `json:"extensionStageHooks,omitempty"`

	// NetInterfaceDescriptions describes the relationship between a CNI
	// network and its interface name
	NetInterfaceDescriptions ContainerNetworkDescriptions `json:"networkDescriptions,omitempty"`

	// Service indicates that the container is the service container of a
	// service. A service consists of one or more pods.  The service
	// container is started before all pods and is stopped when the last
	// pod stops. The service container allows for tracking and managing
	// the entire life cycle of a service which may be started via
	// `podman-play-kube`.
	Service Service

	// The following checkpoint/restore-related information is displayed
	// if the container has been checkpointed or restored.
	CheckpointedTime time.Time `json:"checkpointedTime,omitempty"`
	RestoredTime     time.Time `json:"restoredTime,omitempty"`
	CheckpointLog    string    `json:"checkpointLog,omitempty"`
	CheckpointPath   string    `json:"checkpointPath,omitempty"`
	RestoreLog       string    `json:"restoreLog,omitempty"`
	Restored         bool      `json:"restored,omitempty"`
}

// ContainerNamedVolume is a named volume that will be mounted into the
// container. Each named volume is a libpod Volume present in the state.
type ContainerNamedVolume struct {
	// Name is the name of the volume to mount in.
	// Must resolve to a valid volume present in this Podman.
	Name string `json:"volumeName"`
	// Dest is the mount's destination
	Dest string `json:"dest"`
	// Options are fstab-style mount options
	Options []string `json:"options,omitempty"`
	// IsAnonymous sets the named volume as anonymous even if it has a name.
	// This is used for emptyDir volumes from a kube yaml.
	IsAnonymous bool `json:"setAnonymous,omitempty"`
	// SubPath determines which part of the Source will be mounted in the container
	SubPath string
}

// ContainerOverlayVolume is an overlay volume that will be mounted into the
// container. Each volume is a libpod Volume present in the state.
type ContainerOverlayVolume struct {
	// Dest is the absolute path where the mount will be placed in the container.
	Dest string `json:"dest"`
	// Source specifies the source path of the mount.
	Source string `json:"source,omitempty"`
	// Options holds overlay volume options.
	Options []string `json:"options,omitempty"`
}

// ContainerImageVolume is a volume based on a container image.  The container
// image is first mounted on the host and is then bind-mounted into the
// container.
type ContainerImageVolume struct {
	// Source is the source of the image volume.  The image can be referred
	// to by name and by ID.
	Source string `json:"source"`
	// Dest is the absolute path of the mount in the container.
	Dest string `json:"dest"`
	// ReadWrite sets the volume writable.
	ReadWrite bool `json:"rw"`
}

// ContainerSecret is a secret that is mounted in a container
type ContainerSecret struct {
	// Secret is the secret
	*secrets.Secret
	// UID is the UID of the secret file
	UID uint32
	// GID is the GID of the secret file
	GID uint32
	// Mode is the mode of the secret file
	Mode uint32
	// Target is the secret's target inside the container
	Target string
}

// ContainerNetworkDescriptions describes the relationship between the CNI
// network and the ethN interface name, where N is an integer
type ContainerNetworkDescriptions map[string]int

// Config accessors
// Unlocked

// Config returns the configuration used to create the container.
// Note that the returned config does not include the actual networks.
// Use ConfigWithNetworks() if you need them.
func (c *Container) Config() *ContainerConfig {
	returnConfig := new(ContainerConfig)
	if err := JSONDeepCopy(c.config, returnConfig); err != nil {
		return nil
	}
	return returnConfig
}
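
// Illustrative sketch (not part of upstream): Config hands back a deep copy,
// so a caller may mutate the result without touching the live configuration.
//
//	cfg := ctr.Config()
//	if cfg != nil {
//		cfg.Name = "scratch" // modifies only the copy
//	}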

// ConfigWithNetworks returns the configuration used to create the container,
// including the actual networks.
func (c *Container) ConfigWithNetworks() *ContainerConfig {
	returnConfig := c.Config()
	if returnConfig == nil {
		return nil
	}

	networks, err := c.networks()
	if err != nil {
		return nil
	}
	returnConfig.Networks = networks

	return returnConfig
}

// ConfigNoCopy returns the configuration used by the container.
// Note that the returned value is not a copy and must hence
// only be used in a reading fashion.
func (c *Container) ConfigNoCopy() *ContainerConfig {
	return c.config
}

// DeviceHostSrc returns the user-supplied devices to be passed down in the pod
func (c *Container) DeviceHostSrc() []spec.LinuxDevice {
	return c.config.DeviceHostSrc
}

// Runtime returns the container's Runtime.
func (c *Container) Runtime() *Runtime {
	return c.runtime
}

// Spec returns the container's OCI runtime spec.
// The spec returned is the one used to create the container. The running
// spec may differ slightly as mounts are added based on the image.
func (c *Container) Spec() *spec.Spec {
	returnSpec := new(spec.Spec)
	if err := JSONDeepCopy(c.config.Spec, returnSpec); err != nil {
		return nil
	}

	return returnSpec
}

// specFromState returns the unmarshalled json config of the container.  If the
// config does not exist (e.g., because the container was never started) return
// the spec from the config.
func (c *Container) specFromState() (*spec.Spec, error) {
	returnSpec := c.config.Spec

	if f, err := os.Open(c.state.ConfigPath); err == nil {
		returnSpec = new(spec.Spec)
		content, err := io.ReadAll(f)
		if err != nil {
			return nil, fmt.Errorf("reading container config: %w", err)
		}
		if err := json.Unmarshal(content, &returnSpec); err != nil {
			// Malformed spec, just use c.config.Spec instead
			logrus.Warnf("Error unmarshalling container %s config: %v", c.ID(), err)
			return c.config.Spec, nil
		}
	} else if !os.IsNotExist(err) {
		// A missing config file is fine (the container was never
		// started); any other error opening it is fatal.
		return nil, fmt.Errorf("opening container config: %w", err)
	}

	return returnSpec, nil
}

// ID returns the container's ID
func (c *Container) ID() string {
	return c.config.ID
}

// Name returns the container's name
func (c *Container) Name() string {
	return c.config.Name
}

// PodID returns the full ID of the pod the container belongs to, or "" if it
// does not belong to a pod
func (c *Container) PodID() string {
	return c.config.Pod
}

// Namespace returns the libpod namespace the container is in.
// Namespaces are used to logically separate containers and pods in the state.
func (c *Container) Namespace() string {
	return c.config.Namespace
}

// Image returns the ID and name of the image used as the container's rootfs.
func (c *Container) Image() (string, string) {
	return c.config.RootfsImageID, c.config.RootfsImageName
}

// RawImageName returns the unprocessed and not-normalized user-specified image
// name.
func (c *Container) RawImageName() string {
	return c.config.RawImageName
}

// ShmDir returns the source path to be mounted on /dev/shm in the container
func (c *Container) ShmDir() string {
	return c.config.ShmDir
}

// ShmSize returns the size of the SHM device to be mounted into the container
func (c *Container) ShmSize() int64 {
	return c.config.ShmSize
}

// StaticDir returns the directory used to store persistent container files
func (c *Container) StaticDir() string {
	return c.config.StaticDir
}

// NamedVolumes returns the container's named volumes.
// The name of each is guaranteed to point to a valid libpod Volume present in
// the state.
func (c *Container) NamedVolumes() []*ContainerNamedVolume {
	volumes := []*ContainerNamedVolume{}
	for _, vol := range c.config.NamedVolumes {
		newVol := new(ContainerNamedVolume)
		newVol.Name = vol.Name
		newVol.Dest = vol.Dest
		newVol.Options = vol.Options
		newVol.SubPath = vol.SubPath
		volumes = append(volumes, newVol)
	}

	return volumes
}

// Privileged returns whether the container is privileged
func (c *Container) Privileged() bool {
	return c.config.Privileged
}

// ProcessLabel returns the SELinux process label of the container
func (c *Container) ProcessLabel() string {
	return c.config.ProcessLabel
}

// MountLabel returns the SELinux mount label of the container
func (c *Container) MountLabel() string {
	return c.config.MountLabel
}

// Systemd returns whether the container will be running in systemd mode
func (c *Container) Systemd() bool {
	if c.config.Systemd != nil {
		return *c.config.Systemd
	}
	return false
}

// User returns the user the container runs as
func (c *Container) User() string {
	return c.config.User
}

// Dependencies gets the containers this container depends upon
func (c *Container) Dependencies() []string {
	// Collect in a map first to remove dupes
	dependsCtrs := map[string]bool{}

	// First add all namespace containers
	if c.config.IPCNsCtr != "" {
		dependsCtrs[c.config.IPCNsCtr] = true
	}
	if c.config.MountNsCtr != "" {
		dependsCtrs[c.config.MountNsCtr] = true
	}
	if c.config.NetNsCtr != "" {
		dependsCtrs[c.config.NetNsCtr] = true
	}
	if c.config.PIDNsCtr != "" {
		dependsCtrs[c.config.PIDNsCtr] = true
	}
	if c.config.UserNsCtr != "" {
		dependsCtrs[c.config.UserNsCtr] = true
	}
	if c.config.UTSNsCtr != "" {
		dependsCtrs[c.config.UTSNsCtr] = true
	}
	if c.config.CgroupNsCtr != "" {
		dependsCtrs[c.config.CgroupNsCtr] = true
	}

	// Add all generic dependencies
	for _, id := range c.config.Dependencies {
		dependsCtrs[id] = true
	}

	if len(dependsCtrs) == 0 {
		return []string{}
	}

	depends := make([]string, 0, len(dependsCtrs))
	for ctr := range dependsCtrs {
		depends = append(depends, ctr)
	}

	return depends
}
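
// Illustrative example (not part of upstream): namespace containers and
// generic dependencies are deduplicated through the map above, so a container
// sharing several namespaces with one other container reports it only once.
//
//	// with NetNsCtr == PIDNsCtr == "abc" and Dependencies == ["abc"]:
//	ctr.Dependencies() // => ["abc"]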

// NewNetNS returns whether the container will create a new network namespace
func (c *Container) NewNetNS() bool {
	return c.config.CreateNetNS
}

// PortMappings returns the ports that will be mapped into a container if
// a new network namespace is created
// If NewNetNS() is false, this value is unused
func (c *Container) PortMappings() ([]types.PortMapping, error) {
	// First check if the container belongs to a network namespace (like a pod)
	if len(c.config.NetNsCtr) > 0 {
		netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr)
		if err != nil {
			return nil, fmt.Errorf("unable to look up network namespace for container %s: %w", c.ID(), err)
		}
		return netNsCtr.PortMappings()
	}
	return c.config.PortMappings, nil
}

// DNSServers returns DNS servers that will be used in the container's
// resolv.conf
// If empty, DNS servers from the host's resolv.conf will be used instead
func (c *Container) DNSServers() []net.IP {
	return c.config.DNSServer
}

// DNSSearch returns the DNS search domains that will be used in the container's
// resolv.conf
// If empty, DNS search domains from the host's resolv.conf will be used instead
func (c *Container) DNSSearch() []string {
	return c.config.DNSSearch
}

// DNSOption returns the DNS options that will be used in the container's
// resolv.conf
// If empty, options from the host's resolv.conf will be used instead
func (c *Container) DNSOption() []string {
	return c.config.DNSOption
}

// HostsAdd returns hosts that will be added to the container's hosts file
// The host system's hosts file is used as a base, and these are appended to it
func (c *Container) HostsAdd() []string {
	return c.config.HostAdd
}

// UserVolumes returns user-added volume mounts in the container.
// These are not added to the spec, but are used during image commit and to
// trigger some OCI hooks.
func (c *Container) UserVolumes() []string {
	volumes := make([]string, 0, len(c.config.UserVolumes))
	volumes = append(volumes, c.config.UserVolumes...)
	return volumes
}

// Entrypoint is the container's entrypoint.
// This is not added to the spec, but is instead used during image commit.
func (c *Container) Entrypoint() []string {
	entrypoint := make([]string, 0, len(c.config.Entrypoint))
	entrypoint = append(entrypoint, c.config.Entrypoint...)
	return entrypoint
}

// Command is the container's command.
// This is not added to the spec, but is instead used during image commit.
func (c *Container) Command() []string {
	command := make([]string, 0, len(c.config.Command))
	command = append(command, c.config.Command...)
	return command
}

// Stdin returns whether STDIN on the container will be kept open
func (c *Container) Stdin() bool {
	return c.config.Stdin
}

// Labels returns the container's labels
func (c *Container) Labels() map[string]string {
	labels := make(map[string]string)
	for key, value := range c.config.Labels {
		labels[key] = value
	}
	return labels
}

// StopSignal is the signal that will be used to stop the container
// If it fails to stop the container, SIGKILL will be used after a timeout
// If StopSignal is 0, the default signal of SIGTERM will be used
func (c *Container) StopSignal() uint {
	return c.config.StopSignal
}

// StopTimeout returns the container's stop timeout
// If the container's default stop signal fails to kill the container, SIGKILL
// will be used after this timeout
func (c *Container) StopTimeout() uint {
	return c.config.StopTimeout
}

// CreatedTime gets the time when the container was created
func (c *Container) CreatedTime() time.Time {
	return c.config.CreatedTime
}

// CgroupParent gets the container's Cgroup parent
func (c *Container) CgroupParent() string {
	return c.config.CgroupParent
}

// LogPath returns the path to the container's log file
// This file will only be present after Init() is called to create the container
// in the runtime
func (c *Container) LogPath() string {
	return c.config.LogPath
}

// LogTag returns the tag for the container's log file
func (c *Container) LogTag() string {
	return c.config.LogTag
}

// RestartPolicy returns the container's restart policy.
func (c *Container) RestartPolicy() string {
	return c.config.RestartPolicy
}

// RestartRetries returns the number of retries that will be attempted when
// using the "on-failure" restart policy
func (c *Container) RestartRetries() uint {
	return c.config.RestartRetries
}

// LogDriver returns the log driver for this container
func (c *Container) LogDriver() string {
	return c.config.LogDriver
}

// RuntimeName returns the name of the runtime
func (c *Container) RuntimeName() string {
	return c.config.OCIRuntime
}

// Runtime spec accessors
// Unlocked

// Hostname gets the container's hostname
func (c *Container) Hostname() string {
	if c.config.UTSNsCtr != "" {
		utsNsCtr, err := c.runtime.GetContainer(c.config.UTSNsCtr)
		if err != nil {
			// should we return an error here?
			logrus.Errorf("unable to look up uts namespace for container %s: %v", c.ID(), err)
			return ""
		}
		return utsNsCtr.Hostname()
	}
	if c.config.Spec.Hostname != "" {
		return c.config.Spec.Hostname
	}

	// If the container is not running in a private UTS namespace,
	// return the host's hostname.
	privateUTS := c.hasPrivateUTS()
	if !privateUTS {
		hostname, err := os.Hostname()
		if err == nil {
			return hostname
		}
	}
	// Otherwise fall back to the first 12 characters of the container ID,
	// guarding against IDs shorter than 12 characters.
	if len(c.ID()) < 12 {
		return c.ID()
	}
	return c.ID()[:12]
}

// WorkingDir returns the container's working directory
func (c *Container) WorkingDir() string {
	if c.config.Spec.Process != nil {
		return c.config.Spec.Process.Cwd
	}
	return "/"
}

// Terminal returns true if the container has a terminal
func (c *Container) Terminal() bool {
	if c.config.Spec != nil && c.config.Spec.Process != nil {
		return c.config.Spec.Process.Terminal
	}
	return false
}

// LinuxResources returns the container's Linux resources (if any)
func (c *Container) LinuxResources() *spec.LinuxResources {
	if c.config.Spec != nil && c.config.Spec.Linux != nil {
		return c.config.Spec.Linux.Resources
	}
	return nil
}

// Env returns the default environment variables defined for the container
func (c *Container) Env() []string {
	if c.config.Spec != nil && c.config.Spec.Process != nil {
		return c.config.Spec.Process.Env
	}
	return nil
}

// State Accessors
// Require locking

// State returns the current state of the container
func (c *Container) State() (define.ContainerStatus, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return define.ContainerStateUnknown, err
		}
	}
	return c.state.State, nil
}

// RestartCount returns the number of times the container has been restarted
// by its restart policy.
func (c *Container) RestartCount() (uint, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return 0, err
		}
	}
	return c.state.RestartCount, nil
}

// Mounted returns whether the container is mounted and the path it is mounted
// at (if it is mounted).
// If the container is not mounted, no error is returned, and the mountpoint
// will be set to "".
func (c *Container) Mounted() (bool, string, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return false, "", fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	// We cannot directly return c.state.Mountpoint as it is not guaranteed
	// to be set if the container is mounted, only if the container has been
	// prepared with c.prepare().
	// Instead, let's call into c/storage
	mountedTimes, err := c.runtime.storageService.MountedContainerImage(c.ID())
	if err != nil {
		return false, "", err
	}

	if mountedTimes > 0 {
		mountPoint, err := c.runtime.storageService.GetMountpoint(c.ID())
		if err != nil {
			return false, "", err
		}

		return true, mountPoint, nil
	}

	return false, "", nil
}

// StartedTime returns the time the container was started
func (c *Container) StartedTime() (time.Time, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return time.Time{}, fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	return c.state.StartedTime, nil
}

// FinishedTime returns the time the container was stopped
func (c *Container) FinishedTime() (time.Time, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return time.Time{}, fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	return c.state.FinishedTime, nil
}

// ExitCode returns the exit code of the container as
// an int32, and whether the container has exited.
// If the container has not exited, the exit code will always be 0.
// If the container restarts, the exit code is reset to 0.
func (c *Container) ExitCode() (int32, bool, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return 0, false, fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	return c.state.ExitCode, c.state.Exited, nil
}

// OOMKilled returns whether the container was killed by an OOM condition
func (c *Container) OOMKilled() (bool, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return false, fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	return c.state.OOMKilled, nil
}

// PID returns the PID of the container.
// If the container is not running, a PID of 0 will be returned. No error will
// occur.
func (c *Container) PID() (int, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return -1, err
		}
	}

	return c.state.PID, nil
}

// ConmonPID returns the PID of the container's conmon process.
// If the container is not running, a PID of 0 will be returned. No error will
// occur.
func (c *Container) ConmonPID() (int, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return -1, err
		}
	}

	return c.state.ConmonPID, nil
}

// ExecSessions retrieves active exec sessions running in the container
func (c *Container) ExecSessions() ([]string, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	ids := make([]string, 0, len(c.state.ExecSessions))
	for id := range c.state.ExecSessions {
		ids = append(ids, id)
	}

	return ids, nil
}

// execSessionNoCopy returns the exec session associated with id.
// Note that the session is not a deep copy.
func (c *Container) execSessionNoCopy(id string) (*ExecSession, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	session, ok := c.state.ExecSessions[id]
	if !ok {
		return nil, fmt.Errorf("no exec session with ID %s found in container %s: %w", id, c.ID(), define.ErrNoSuchExecSession)
	}

	// Make sure to update the exec session status if needed; see #18424.
	alive, err := c.ociRuntime.ExecUpdateStatus(c, id)
	if err != nil {
		return nil, err
	}
	if !alive {
		if err := retrieveAndWriteExecExitCode(c, session.ID()); err != nil {
			return nil, err
		}
	}

	return session, nil
}

// ExecSession retrieves detailed information on a single active exec session in
// a container
func (c *Container) ExecSession(id string) (*ExecSession, error) {
	session, err := c.execSessionNoCopy(id)
	if err != nil {
		return nil, err
	}

	returnSession := new(ExecSession)
	if err := JSONDeepCopy(session, returnSession); err != nil {
		return nil, fmt.Errorf("copying contents of container %s exec session %s: %w", c.ID(), session.ID(), err)
	}

	return returnSession, nil
}

// BindMounts retrieves bind mounts that were created by libpod and will be
// added to the container.
// All these mounts except /dev/shm are ignored if a mount in the given spec
// has the same destination.
// These mounts include /etc/resolv.conf, /etc/hosts, and /etc/hostname.
// The return is formatted as a map from destination (mountpoint in the
// container) to source (path of the file that will be mounted into the
// container).
// If the container has not been started yet, an empty map will be returned, as
// the files in question are only created when the container is started.
func (c *Container) BindMounts() (map[string]string, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	newMap := make(map[string]string, len(c.state.BindMounts))

	for key, val := range c.state.BindMounts {
		newMap[key] = val
	}

	return newMap, nil
}

// StoppedByUser returns whether the container was last stopped by an explicit
// call to the Stop() API, or whether it exited naturally.
func (c *Container) StoppedByUser() (bool, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return false, err
		}
	}

	return c.state.StoppedByUser, nil
}

// StartupHCPassed returns whether the container's startup healthcheck passed.
func (c *Container) StartupHCPassed() (bool, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return false, err
		}
	}

	return c.state.StartupHCPassed, nil
}

// Misc Accessors
// Most will require locking

// NamespacePath returns the path of one of the container's namespaces
// If the container is not running, an error will be returned
func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return "", fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}

	return c.namespacePath(linuxNS)
}

// namespacePath returns the path of one of the container's namespaces
// If the container is not running, an error will be returned
func (c *Container) namespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
	if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
		return "", fmt.Errorf("cannot get namespace path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
	}

	if linuxNS == InvalidNS {
		return "", fmt.Errorf("invalid namespace requested from container %s: %w", c.ID(), define.ErrInvalidArg)
	}

	return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
}

// CgroupManager returns the cgroup manager used by the given container.
func (c *Container) CgroupManager() string {
	cgroupManager := c.config.CgroupManager
	if cgroupManager == "" {
		cgroupManager = c.runtime.config.Engine.CgroupManager
	}
	return cgroupManager
}

// CgroupPath returns a cgroups "path" for the given container.
// Note that the container must be running.  Otherwise, an error
// is returned.
func (c *Container) CgroupPath() (string, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return "", fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	return c.cGroupPath()
}

// cGroupPath returns a cgroups "path" for the given container.
// Note that the container must be running.  Otherwise, an error
// is returned.
// NOTE: only call this when owning the container's lock.
func (c *Container) cGroupPath() (string, error) {
	if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
		return "", fmt.Errorf("this container is not creating cgroups: %w", define.ErrNoCgroups)
	}
	if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
		return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
	}

	// Read /proc/{PID}/cgroup and find the *longest* cgroup entry.  That's
	// needed to account for hacks in cgroups v1, where each line in the
	// file could potentially point to a cgroup.  The longest one, however,
	// is the libpod-specific one we're looking for.
	//
	// See #8397 on the need for the longest-path look up.
	//
	// There is another workaround for containers running systemd as the
	// payload: such containers move themselves into a child subgroup of
	// the named systemd cgroup hierarchy.  Ignore any named cgroups during
	// the lookup.
	// See #10602 for more details.
	procPath := fmt.Sprintf("/proc/%d/cgroup", c.state.PID)
	lines, err := os.ReadFile(procPath)
	if err != nil {
		// If the file doesn't exist, it means the container could have been terminated,
		// so report it.  Also check for ESRCH, which means the container could have been
		// terminated after the file under /proc was opened but before it was read.
		if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.ESRCH) {
			return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
		}
		return "", err
	}

	var cgroupPath string
	for _, line := range bytes.Split(lines, []byte("\n")) {
		// skip the last empty line
		if len(line) == 0 {
			continue
		}
		// cgroups(7) nails it down to three fields, with the 3rd
		// pointing to the cgroup's path, which works both on v1 and v2.
		fields := bytes.Split(line, []byte(":"))
		if len(fields) != 3 {
			logrus.Debugf("Error parsing cgroup: expected 3 fields but got %d: %s", len(fields), procPath)
			continue
		}
		// Ignore named cgroups like name=systemd.
		if bytes.Contains(fields[1], []byte("=")) {
			continue
		}
		path := string(fields[2])
		if len(path) > len(cgroupPath) {
			cgroupPath = path
		}
	}

	if len(cgroupPath) == 0 {
		return "", fmt.Errorf("could not find any cgroup in %q", procPath)
	}

	cgroupManager := c.CgroupManager()
	switch {
	case c.config.CgroupsMode == cgroupSplit:
		name := fmt.Sprintf("/libpod-payload-%s/", c.ID())
		if index := strings.LastIndex(cgroupPath, name); index >= 0 {
			return cgroupPath[:index+len(name)-1], nil
		}
	case cgroupManager == config.CgroupfsCgroupsManager:
		name := fmt.Sprintf("/libpod-%s/", c.ID())
		if index := strings.LastIndex(cgroupPath, name); index >= 0 {
			return cgroupPath[:index+len(name)-1], nil
		}
	case cgroupManager == config.SystemdCgroupsManager:
		// When running under systemd, try to detect the scope that was requested
		// to be created.  It improves the heuristic since we report the first
		// cgroup that was created instead of the cgroup where PID 1 might have
		// moved to.
		name := fmt.Sprintf("/libpod-%s.scope/", c.ID())
		if index := strings.LastIndex(cgroupPath, name); index >= 0 {
			return cgroupPath[:index+len(name)-1], nil
		}
	}

	return cgroupPath, nil
}
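
// Illustrative sketch (not part of upstream): a hypothetical /proc/<pid>/cgroup
// as parsed above. Named hierarchies are skipped, and the longest remaining
// path wins before the manager-specific suffix trimming.
//
//	4:pids:/libpod_parent/libpod-<id>
//	1:name=systemd:/system.slice/...     <- ignored: named cgroup
//	0::/libpod_parent/libpod-<id>/extra
//
// Here "/libpod_parent/libpod-<id>/extra" is the longest plain path; with the
// cgroupfs manager it would be trimmed back to "/libpod_parent/libpod-<id>".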

// RootFsSize returns the root FS size of the container
func (c *Container) RootFsSize() (int64, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return -1, fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	return c.rootFsSize()
}

// RWSize returns the rw size of the container
func (c *Container) RWSize() (int64, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()
		if err := c.syncContainer(); err != nil {
			return -1, fmt.Errorf("updating container %s state: %w", c.ID(), err)
		}
	}
	return c.rwSize()
}

// IDMappings returns the UID/GID mapping used for the container
func (c *Container) IDMappings() storage.IDMappingOptions {
	return c.config.IDMappings
}

// RootUID returns the root user mapping from the container
func (c *Container) RootUID() int {
	if len(c.config.IDMappings.UIDMap) == 1 && c.config.IDMappings.UIDMap[0].Size == 1 {
		return c.config.IDMappings.UIDMap[0].HostID
	}
	for _, uidmap := range c.config.IDMappings.UIDMap {
		if uidmap.ContainerID == 0 {
			return uidmap.HostID
		}
	}
	return 0
}

// RootGID returns the root group mapping from the container
func (c *Container) RootGID() int {
	if len(c.config.IDMappings.GIDMap) == 1 && c.config.IDMappings.GIDMap[0].Size == 1 {
		return c.config.IDMappings.GIDMap[0].HostID
	}
	for _, gidmap := range c.config.IDMappings.GIDMap {
		if gidmap.ContainerID == 0 {
			return gidmap.HostID
		}
	}
	return 0
}

// IsInfra returns whether the container is an infra container
func (c *Container) IsInfra() bool {
	return c.config.IsInfra
}

// IsInitCtr returns whether the container is an init container
func (c *Container) IsInitCtr() bool {
	return len(c.config.InitContainerType) > 0
}

// IsReadOnly returns whether the container is running in read-only mode
func (c *Container) IsReadOnly() bool {
	return c.config.Spec.Root.Readonly
}

// NetworkDisabled returns whether the container is running with a disabled network
func (c *Container) NetworkDisabled() (bool, error) {
	if c.config.NetNsCtr != "" {
		container, err := c.runtime.state.Container(c.config.NetNsCtr)
		if err != nil {
			return false, err
		}
		return container.NetworkDisabled()
	}
	return networkDisabled(c)
}

// HostNetwork returns whether the container will use the host's network
// namespace.
func (c *Container) HostNetwork() bool {
	if c.config.CreateNetNS || c.config.NetNsCtr != "" {
		return false
	}
	if c.config.Spec.Linux != nil {
		for _, ns := range c.config.Spec.Linux.Namespaces {
			if ns.Type == spec.NetworkNamespace {
				return false
			}
		}
	}
	return true
}

// HasHealthCheck returns whether a health check is defined for the container
func (c *Container) HasHealthCheck() bool {
	return c.config.HealthCheckConfig != nil
}

// HealthCheckConfig returns the command and timing attributes of the health check
func (c *Container) HealthCheckConfig() *manifest.Schema2HealthConfig {
	return c.config.HealthCheckConfig
}

// AutoRemove indicates whether the container will be removed after it is executed
func (c *Container) AutoRemove() bool {
	spec := c.config.Spec
	if spec.Annotations == nil {
		return false
	}
	return spec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue
}

// Timezone returns the timezone configured inside the container.
// "local" means it has the same timezone as the host machine.
func (c *Container) Timezone() string {
	return c.config.Timezone
}

// Umask returns the umask bits configured inside the container.
func (c *Container) Umask() string {
	return c.config.Umask
}

// Secrets returns the secrets in the container
func (c *Container) Secrets() []*ContainerSecret {
	return c.config.Secrets
}

// Networks gets all the networks this container is connected to.
// Please do NOT use ctr.config.Networks, as the networks can diverge from
// those values at runtime via network connect and disconnect.
// Returns an array of network names, or an error.
func (c *Container) Networks() ([]string, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}

	networks, err := c.networks()
	if err != nil {
		return nil, err
	}

	names := make([]string, 0, len(networks))

	for name := range networks {
		names = append(names, name)
	}

	return names, nil
}

// NetworkMode gets the configured network mode for the container.
// The actual value is retrieved from the database.
func (c *Container) NetworkMode() string {
	networkMode := ""
	ctrSpec := c.config.Spec

	switch {
	case c.config.CreateNetNS:
		// We actually store the network
		// mode for Slirp and Bridge, so
		// we can just use that
		networkMode = string(c.config.NetMode)
	case c.config.NetNsCtr != "":
		networkMode = fmt.Sprintf("container:%s", c.config.NetNsCtr)
	default:
		// Find the spec's network namespace.
		// If there is none, it's host networking.
		// If there is one and it has a path, it's "ns:".
		foundNetNS := false
		for _, ns := range ctrSpec.Linux.Namespaces {
			if ns.Type == spec.NetworkNamespace {
				foundNetNS = true
				if ns.Path != "" {
					networkMode = fmt.Sprintf("ns:%s", ns.Path)
				} else {
					// We're making a network ns, but not
					// configuring with Slirp or CNI. That
					// means it's --net=none
					networkMode = "none"
				}
				break
			}
		}
		if !foundNetNS {
			networkMode = "host"
		}
	}
	return networkMode
}

// networks is the unlocked accessor for the container's networks.
func (c *Container) networks() (map[string]types.PerNetworkOptions, error) {
	return c.runtime.state.GetNetworks(c)
}

// getInterfaceByName returns a formatted interface name for a given
// network, along with a bool indicating whether the network exists
func (d ContainerNetworkDescriptions) getInterfaceByName(networkName string) (string, bool) {
	val, exists := d[networkName]
	if !exists {
		return "", exists
	}
	return fmt.Sprintf("eth%d", val), exists
}
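
// Illustrative example (not part of upstream):
//
//	d := ContainerNetworkDescriptions{"podman": 0, "mynet": 1}
//	d.getInterfaceByName("mynet") // => "eth1", true
//	d.getInterfaceByName("other") // => "", false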

// GetNetworkStatus returns the current network status for this container.
// This returns a map without deep copying, which means it should only ever
// be used for read-only access; do not modify the status.
func (c *Container) GetNetworkStatus() (map[string]types.StatusBlock, error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		if err := c.syncContainer(); err != nil {
			return nil, err
		}
	}
	return c.getNetworkStatus(), nil
}

// getNetworkStatus gets the current network status from the state. This function
// should be used instead of reading c.state.NetworkStatus directly.
func (c *Container) getNetworkStatus() map[string]types.StatusBlock {
	return c.state.NetworkStatus
}

// NamespaceMode returns the mode of the given namespace for this container:
// "container:<id>", "ns:<path>", "private", or "host".
func (c *Container) NamespaceMode(ns spec.LinuxNamespaceType, ctrSpec *spec.Spec) string {
	switch ns {
	case spec.UTSNamespace:
		if c.config.UTSNsCtr != "" {
			return fmt.Sprintf("container:%s", c.config.UTSNsCtr)
		}
	case spec.CgroupNamespace:
		if c.config.CgroupNsCtr != "" {
			return fmt.Sprintf("container:%s", c.config.CgroupNsCtr)
		}
	case spec.IPCNamespace:
		if c.config.IPCNsCtr != "" {
			return fmt.Sprintf("container:%s", c.config.IPCNsCtr)
		}
	case spec.PIDNamespace:
		if c.config.PIDNsCtr != "" {
			return fmt.Sprintf("container:%s", c.config.PIDNsCtr)
		}
	case spec.UserNamespace:
		if c.config.UserNsCtr != "" {
			return fmt.Sprintf("container:%s", c.config.UserNsCtr)
		}
	case spec.NetworkNamespace:
		if c.config.NetNsCtr != "" {
			return fmt.Sprintf("container:%s", c.config.NetNsCtr)
		}
	case spec.MountNamespace:
		if c.config.MountNsCtr != "" {
			return fmt.Sprintf("container:%s", c.config.MountNsCtr)
		}
	}

	if ctrSpec.Linux != nil {
		// Locate the spec's given namespace.
		// If there is none, it's namespace=host.
		// If there is one and it has a path, it's "ns:".
		// If there is no path, it's the default - the empty string.
		for _, availableNS := range ctrSpec.Linux.Namespaces {
			if availableNS.Type == ns {
				if availableNS.Path != "" {
					return fmt.Sprintf("ns:%s", availableNS.Path)
				}
				return "private"
			}
		}
	}
	return "host"
}
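
// Illustrative examples (not part of upstream) of the values NamespaceMode
// can return, per the branches above:
//
//	"container:<id>"    // namespace shared with another container
//	"ns:/proc/1/ns/net" // joined via an explicit path in the spec
//	"private"           // a spec namespace entry with no path: newly created
//	"host"              // no matching namespace entry in the spec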