18
"github.com/containers/buildah/pkg/parse"
19
"github.com/containers/common/libimage"
20
"github.com/containers/common/libnetwork/network"
21
nettypes "github.com/containers/common/libnetwork/types"
22
"github.com/containers/common/pkg/cgroups"
23
"github.com/containers/common/pkg/config"
24
"github.com/containers/common/pkg/secrets"
25
systemdCommon "github.com/containers/common/pkg/systemd"
26
"github.com/containers/image/v5/pkg/sysregistriesv2"
27
is "github.com/containers/image/v5/storage"
28
"github.com/containers/image/v5/types"
29
"github.com/containers/podman/v5/libpod/define"
30
"github.com/containers/podman/v5/libpod/events"
31
"github.com/containers/podman/v5/libpod/lock"
32
"github.com/containers/podman/v5/libpod/plugin"
33
"github.com/containers/podman/v5/libpod/shutdown"
34
"github.com/containers/podman/v5/pkg/rootless"
35
"github.com/containers/podman/v5/pkg/systemd"
36
"github.com/containers/podman/v5/pkg/util"
37
"github.com/containers/storage"
38
"github.com/containers/storage/pkg/fileutils"
39
"github.com/containers/storage/pkg/lockfile"
40
"github.com/containers/storage/pkg/unshare"
41
"github.com/docker/docker/pkg/namesgenerator"
42
jsoniter "github.com/json-iterator/go"
43
spec "github.com/opencontainers/runtime-spec/specs-go"
44
"github.com/sirupsen/logrus"
47
// Set up the JSON library for all of Libpod
48
var json = jsoniter.ConfigCompatibleWithStandardLibrary
50
// A RuntimeOption is a functional option which alters the Runtime created by
52
type RuntimeOption func(*Runtime) error
54
type storageSet struct {
59
GraphDriverNameSet bool
63
// Runtime is the core libpod runtime
66
storageConfig storage.StoreOptions
71
storageService *storageService
72
imageContext *types.SystemContext
73
defaultOCIRuntime OCIRuntime
74
ociRuntimes map[string]OCIRuntime
76
network nettypes.ContainerNetwork
78
libimageRuntime *libimage.Runtime
79
libimageEventsShutdown chan bool
80
lockManager lock.Manager
83
workerChannel chan func()
84
workerGroup sync.WaitGroup
86
// syslog describes whenever logrus should log to the syslog as well.
87
// Note that the syslog hook will be enabled early in cmd/podman/syslog_linux.go
88
// This bool is just needed so that we can set it for netavark interface.
91
// doReset indicates that the runtime will perform a system reset.
92
// A reset will remove all containers, pods, volumes, networks, etc.
93
// A number of validation checks are relaxed, or replaced with logic to
94
// remove as much of the runtime as possible if they fail. This ensures
95
// that even a broken Libpod can still be removed via `system reset`.
96
// This does not actually perform a `system reset`. That is done by
97
// calling "Reset()" on the returned runtime.
99
// doRenumber indicates that the runtime will perform a system renumber.
100
// A renumber will reassign lock numbers for all containers, pods, etc.
101
// This will not perform the renumber itself, but will ignore some
102
// errors related to lock initialization so a renumber can be performed
103
// if something has gone wrong.
106
// valid indicates whether the runtime is ready to use.
107
// valid is set to true when a runtime is returned from GetRuntime(),
108
// and remains true until the runtime is shut down (rendering its
109
// storage unusable). When valid is false, the runtime cannot be used.
112
// mechanism to read and write even logs
113
eventer events.Eventer
115
// secretsManager manages secrets
116
secretsManager *secrets.SecretsManager
119
// SetXdgDirs ensures the XDG_RUNTIME_DIR env and XDG_CONFIG_HOME variables are set.
120
// containers/image uses XDG_RUNTIME_DIR to locate the auth file, XDG_CONFIG_HOME is
121
// use for the containers.conf configuration file.
122
func SetXdgDirs() error {
123
if !rootless.IsRootless() {
127
// Set up XDG_RUNTIME_DIR
128
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
130
if runtimeDir == "" {
132
runtimeDir, err = util.GetRootlessRuntimeDir()
137
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
138
return fmt.Errorf("cannot set XDG_RUNTIME_DIR: %w", err)
141
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
142
sessionAddr := filepath.Join(runtimeDir, "bus")
143
if err := fileutils.Exists(sessionAddr); err == nil {
144
os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr))
148
// Set up XDG_CONFIG_HOME
149
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
150
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
154
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
155
return fmt.Errorf("cannot set XDG_CONFIG_HOME: %w", err)
161
// NewRuntime creates a new container runtime
162
// Options can be passed to override the default configuration for the runtime
163
func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) {
164
conf, err := config.Default()
168
return newRuntimeFromConfig(ctx, conf, options...)
171
// NewRuntimeFromConfig creates a new container runtime using the given
172
// configuration file for its default configuration. Passed RuntimeOption
173
// functions can be used to mutate this configuration further.
174
// An error will be returned if the configuration file at the given path does
175
// not exist or cannot be loaded
176
func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) {
177
return newRuntimeFromConfig(ctx, userConfig, options...)
180
func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) {
181
runtime := new(Runtime)
183
if conf.Engine.OCIRuntime == "" {
184
conf.Engine.OCIRuntime = "runc"
185
// If we're running on cgroups v2, default to using crun.
186
if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
187
conf.Engine.OCIRuntime = "crun"
191
runtime.config = conf
193
if err := SetXdgDirs(); err != nil {
197
storeOpts, err := storage.DefaultStoreOptions()
201
runtime.storageConfig = storeOpts
203
// Overwrite config with user-given configuration options
204
for _, opt := range options {
205
if err := opt(runtime); err != nil {
206
return nil, fmt.Errorf("configuring runtime: %w", err)
210
if err := shutdown.Register("libpod", func(sig os.Signal) error {
211
// For `systemctl stop podman.service` support, exit code should be 0
212
if sig == syscall.SIGTERM {
217
}); err != nil && !errors.Is(err, shutdown.ErrHandlerExists) {
218
logrus.Errorf("Registering shutdown handler for libpod: %v", err)
221
if err := shutdown.Start(); err != nil {
222
return nil, fmt.Errorf("starting shutdown signal handler: %w", err)
225
if err := makeRuntime(ctx, runtime); err != nil {
229
runtime.config.CheckCgroupsAndAdjustConfig()
234
func getLockManager(runtime *Runtime) (lock.Manager, error) {
236
var manager lock.Manager
238
switch runtime.config.Engine.LockType {
240
lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
241
manager, err = lock.OpenFileLockManager(lockPath)
243
if errors.Is(err, os.ErrNotExist) {
244
manager, err = lock.NewFileLockManager(lockPath)
246
return nil, fmt.Errorf("failed to get new file lock manager: %w", err)
254
lockPath := define.DefaultSHMLockPath
255
if rootless.IsRootless() {
256
lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
258
// Set up the lock manager
259
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
262
case errors.Is(err, os.ErrNotExist):
263
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
265
return nil, fmt.Errorf("failed to get new shm lock manager: %w", err)
267
case errors.Is(err, syscall.ERANGE) && runtime.doRenumber:
268
logrus.Debugf("Number of locks does not match - removing old locks")
270
// ERANGE indicates a lock numbering mismatch.
271
// Since we're renumbering, this is not fatal.
272
// Remove the earlier set of locks and recreate.
273
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
274
return nil, fmt.Errorf("removing libpod locks file %s: %w", lockPath, err)
277
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
286
return nil, fmt.Errorf("unknown lock type %s: %w", runtime.config.Engine.LockType, define.ErrInvalidArg)
291
func getDBState(runtime *Runtime) (State, error) {
292
// TODO - if we further break out the state implementation into
293
// libpod/state, the config could take care of the code below. It
294
// would further allow to move the types and consts into a coherent
296
backend, err := config.ParseDBBackend(runtime.config.Engine.DBBackend)
301
// get default boltdb path
302
baseDir := runtime.config.Engine.StaticDir
303
if runtime.storageConfig.TransientStore {
304
baseDir = runtime.config.Engine.TmpDir
306
boltDBPath := filepath.Join(baseDir, "bolt_state.db")
309
case config.DBBackendDefault:
310
// for backwards compatibility check if boltdb exists, if it does not we use sqlite
311
if err := fileutils.Exists(boltDBPath); err != nil {
312
if errors.Is(err, fs.ErrNotExist) {
313
// need to set DBBackend string so podman info will show the backend name correctly
314
runtime.config.Engine.DBBackend = config.DBBackendSQLite.String()
315
return NewSqliteState(runtime)
317
// Return error here some other problem with the boltdb file, rather than silently
318
// switch to sqlite which would be hard to debug for the user return the error back
319
// as this likely a real bug.
322
runtime.config.Engine.DBBackend = config.DBBackendBoltDB.String()
324
case config.DBBackendBoltDB:
325
return NewBoltState(boltDBPath, runtime)
326
case config.DBBackendSQLite:
327
return NewSqliteState(runtime)
329
return nil, fmt.Errorf("unrecognized database backend passed (%q): %w", backend.String(), define.ErrInvalidArg)
333
// Make a new runtime based on the given configuration
334
// Sets up containers/storage, state store, OCI runtime
335
func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
336
// Find a working conmon binary
337
cPath, err := runtime.config.FindConmon()
341
runtime.conmonPath = cPath
343
if runtime.config.Engine.StaticDir == "" {
344
runtime.config.Engine.StaticDir = filepath.Join(runtime.storageConfig.GraphRoot, "libpod")
345
runtime.storageSet.StaticDirSet = true
348
if runtime.config.Engine.VolumePath == "" {
349
runtime.config.Engine.VolumePath = filepath.Join(runtime.storageConfig.GraphRoot, "volumes")
350
runtime.storageSet.VolumePathSet = true
353
// Make the static files directory if it does not exist
354
if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
355
// The directory is allowed to exist
356
if !errors.Is(err, os.ErrExist) {
357
return fmt.Errorf("creating runtime static files directory %q: %w", runtime.config.Engine.StaticDir, err)
361
// Create the TmpDir if needed
362
if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
363
return fmt.Errorf("creating runtime temporary files directory: %w", err)
367
runtime.state, err = getDBState(runtime)
372
// Grab config from the database so we can reset some defaults
373
dbConfig, err := runtime.state.GetDBConfig()
376
// We can at least delete the DB and the static files
378
// Can't safely touch anything else because we aren't
379
// sure of other directories.
380
if err := runtime.state.Close(); err != nil {
381
logrus.Errorf("Closing database connection: %v", err)
383
if err := os.RemoveAll(runtime.config.Engine.StaticDir); err != nil {
384
logrus.Errorf("Removing static files directory %v: %v", runtime.config.Engine.StaticDir, err)
389
return fmt.Errorf("retrieving runtime configuration from database: %w", err)
392
runtime.mergeDBConfig(dbConfig)
394
checkCgroups2UnifiedMode(runtime)
396
logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
397
logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
398
logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot)
399
logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir)
400
logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir)
401
logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath)
402
logrus.Debugf("Using transient store: %v", runtime.storageConfig.TransientStore)
404
// Validate our config against the database, now that we've set our
405
// final storage configuration
406
if err := runtime.state.ValidateDBConfig(runtime); err != nil {
407
// If we are performing a storage reset: continue on with a
408
// warning. Otherwise we can't `system reset` after a change to
410
if !runtime.doReset {
413
logrus.Errorf("Runtime paths differ from those stored in database, storage reset may not remove all files")
416
if runtime.config.Engine.Namespace != "" {
417
return fmt.Errorf("namespaces are not supported by this version of Libpod, please unset the `namespace` field in containers.conf: %w", define.ErrNotImplemented)
420
needsUserns := os.Geteuid() != 0
422
hasCapSysAdmin, err := unshare.HasCapSysAdmin()
426
needsUserns = !hasCapSysAdmin
428
// Set up containers/storage
429
var store storage.Store
431
logrus.Debug("Not configuring container store")
432
} else if err := runtime.configureStore(); err != nil {
433
// Make a best-effort attempt to clean up if performing a
436
if err := runtime.removeAllDirs(); err != nil {
437
logrus.Errorf("Removing libpod directories: %v", err)
441
return fmt.Errorf("configure storage: %w", err)
444
if retErr != nil && store != nil {
445
// Don't forcibly shut down
446
// We could be opening a store in use by another libpod
447
if _, err := store.Shutdown(false); err != nil {
448
logrus.Errorf("Removing store for partially-created runtime: %s", err)
453
// Set up the eventer
454
eventer, err := runtime.newEventer()
458
runtime.eventer = eventer
460
// Set up containers/image
461
if runtime.imageContext == nil {
462
runtime.imageContext = &types.SystemContext{
463
BigFilesTemporaryDir: parse.GetTempDir(),
466
runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath
468
// Get us at least one working OCI runtime.
469
runtime.ociRuntimes = make(map[string]OCIRuntime)
471
// Initialize remaining OCI runtimes
472
for name, paths := range runtime.config.Engine.OCIRuntimes {
473
ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
475
// Don't fatally error.
476
// This will allow us to ship configs including optional
477
// runtimes that might not be installed (crun, kata).
478
// Only an infof so default configs don't spec errors.
479
logrus.Debugf("Configured OCI runtime %s initialization failed: %v", name, err)
483
runtime.ociRuntimes[name] = ociRuntime
486
// Do we have a default OCI runtime?
487
if runtime.config.Engine.OCIRuntime != "" {
488
// If the string starts with / it's a path to a runtime
490
if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") {
491
ociRuntime, err := newConmonOCIRuntime(runtime.config.Engine.OCIRuntime, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
496
runtime.ociRuntimes[runtime.config.Engine.OCIRuntime] = ociRuntime
497
runtime.defaultOCIRuntime = ociRuntime
499
ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
501
return fmt.Errorf("default OCI runtime %q not found: %w", runtime.config.Engine.OCIRuntime, define.ErrInvalidArg)
503
runtime.defaultOCIRuntime = ociRuntime
506
logrus.Debugf("Using OCI runtime %q", runtime.defaultOCIRuntime.Path())
508
// Do we have at least one valid OCI runtime?
509
if len(runtime.ociRuntimes) == 0 {
510
return fmt.Errorf("no OCI runtime has been configured: %w", define.ErrInvalidArg)
513
// Do we have a default runtime?
514
if runtime.defaultOCIRuntime == nil {
515
return fmt.Errorf("no default OCI runtime was configured: %w", define.ErrInvalidArg)
518
// the store is only set up when we are in the userns so we do the same for the network interface
520
netBackend, netInterface, err := network.NetworkBackend(runtime.store, runtime.config, runtime.syslog)
524
runtime.config.Network.NetworkBackend = string(netBackend)
525
runtime.network = netInterface
528
// We now need to see if the system has restarted
529
// We check for the presence of a file in our tmp directory to verify this
530
// This check must be locked to prevent races
531
runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
532
aliveLock, err := runtime.getRuntimeAliveLock()
534
return fmt.Errorf("acquiring runtime init lock: %w", err)
536
// Acquire the lock and hold it until we return
537
// This ensures that no two processes will be in runtime.refresh at once
540
unLockFunc := aliveLock.Unlock
542
if unLockFunc != nil {
547
err = fileutils.Exists(runtimeAliveFile)
549
// If we need to refresh, then it is safe to assume there are
550
// no containers running. Create immediately a namespace, as
551
// we will need to access the storage.
553
// warn users if mode is rootless and cgroup manager is systemd
554
// and no valid systemd session is present
555
// warn only whenever new namespace is created
556
if runtime.config.Engine.CgroupManager == config.SystemdCgroupsManager {
557
unified, _ := cgroups.IsCgroup2UnifiedMode()
558
if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) {
559
logrus.Debug("Invalid systemd user session for current user")
564
pausePid, err := util.GetRootlessPauseProcessPidPath()
566
return fmt.Errorf("could not get pause process pid file path: %w", err)
569
// create the path in case it does not already exists
570
// https://github.com/containers/podman/issues/8539
571
if err := os.MkdirAll(filepath.Dir(pausePid), 0o700); err != nil {
572
return fmt.Errorf("could not create pause process pid file directory: %w", err)
575
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
580
// Check if the pause process was created. If it was created, then
581
// move it to its own systemd scope.
582
systemdCommon.MovePauseProcessToScope(pausePid)
584
// gocritic complains because defer is not run on os.Exit()
585
// However this is fine because the lock is released anyway when the process exits
590
// If the file doesn't exist, we need to refresh the state
591
// This will trigger on first use as well, but refreshing an
592
// empty state only creates a single file
593
// As such, it's not really a performance concern
594
if errors.Is(err, os.ErrNotExist) {
597
return fmt.Errorf("reading runtime status file %s: %w", runtimeAliveFile, err)
601
runtime.lockManager, err = getLockManager(runtime)
606
// Mark the runtime as valid - ready to be used, cannot be modified
608
// Need to do this *before* refresh as we can remove containers there.
609
// Should not be a big deal as we don't return it to users until after
613
// If we need to refresh the state, do it now - things are guaranteed to
616
// Ensure we have a store before refresh occurs
617
if runtime.store == nil {
618
if err := runtime.configureStore(); err != nil {
619
return fmt.Errorf("configure storage: %w", err)
623
if err2 := runtime.refresh(ctx, runtimeAliveFile); err2 != nil {
628
// Check current boot ID - will be written to the alive file.
629
if err := runtime.checkBootID(runtimeAliveFile); err != nil {
633
runtime.startWorker()
638
// TmpDir gets the current Libpod temporary files directory.
639
func (r *Runtime) TmpDir() (string, error) {
641
return "", define.ErrRuntimeStopped
644
return r.config.Engine.TmpDir, nil
647
// GetConfig returns the configuration used by the runtime.
648
// Note that the returned value is not a copy and must hence
649
// only be used in a reading fashion.
650
func (r *Runtime) GetConfigNoCopy() (*config.Config, error) {
652
return nil, define.ErrRuntimeStopped
657
// GetConfig returns a copy of the configuration used by the runtime.
658
// Please use GetConfigNoCopy() in case you only want to read from
659
// but not write to the returned config.
660
func (r *Runtime) GetConfig() (*config.Config, error) {
661
rtConfig, err := r.GetConfigNoCopy()
666
config := new(config.Config)
668
// Copy so the caller won't be able to modify the actual config
669
if err := JSONDeepCopy(rtConfig, config); err != nil {
670
return nil, fmt.Errorf("copying config: %w", err)
676
// libimageEventsMap translates a libimage event type to a libpod event status.
677
var libimageEventsMap = map[libimage.EventType]events.Status{
678
libimage.EventTypeImagePull: events.Pull,
679
libimage.EventTypeImagePullError: events.PullError,
680
libimage.EventTypeImagePush: events.Push,
681
libimage.EventTypeImageRemove: events.Remove,
682
libimage.EventTypeImageLoad: events.LoadFromArchive,
683
libimage.EventTypeImageSave: events.Save,
684
libimage.EventTypeImageTag: events.Tag,
685
libimage.EventTypeImageUntag: events.Untag,
686
libimage.EventTypeImageMount: events.Mount,
687
libimage.EventTypeImageUnmount: events.Unmount,
690
// libimageEvents spawns a goroutine which will listen for events on
691
// the libimage.Runtime. The goroutine will be cleaned up implicitly
692
// when the main() exists.
693
func (r *Runtime) libimageEvents() {
694
r.libimageEventsShutdown = make(chan bool)
696
toLibpodEventStatus := func(e *libimage.Event) events.Status {
697
status, found := libimageEventsMap[e.Type]
704
eventChannel := r.libimageRuntime.EventChannel()
708
// Make sure to read and write all events before
710
for len(eventChannel) > 0 {
711
libimageEvent := <-eventChannel
713
ID: libimageEvent.ID,
714
Name: libimageEvent.Name,
715
Status: toLibpodEventStatus(libimageEvent),
716
Time: libimageEvent.Time,
719
if libimageEvent.Error != nil {
720
e.Error = libimageEvent.Error.Error()
722
if err := r.eventer.Write(e); err != nil {
723
logrus.Errorf("Unable to write image event: %q", err)
728
close(r.libimageEventsShutdown)
733
case <-r.libimageEventsShutdown:
735
case <-time.After(100 * time.Millisecond):
741
// DeferredShutdown shuts down the runtime without exposing any
742
// errors. This is only meant to be used when the runtime is being
743
// shutdown within a defer statement; else use Shutdown
744
func (r *Runtime) DeferredShutdown(force bool) {
745
_ = r.Shutdown(force)
748
// Shutdown shuts down the runtime and associated containers and storage
749
// If force is true, containers and mounted storage will be shut down before
750
// cleaning up; if force is false, an error will be returned if there are
751
// still containers running or mounted
752
func (r *Runtime) Shutdown(force bool) error {
757
if r.workerChannel != nil {
759
close(r.workerChannel)
764
// Shutdown all containers if --force is given
766
ctrs, err := r.state.AllContainers(false)
768
logrus.Errorf("Retrieving containers from database: %v", err)
770
for _, ctr := range ctrs {
771
if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil {
772
logrus.Errorf("Stopping container %s: %v", ctr.ID(), err)
779
// If no store was requested, it can be nil and there is no need to
780
// attempt to shut it down
782
// Wait for the events to be written.
783
if r.libimageEventsShutdown != nil {
784
// Tell loop to shutdown
785
r.libimageEventsShutdown <- true
786
// Wait for close to signal shutdown
787
<-r.libimageEventsShutdown
790
// Note that the libimage runtime shuts down the store.
791
if err := r.libimageRuntime.Shutdown(force); err != nil {
792
lastError = fmt.Errorf("shutting down container storage: %w", err)
795
if err := r.state.Close(); err != nil {
796
if lastError != nil {
797
logrus.Error(lastError)
805
// Reconfigures the runtime after a reboot
806
// Refreshes the state, recreating temporary files
807
// Does not check validity as the runtime is not valid until after this has run
808
func (r *Runtime) refresh(ctx context.Context, alivePath string) error {
809
logrus.Debugf("Podman detected system restart - performing state refresh")
811
// Clear state of database if not running in container
812
if !graphRootMounted() {
813
// First clear the state in the database
814
if err := r.state.Refresh(); err != nil {
819
// Next refresh the state of all containers to recreate dirs and
820
// namespaces, and all the pods to recreate cgroups.
821
// Containers, pods, and volumes must also reacquire their locks.
822
ctrs, err := r.state.AllContainers(false)
824
return fmt.Errorf("retrieving all containers from state: %w", err)
826
pods, err := r.state.AllPods()
828
return fmt.Errorf("retrieving all pods from state: %w", err)
830
vols, err := r.state.AllVolumes()
832
return fmt.Errorf("retrieving all volumes from state: %w", err)
834
// No locks are taken during pod, volume, and container refresh.
835
// Furthermore, the pod/volume/container refresh() functions are not
836
// allowed to take locks themselves.
837
// We cannot assume that any pod/volume/container has a valid lock until
838
// after this function has returned.
839
// The runtime alive lock should suffice to provide mutual exclusion
840
// until this has run.
841
for _, ctr := range ctrs {
842
if err := ctr.refresh(); err != nil {
843
logrus.Errorf("Refreshing container %s: %v", ctr.ID(), err)
845
// This is the only place it's safe to use ctr.state.State unlocked
846
// We're holding the alive lock, guaranteed to be the only Libpod on the system right now.
847
if (ctr.AutoRemove() && ctr.state.State == define.ContainerStateExited) || ctr.state.State == define.ContainerStateRemoving {
849
// Don't force-remove, we're supposed to be fresh off a reboot
850
// If we have to force something is seriously wrong
854
// This container should have autoremoved before the
855
// reboot but did not.
857
if _, _, err := r.removeContainer(ctx, ctr, opts); err != nil {
858
logrus.Errorf("Unable to remove container %s which should have autoremoved: %v", ctr.ID(), err)
862
for _, pod := range pods {
863
if err := pod.refresh(); err != nil {
864
logrus.Errorf("Refreshing pod %s: %v", pod.ID(), err)
867
for _, vol := range vols {
868
if err := vol.refresh(); err != nil {
869
logrus.Errorf("Refreshing volume %s: %v", vol.Name(), err)
873
// Create a file indicating the runtime is alive and ready
874
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
876
return fmt.Errorf("creating runtime status file: %w", err)
880
r.NewSystemEvent(events.Refresh)
885
// Info returns the store and host information
886
func (r *Runtime) Info() (*define.Info, error) {
890
// generateName generates a unique name for a container or pod.
891
func (r *Runtime) generateName() (string, error) {
893
name := namesgenerator.GetRandomName(0)
894
// Make sure container with this name does not exist
895
if _, err := r.state.LookupContainer(name); err == nil {
897
} else if !errors.Is(err, define.ErrNoSuchCtr) {
900
// Make sure pod with this name does not exist
901
if _, err := r.state.LookupPod(name); err == nil {
903
} else if !errors.Is(err, define.ErrNoSuchPod) {
908
// The code should never reach here.
911
// Configure store and image runtime
912
func (r *Runtime) configureStore() error {
913
store, err := storage.GetStore(r.storageConfig)
919
is.Transport.SetStore(store)
921
// Set up a storage service for creating container root filesystems from
923
r.storageService = getStorageService(r.store)
925
runtimeOptions := &libimage.RuntimeOptions{
926
SystemContext: r.imageContext,
928
libimageRuntime, err := libimage.RuntimeFromStore(store, runtimeOptions)
932
r.libimageRuntime = libimageRuntime
933
// Run the libimage events routine.
939
// LibimageRuntime ... to allow for a step-by-step migration to libimage.
940
func (r *Runtime) LibimageRuntime() *libimage.Runtime {
941
return r.libimageRuntime
944
// SystemContext returns the imagecontext
945
func (r *Runtime) SystemContext() *types.SystemContext {
946
// Return the context from the libimage runtime. libimage is sensitive
947
// to a number of env vars.
948
return r.libimageRuntime.SystemContext()
951
// GetOCIRuntimePath retrieves the path of the default OCI runtime.
952
func (r *Runtime) GetOCIRuntimePath() string {
953
return r.defaultOCIRuntime.Path()
956
// DefaultOCIRuntime return copy of Default OCI Runtime
957
func (r *Runtime) DefaultOCIRuntime() OCIRuntime {
958
return r.defaultOCIRuntime
961
// StorageConfig retrieves the storage options for the container runtime
962
func (r *Runtime) StorageConfig() storage.StoreOptions {
963
return r.storageConfig
966
func (r *Runtime) GarbageCollect() error {
967
return r.store.GarbageCollect()
970
// RunRoot retrieves the current c/storage temporary directory in use by Libpod.
971
func (r *Runtime) RunRoot() string {
975
return r.store.RunRoot()
978
// GraphRoot retrieves the current c/storage directory in use by Libpod.
979
func (r *Runtime) GraphRoot() string {
983
return r.store.GraphRoot()
986
// GetPodName retrieves the pod name associated with a given full ID.
987
// If the given ID does not correspond to any existing Pod or Container,
988
// ErrNoSuchPod is returned.
989
func (r *Runtime) GetPodName(id string) (string, error) {
991
return "", define.ErrRuntimeStopped
994
return r.state.GetPodName(id)
997
// DBConfig is a set of Libpod runtime configuration settings that are saved in
998
// a State when it is first created, and can subsequently be retrieved.
999
type DBConfig struct {
1008
// mergeDBConfig merges the configuration from the database.
1009
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
1010
c := &r.config.Engine
1011
if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
1012
if r.storageConfig.RunRoot != dbConfig.StorageTmp &&
1013
r.storageConfig.RunRoot != "" {
1014
logrus.Debugf("Overriding run root %q with %q from database",
1015
r.storageConfig.RunRoot, dbConfig.StorageTmp)
1017
r.storageConfig.RunRoot = dbConfig.StorageTmp
1020
if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" {
1021
if r.storageConfig.GraphRoot != dbConfig.StorageRoot &&
1022
r.storageConfig.GraphRoot != "" {
1023
logrus.Debugf("Overriding graph root %q with %q from database",
1024
r.storageConfig.GraphRoot, dbConfig.StorageRoot)
1026
r.storageConfig.GraphRoot = dbConfig.StorageRoot
1029
if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
1030
if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
1031
r.storageConfig.GraphDriverName != "" {
1032
logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files (%q) to resolve. May prevent use of images created by other tools",
1033
r.storageConfig.GraphDriverName, dbConfig.GraphDriver, r.storageConfig.GraphRoot)
1035
r.storageConfig.GraphDriverName = dbConfig.GraphDriver
1038
if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" {
1039
if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
1040
logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
1042
c.StaticDir = dbConfig.LibpodRoot
1045
if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" {
1046
if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
1047
logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
1049
c.TmpDir = dbConfig.LibpodTmp
1052
if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" {
1053
if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
1054
logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
1056
c.VolumePath = dbConfig.VolumePath
1060
func (r *Runtime) EnableLabeling() bool {
1061
return r.config.Containers.EnableLabeling
1064
// Reload reloads the configurations files
1065
func (r *Runtime) Reload() error {
1066
if err := r.reloadContainersConf(); err != nil {
1069
if err := r.reloadStorageConf(); err != nil {
1072
// Invalidate the registries.conf cache. The next invocation will
1074
sysregistriesv2.InvalidateCache()
1078
// reloadContainersConf reloads the containers.conf
1079
func (r *Runtime) reloadContainersConf() error {
1080
config, err := config.Reload()
1085
logrus.Infof("Applied new containers configuration: %v", config)
1089
// reloadStorageConf reloads the storage.conf
1090
func (r *Runtime) reloadStorageConf() error {
1091
configFile, err := storage.DefaultConfigFile()
1095
storage.ReloadConfigurationFile(configFile, &r.storageConfig)
1096
logrus.Infof("Applied new storage configuration: %v", r.storageConfig)
1100
// getVolumePlugin gets a specific volume plugin.
1101
func (r *Runtime) getVolumePlugin(volConfig *VolumeConfig) (*plugin.VolumePlugin, error) {
1102
// There is no plugin for local.
1103
name := volConfig.Driver
1104
timeout := volConfig.Timeout
1105
if name == define.VolumeDriverLocal || name == "" {
1109
pluginPath, ok := r.config.Engine.VolumePlugins[name]
1111
if name == define.VolumeDriverImage {
1114
return nil, fmt.Errorf("no volume plugin with name %s available: %w", name, define.ErrMissingPlugin)
1117
return plugin.GetVolumePlugin(name, pluginPath, timeout, r.config)
1120
// GetSecretsStorageDir returns the directory that the secrets manager should take
1121
func (r *Runtime) GetSecretsStorageDir() string {
1122
return filepath.Join(r.store.GraphRoot(), "secrets")
1125
// SecretsManager returns the directory that the secrets manager should take
1126
func (r *Runtime) SecretsManager() (*secrets.SecretsManager, error) {
1127
if r.secretsManager == nil {
1128
manager, err := secrets.NewManager(r.GetSecretsStorageDir())
1132
r.secretsManager = manager
1134
return r.secretsManager, nil
1137
// graphRootMounted reports whether /run/.containerenv records that the
// storage graph root was mounted into this environment (i.e. contains a
// line reading exactly "graphRootMounted=1"). Any error opening the file
// is treated as "not mounted".
func graphRootMounted() bool {
	f, err := os.Open("/run/.containerenv")
	if err != nil {
		return false
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		if scanner.Text() == "graphRootMounted=1" {
			return true
		}
	}
	return false
}
1153
func (r *Runtime) graphRootMountedFlag(mounts []spec.Mount) string {
1154
root := r.store.GraphRoot()
1155
for _, val := range mounts {
1156
if strings.HasPrefix(root, val.Source) {
1157
return "graphRootMounted=1"
1163
// Returns a copy of the runtime alive lock
1164
func (r *Runtime) getRuntimeAliveLock() (*lockfile.LockFile, error) {
1165
return lockfile.GetLockFile(filepath.Join(r.config.Engine.TmpDir, "alive.lck"))
1168
// Network returns the network interface which is used by the runtime
1169
func (r *Runtime) Network() nettypes.ContainerNetwork {
1173
// GetDefaultNetworkName returns the network interface which is used by the runtime
1174
func (r *Runtime) GetDefaultNetworkName() string {
1175
return r.config.Network.DefaultNetwork
1178
// RemoteURI returns the API server URI
1179
func (r *Runtime) RemoteURI() string {
1180
return r.config.Engine.RemoteURI
1183
// SetRemoteURI records the API server URI
1184
func (r *Runtime) SetRemoteURI(uri string) {
1185
r.config.Engine.RemoteURI = uri
1188
// Get information on potential lock conflicts.
1189
// Returns a map of lock number to object(s) using the lock, formatted as
1190
// "container <id>" or "volume <id>" or "pod <id>", and an array of locks that
1191
// are currently being held, formatted as []uint32.
1192
// If the map returned is not empty, you should immediately renumber locks on
1193
// the runtime, because you have a deadlock waiting to happen.
1194
func (r *Runtime) LockConflicts() (map[uint32][]string, []uint32, error) {
1195
// Make an internal map to store what lock is associated with what
1196
locksInUse := make(map[uint32][]string)
1198
ctrs, err := r.state.AllContainers(false)
1200
return nil, nil, err
1202
for _, ctr := range ctrs {
1203
lockNum := ctr.lock.ID()
1204
ctrString := fmt.Sprintf("container %s", ctr.ID())
1205
locksInUse[lockNum] = append(locksInUse[lockNum], ctrString)
1208
pods, err := r.state.AllPods()
1210
return nil, nil, err
1212
for _, pod := range pods {
1213
lockNum := pod.lock.ID()
1214
podString := fmt.Sprintf("pod %s", pod.ID())
1215
locksInUse[lockNum] = append(locksInUse[lockNum], podString)
1218
volumes, err := r.state.AllVolumes()
1220
return nil, nil, err
1222
for _, vol := range volumes {
1223
lockNum := vol.lock.ID()
1224
volString := fmt.Sprintf("volume %s", vol.Name())
1225
locksInUse[lockNum] = append(locksInUse[lockNum], volString)
1228
// Now go through and find any entries with >1 item associated
1229
toReturn := make(map[uint32][]string)
1230
for lockNum, objects := range locksInUse {
1231
// If debug logging is requested, just spit out *every* lock in
1233
logrus.Debugf("Lock number %d is in use by %v", lockNum, objects)
1235
if len(objects) > 1 {
1236
toReturn[lockNum] = objects
1240
locksHeld, err := r.lockManager.LocksHeld()
1242
if errors.Is(err, define.ErrNotImplemented) {
1243
logrus.Warnf("Could not retrieve currently taken locks as the lock backend does not support this operation")
1244
return toReturn, []uint32{}, nil
1247
return nil, nil, err
1250
return toReturn, locksHeld, nil