podman / runtime.go (1251 lines, 38.9 KB)
//go:build !remote

package libpod

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/containers/buildah/pkg/parse"
	"github.com/containers/common/libimage"
	"github.com/containers/common/libnetwork/network"
	nettypes "github.com/containers/common/libnetwork/types"
	"github.com/containers/common/pkg/cgroups"
	"github.com/containers/common/pkg/config"
	"github.com/containers/common/pkg/secrets"
	systemdCommon "github.com/containers/common/pkg/systemd"
	"github.com/containers/image/v5/pkg/sysregistriesv2"
	is "github.com/containers/image/v5/storage"
	"github.com/containers/image/v5/types"
	"github.com/containers/podman/v5/libpod/define"
	"github.com/containers/podman/v5/libpod/events"
	"github.com/containers/podman/v5/libpod/lock"
	"github.com/containers/podman/v5/libpod/plugin"
	"github.com/containers/podman/v5/libpod/shutdown"
	"github.com/containers/podman/v5/pkg/rootless"
	"github.com/containers/podman/v5/pkg/systemd"
	"github.com/containers/podman/v5/pkg/util"
	"github.com/containers/storage"
	"github.com/containers/storage/pkg/fileutils"
	"github.com/containers/storage/pkg/lockfile"
	"github.com/containers/storage/pkg/unshare"
	"github.com/docker/docker/pkg/namesgenerator"
	jsoniter "github.com/json-iterator/go"
	spec "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/sirupsen/logrus"
)

// Set up the JSON library for all of Libpod.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime.
type RuntimeOption func(*Runtime) error

type storageSet struct {
	RunRootSet         bool
	GraphRootSet       bool
	StaticDirSet       bool
	VolumePathSet      bool
	GraphDriverNameSet bool
	TmpDirSet          bool
}

// Runtime is the core libpod runtime.
type Runtime struct {
	config        *config.Config
	storageConfig storage.StoreOptions
	storageSet    storageSet

	state                  State
	store                  storage.Store
	storageService         *storageService
	imageContext           *types.SystemContext
	defaultOCIRuntime      OCIRuntime
	ociRuntimes            map[string]OCIRuntime
	runtimeFlags           []string
	network                nettypes.ContainerNetwork
	conmonPath             string
	libimageRuntime        *libimage.Runtime
	libimageEventsShutdown chan bool
	lockManager            lock.Manager

	// Worker
	workerChannel chan func()
	workerGroup   sync.WaitGroup

	// syslog describes whether logrus should also log to the syslog.
	// Note that the syslog hook will be enabled early in cmd/podman/syslog_linux.go.
	// This bool is just needed so that we can set it for the netavark interface.
	syslog bool

	// doReset indicates that the runtime will perform a system reset.
	// A reset will remove all containers, pods, volumes, networks, etc.
	// A number of validation checks are relaxed, or replaced with logic to
	// remove as much of the runtime as possible if they fail. This ensures
	// that even a broken Libpod can still be removed via `system reset`.
	// This does not actually perform a `system reset`. That is done by
	// calling "Reset()" on the returned runtime.
	doReset bool
	// doRenumber indicates that the runtime will perform a system renumber.
	// A renumber will reassign lock numbers for all containers, pods, etc.
	// This will not perform the renumber itself, but will ignore some
	// errors related to lock initialization so a renumber can be performed
	// if something has gone wrong.
	doRenumber bool

	// valid indicates whether the runtime is ready to use.
	// valid is set to true when a runtime is returned from GetRuntime(),
	// and remains true until the runtime is shut down (rendering its
	// storage unusable). When valid is false, the runtime cannot be used.
	valid bool

	// eventer is the mechanism used to read and write event logs.
	eventer events.Eventer

	// secretsManager manages secrets
	secretsManager *secrets.SecretsManager
}

// SetXdgDirs ensures the XDG_RUNTIME_DIR and XDG_CONFIG_HOME env variables are set.
// containers/image uses XDG_RUNTIME_DIR to locate the auth file; XDG_CONFIG_HOME is
// used for the containers.conf configuration file.
func SetXdgDirs() error {
	if !rootless.IsRootless() {
		return nil
	}

	// Set up XDG_RUNTIME_DIR
	runtimeDir := os.Getenv("XDG_RUNTIME_DIR")

	if runtimeDir == "" {
		var err error
		runtimeDir, err = util.GetRootlessRuntimeDir()
		if err != nil {
			return err
		}
	}
	if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
		return fmt.Errorf("cannot set XDG_RUNTIME_DIR: %w", err)
	}

	if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
		sessionAddr := filepath.Join(runtimeDir, "bus")
		if err := fileutils.Exists(sessionAddr); err == nil {
			os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr))
		}
	}

	// Set up XDG_CONFIG_HOME
	if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
		cfgHomeDir, err := util.GetRootlessConfigHomeDir()
		if err != nil {
			return err
		}
		if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
			return fmt.Errorf("cannot set XDG_CONFIG_HOME: %w", err)
		}
	}
	return nil
}

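// Illustrative effect of SetXdgDirs for a rootless user with UID 1000 on a
// typical systemd host (the exact paths depend on the system; they are not
// guaranteed by this code):
//
//	XDG_RUNTIME_DIR=/run/user/1000
//	XDG_CONFIG_HOME=$HOME/.config
//	DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus
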
// NewRuntime creates a new container runtime.
// Options can be passed to override the default configuration for the runtime.
func NewRuntime(ctx context.Context, options ...RuntimeOption) (*Runtime, error) {
	conf, err := config.Default()
	if err != nil {
		return nil, err
	}
	return newRuntimeFromConfig(ctx, conf, options...)
}

// NewRuntimeFromConfig creates a new container runtime using the given
// configuration file for its default configuration. Passed RuntimeOption
// functions can be used to mutate this configuration further.
// An error will be returned if the configuration file at the given path does
// not exist or cannot be loaded.
func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (*Runtime, error) {
	return newRuntimeFromConfig(ctx, userConfig, options...)
}

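// A minimal usage sketch (not part of the original file). It assumes
// WithOCIRuntime is one of the RuntimeOption helpers defined elsewhere in
// libpod:
//
//	rt, err := libpod.NewRuntime(context.Background(), libpod.WithOCIRuntime("crun"))
//	if err != nil {
//		logrus.Fatal(err)
//	}
//	defer rt.DeferredShutdown(false)
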
func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (*Runtime, error) {
	runtime := new(Runtime)

	if conf.Engine.OCIRuntime == "" {
		conf.Engine.OCIRuntime = "runc"
		// If we're running on cgroups v2, default to using crun.
		if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 {
			conf.Engine.OCIRuntime = "crun"
		}
	}

	runtime.config = conf

	if err := SetXdgDirs(); err != nil {
		return nil, err
	}

	storeOpts, err := storage.DefaultStoreOptions()
	if err != nil {
		return nil, err
	}
	runtime.storageConfig = storeOpts

	// Overwrite config with user-given configuration options
	for _, opt := range options {
		if err := opt(runtime); err != nil {
			return nil, fmt.Errorf("configuring runtime: %w", err)
		}
	}

	if err := shutdown.Register("libpod", func(sig os.Signal) error {
		// For `systemctl stop podman.service` support, exit code should be 0
		if sig == syscall.SIGTERM {
			os.Exit(0)
		}
		os.Exit(1)
		return nil
	}); err != nil && !errors.Is(err, shutdown.ErrHandlerExists) {
		logrus.Errorf("Registering shutdown handler for libpod: %v", err)
	}

	if err := shutdown.Start(); err != nil {
		return nil, fmt.Errorf("starting shutdown signal handler: %w", err)
	}

	if err := makeRuntime(ctx, runtime); err != nil {
		return nil, err
	}

	runtime.config.CheckCgroupsAndAdjustConfig()

	return runtime, nil
}

func getLockManager(runtime *Runtime) (lock.Manager, error) {
	var err error
	var manager lock.Manager

	switch runtime.config.Engine.LockType {
	case "file":
		lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks")
		manager, err = lock.OpenFileLockManager(lockPath)
		if err != nil {
			if errors.Is(err, os.ErrNotExist) {
				manager, err = lock.NewFileLockManager(lockPath)
				if err != nil {
					return nil, fmt.Errorf("failed to get new file lock manager: %w", err)
				}
			} else {
				return nil, err
			}
		}

	case "", "shm":
		lockPath := define.DefaultSHMLockPath
		if rootless.IsRootless() {
			lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
		}
		// Set up the lock manager
		manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
		if err != nil {
			switch {
			case errors.Is(err, os.ErrNotExist):
				manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
				if err != nil {
					return nil, fmt.Errorf("failed to get new shm lock manager: %w", err)
				}
			case errors.Is(err, syscall.ERANGE) && runtime.doRenumber:
				logrus.Debugf("Number of locks does not match - removing old locks")

				// ERANGE indicates a lock numbering mismatch.
				// Since we're renumbering, this is not fatal.
				// Remove the earlier set of locks and recreate.
				if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
					return nil, fmt.Errorf("removing libpod locks file %s: %w", lockPath, err)
				}

				manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks)
				if err != nil {
					return nil, err
				}
			default:
				return nil, err
			}
		}
	default:
		return nil, fmt.Errorf("unknown lock type %s: %w", runtime.config.Engine.LockType, define.ErrInvalidArg)
	}
	return manager, nil
}

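// The backend selected above corresponds to the lock_type and num_locks keys
// in the [engine] table of containers.conf (a sketch; the values shown are
// examples, not defaults this code enforces):
//
//	[engine]
//	lock_type = "shm"
//	num_locks = 2048
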
func getDBState(runtime *Runtime) (State, error) {
	// TODO - if we further break out the state implementation into
	// libpod/state, the config could take care of the code below.  It
	// would further allow to move the types and consts into a coherent
	// package.
	backend, err := config.ParseDBBackend(runtime.config.Engine.DBBackend)
	if err != nil {
		return nil, err
	}

	// get default boltdb path
	baseDir := runtime.config.Engine.StaticDir
	if runtime.storageConfig.TransientStore {
		baseDir = runtime.config.Engine.TmpDir
	}
	boltDBPath := filepath.Join(baseDir, "bolt_state.db")

	switch backend {
	case config.DBBackendDefault:
		// For backwards compatibility, check if boltdb exists; if it does not, use sqlite.
		if err := fileutils.Exists(boltDBPath); err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				// need to set DBBackend string so podman info will show the backend name correctly
				runtime.config.Engine.DBBackend = config.DBBackendSQLite.String()
				return NewSqliteState(runtime)
			}
			// Some other problem with the boltdb file: return the error
			// rather than silently switching to sqlite, which would be
			// hard for the user to debug. This is likely a real bug.
			return nil, err
		}
		runtime.config.Engine.DBBackend = config.DBBackendBoltDB.String()
		fallthrough
	case config.DBBackendBoltDB:
		return NewBoltState(boltDBPath, runtime)
	case config.DBBackendSQLite:
		return NewSqliteState(runtime)
	default:
		return nil, fmt.Errorf("unrecognized database backend passed (%q): %w", backend.String(), define.ErrInvalidArg)
	}
}

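// Backend selection mirrors the db_backend key in the [engine] table of
// containers.conf; "boltdb" and "sqlite" are the recognized values, and
// leaving it empty triggers the compatibility probe above (sketch):
//
//	[engine]
//	db_backend = "sqlite"
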
// Make a new runtime based on the given configuration.
// Sets up containers/storage, the state store, and the OCI runtime.
func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
	// Find a working conmon binary
	cPath, err := runtime.config.FindConmon()
	if err != nil {
		return err
	}
	runtime.conmonPath = cPath

	if runtime.config.Engine.StaticDir == "" {
		runtime.config.Engine.StaticDir = filepath.Join(runtime.storageConfig.GraphRoot, "libpod")
		runtime.storageSet.StaticDirSet = true
	}

	if runtime.config.Engine.VolumePath == "" {
		runtime.config.Engine.VolumePath = filepath.Join(runtime.storageConfig.GraphRoot, "volumes")
		runtime.storageSet.VolumePathSet = true
	}

	// Make the static files directory if it does not exist
	if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil {
		// The directory is allowed to exist
		if !errors.Is(err, os.ErrExist) {
			return fmt.Errorf("creating runtime static files directory %q: %w", runtime.config.Engine.StaticDir, err)
		}
	}

	// Create the TmpDir if needed
	if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil {
		return fmt.Errorf("creating runtime temporary files directory: %w", err)
	}

	// Set up the state.
	runtime.state, err = getDBState(runtime)
	if err != nil {
		return err
	}

	// Grab config from the database so we can reset some defaults
	dbConfig, err := runtime.state.GetDBConfig()
	if err != nil {
		if runtime.doReset {
			// We can at least delete the DB and the static files
			// directory.
			// Can't safely touch anything else because we aren't
			// sure of other directories.
			if err := runtime.state.Close(); err != nil {
				logrus.Errorf("Closing database connection: %v", err)
			} else {
				if err := os.RemoveAll(runtime.config.Engine.StaticDir); err != nil {
					logrus.Errorf("Removing static files directory %v: %v", runtime.config.Engine.StaticDir, err)
				}
			}
		}

		return fmt.Errorf("retrieving runtime configuration from database: %w", err)
	}

	runtime.mergeDBConfig(dbConfig)

	checkCgroups2UnifiedMode(runtime)

	logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName)
	logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot)
	logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot)
	logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir)
	logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir)
	logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath)
	logrus.Debugf("Using transient store: %v", runtime.storageConfig.TransientStore)

	// Validate our config against the database, now that we've set our
	// final storage configuration
	if err := runtime.state.ValidateDBConfig(runtime); err != nil {
		// If we are performing a storage reset: continue on with a
		// warning. Otherwise we can't `system reset` after a change to
		// the core paths.
		if !runtime.doReset {
			return err
		}
		logrus.Errorf("Runtime paths differ from those stored in database, storage reset may not remove all files")
	}

	if runtime.config.Engine.Namespace != "" {
		return fmt.Errorf("namespaces are not supported by this version of Libpod, please unset the `namespace` field in containers.conf: %w", define.ErrNotImplemented)
	}

	needsUserns := os.Geteuid() != 0
	if !needsUserns {
		hasCapSysAdmin, err := unshare.HasCapSysAdmin()
		if err != nil {
			return err
		}
		needsUserns = !hasCapSysAdmin
	}
	// Set up containers/storage
	var store storage.Store
	if needsUserns {
		logrus.Debug("Not configuring container store")
	} else if err := runtime.configureStore(); err != nil {
		// Make a best-effort attempt to clean up if performing a
		// storage reset.
		if runtime.doReset {
			if err := runtime.removeAllDirs(); err != nil {
				logrus.Errorf("Removing libpod directories: %v", err)
			}
		}

		return fmt.Errorf("configure storage: %w", err)
	} else {
		// Keep a reference so the deferred cleanup below can actually
		// shut the store down if a later step fails.
		store = runtime.store
	}
	defer func() {
		if retErr != nil && store != nil {
			// Don't forcibly shut down
			// We could be opening a store in use by another libpod
			if _, err := store.Shutdown(false); err != nil {
				logrus.Errorf("Removing store for partially-created runtime: %s", err)
			}
		}
	}()

	// Set up the eventer
	eventer, err := runtime.newEventer()
	if err != nil {
		return err
	}
	runtime.eventer = eventer

	// Set up containers/image
	if runtime.imageContext == nil {
		runtime.imageContext = &types.SystemContext{
			BigFilesTemporaryDir: parse.GetTempDir(),
		}
	}
	runtime.imageContext.SignaturePolicyPath = runtime.config.Engine.SignaturePolicyPath

	// Get us at least one working OCI runtime.
	runtime.ociRuntimes = make(map[string]OCIRuntime)

	// Initialize remaining OCI runtimes
	for name, paths := range runtime.config.Engine.OCIRuntimes {
		ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
		if err != nil {
			// Don't fatally error.
			// This will allow us to ship configs including optional
			// runtimes that might not be installed (crun, kata).
			// Log at debug level only, so that default configs do
			// not produce spurious errors.
			logrus.Debugf("Configured OCI runtime %s initialization failed: %v", name, err)
			continue
		}

		runtime.ociRuntimes[name] = ociRuntime
	}

	// Do we have a default OCI runtime?
	if runtime.config.Engine.OCIRuntime != "" {
		// If the string starts with / it's a path to a runtime
		// executable.
		if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") {
			ociRuntime, err := newConmonOCIRuntime(runtime.config.Engine.OCIRuntime, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.runtimeFlags, runtime.config)
			if err != nil {
				return err
			}

			runtime.ociRuntimes[runtime.config.Engine.OCIRuntime] = ociRuntime
			runtime.defaultOCIRuntime = ociRuntime
		} else {
			ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime]
			if !ok {
				return fmt.Errorf("default OCI runtime %q not found: %w", runtime.config.Engine.OCIRuntime, define.ErrInvalidArg)
			}
			runtime.defaultOCIRuntime = ociRuntime
		}
	}
	logrus.Debugf("Using OCI runtime %q", runtime.defaultOCIRuntime.Path())

	// Do we have at least one valid OCI runtime?
	if len(runtime.ociRuntimes) == 0 {
		return fmt.Errorf("no OCI runtime has been configured: %w", define.ErrInvalidArg)
	}

	// Do we have a default runtime?
	if runtime.defaultOCIRuntime == nil {
		return fmt.Errorf("no default OCI runtime was configured: %w", define.ErrInvalidArg)
	}

	// The store is only set up when we do not need to re-exec into a user
	// namespace, so do the same for the network interface.
	if !needsUserns {
		netBackend, netInterface, err := network.NetworkBackend(runtime.store, runtime.config, runtime.syslog)
		if err != nil {
			return err
		}
		runtime.config.Network.NetworkBackend = string(netBackend)
		runtime.network = netInterface
	}

	// We now need to see if the system has restarted.
	// We check for the presence of a file in our tmp directory to verify this.
	// This check must be locked to prevent races.
	runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive")
	aliveLock, err := runtime.getRuntimeAliveLock()
	if err != nil {
		return fmt.Errorf("acquiring runtime init lock: %w", err)
	}
	// Acquire the lock and hold it until we return.
	// This ensures that no two processes will be in runtime.refresh at once.
	aliveLock.Lock()
	doRefresh := false
	unLockFunc := aliveLock.Unlock
	defer func() {
		if unLockFunc != nil {
			unLockFunc()
		}
	}()

	err = fileutils.Exists(runtimeAliveFile)
	if err != nil {
		// If we need to refresh, then it is safe to assume there are
		// no containers running.  Immediately create a namespace, as
		// we will need to access the storage.
		if needsUserns {
			// Warn users if the mode is rootless, the cgroup
			// manager is systemd, and no valid systemd session is
			// present.
			// Warn only whenever a new namespace is created.
			if runtime.config.Engine.CgroupManager == config.SystemdCgroupsManager {
				unified, _ := cgroups.IsCgroup2UnifiedMode()
				if unified && rootless.IsRootless() && !systemd.IsSystemdSessionValid(rootless.GetRootlessUID()) {
					logrus.Debug("Invalid systemd user session for current user")
				}
			}
			unLockFunc()
			unLockFunc = nil
			pausePid, err := util.GetRootlessPauseProcessPidPath()
			if err != nil {
				return fmt.Errorf("could not get pause process pid file path: %w", err)
			}

			// Create the path in case it does not already exist.
			// https://github.com/containers/podman/issues/8539
			if err := os.MkdirAll(filepath.Dir(pausePid), 0o700); err != nil {
				return fmt.Errorf("could not create pause process pid file directory: %w", err)
			}

			became, ret, err := rootless.BecomeRootInUserNS(pausePid)
			if err != nil {
				return err
			}
			if became {
				// Check if the pause process was created.  If it was created, then
				// move it to its own systemd scope.
				systemdCommon.MovePauseProcessToScope(pausePid)

				// gocritic complains because defer is not run on os.Exit().
				// However, this is fine because the lock is released anyway when the process exits.
				//nolint:gocritic
				os.Exit(ret)
			}
		}
		// If the file doesn't exist, we need to refresh the state.
		// This will trigger on first use as well, but refreshing an
		// empty state only creates a single file.
		// As such, it's not really a performance concern.
		if errors.Is(err, os.ErrNotExist) {
			doRefresh = true
		} else {
			return fmt.Errorf("reading runtime status file %s: %w", runtimeAliveFile, err)
		}
	}

	runtime.lockManager, err = getLockManager(runtime)
	if err != nil {
		return err
	}

	// Mark the runtime as valid - ready to be used, cannot be modified
	// further.
	// This must happen *before* refresh, as we can remove containers there.
	// It should not be a big deal, as we don't return the runtime to users
	// until after refresh runs.
	runtime.valid = true

	// If we need to refresh the state, do it now - things are guaranteed to
	// be set up by now.
	if doRefresh {
		// Ensure we have a store before refresh occurs
		if runtime.store == nil {
			if err := runtime.configureStore(); err != nil {
				return fmt.Errorf("configure storage: %w", err)
			}
		}

		if err2 := runtime.refresh(ctx, runtimeAliveFile); err2 != nil {
			return err2
		}
	}

	// Check the current boot ID - it will be written to the alive file.
	if err := runtime.checkBootID(runtimeAliveFile); err != nil {
		return err
	}

	runtime.startWorker()

	return nil
}

// TmpDir gets the current Libpod temporary files directory.
func (r *Runtime) TmpDir() (string, error) {
	if !r.valid {
		return "", define.ErrRuntimeStopped
	}

	return r.config.Engine.TmpDir, nil
}

// GetConfigNoCopy returns the configuration used by the runtime.
// Note that the returned value is not a copy and must hence
// only be used in a reading fashion.
func (r *Runtime) GetConfigNoCopy() (*config.Config, error) {
	if !r.valid {
		return nil, define.ErrRuntimeStopped
	}
	return r.config, nil
}

// GetConfig returns a copy of the configuration used by the runtime.
// Please use GetConfigNoCopy() if you only need to read, but not write
// to, the returned config.
func (r *Runtime) GetConfig() (*config.Config, error) {
	rtConfig, err := r.GetConfigNoCopy()
	if err != nil {
		return nil, err
	}

	config := new(config.Config)

	// Copy so the caller won't be able to modify the actual config
	if err := JSONDeepCopy(rtConfig, config); err != nil {
		return nil, fmt.Errorf("copying config: %w", err)
	}

	return config, nil
}

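// Sketch of the difference between the two accessors (illustrative only):
//
//	ro, _ := rt.GetConfigNoCopy() // shared pointer: read it, never mutate it
//	cp, _ := rt.GetConfig()       // deep copy: safe to modify
//	cp.Engine.TmpDir = "/tmp/x"   // does not affect the runtime's config
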
// libimageEventsMap translates a libimage event type to a libpod event status.
var libimageEventsMap = map[libimage.EventType]events.Status{
	libimage.EventTypeImagePull:      events.Pull,
	libimage.EventTypeImagePullError: events.PullError,
	libimage.EventTypeImagePush:      events.Push,
	libimage.EventTypeImageRemove:    events.Remove,
	libimage.EventTypeImageLoad:      events.LoadFromArchive,
	libimage.EventTypeImageSave:      events.Save,
	libimage.EventTypeImageTag:       events.Tag,
	libimage.EventTypeImageUntag:     events.Untag,
	libimage.EventTypeImageMount:     events.Mount,
	libimage.EventTypeImageUnmount:   events.Unmount,
}

// libimageEvents spawns a goroutine which will listen for events on
// the libimage.Runtime.  The goroutine will be cleaned up implicitly
// when main() exits.
func (r *Runtime) libimageEvents() {
	r.libimageEventsShutdown = make(chan bool)

	toLibpodEventStatus := func(e *libimage.Event) events.Status {
		status, found := libimageEventsMap[e.Type]
		if !found {
			return "Unknown"
		}
		return status
	}

	eventChannel := r.libimageRuntime.EventChannel()
	go func() {
		sawShutdown := false
		for {
			// Make sure to read and write all events before
			// shutting down.
			for len(eventChannel) > 0 {
				libimageEvent := <-eventChannel
				e := events.Event{
					ID:     libimageEvent.ID,
					Name:   libimageEvent.Name,
					Status: toLibpodEventStatus(libimageEvent),
					Time:   libimageEvent.Time,
					Type:   events.Image,
				}
				if libimageEvent.Error != nil {
					e.Error = libimageEvent.Error.Error()
				}
				if err := r.eventer.Write(e); err != nil {
					logrus.Errorf("Unable to write image event: %q", err)
				}
			}

			if sawShutdown {
				close(r.libimageEventsShutdown)
				return
			}

			select {
			case <-r.libimageEventsShutdown:
				sawShutdown = true
			case <-time.After(100 * time.Millisecond):
			}
		}
	}()
}

// DeferredShutdown shuts down the runtime without exposing any
// errors. This is only meant to be used when the runtime is being
// shut down within a defer statement; otherwise use Shutdown.
func (r *Runtime) DeferredShutdown(force bool) {
	_ = r.Shutdown(force)
}

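// Illustrative use of the two shutdown paths (not from the original file):
// DeferredShutdown inside a defer, Shutdown where the error can be reported.
//
//	defer rt.DeferredShutdown(false)
//
//	// ...or, where errors can be handled:
//	if err := rt.Shutdown(true); err != nil {
//		logrus.Errorf("Shutting down runtime: %v", err)
//	}
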
// Shutdown shuts down the runtime and associated containers and storage.
// If force is true, containers and mounted storage will be shut down before
// cleaning up; if force is false, an error will be returned if there are
// still containers running or mounted.
func (r *Runtime) Shutdown(force bool) error {
	if !r.valid {
		return nil
	}

	if r.workerChannel != nil {
		r.workerGroup.Wait()
		close(r.workerChannel)
	}

	r.valid = false

	// Shutdown all containers if --force is given
	if force {
		ctrs, err := r.state.AllContainers(false)
		if err != nil {
			logrus.Errorf("Retrieving containers from database: %v", err)
		} else {
			for _, ctr := range ctrs {
				if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil {
					logrus.Errorf("Stopping container %s: %v", ctr.ID(), err)
				}
			}
		}
	}

	var lastError error
	// If no store was requested, it can be nil and there is no need to
	// attempt to shut it down
	if r.store != nil {
		// Wait for the events to be written.
		if r.libimageEventsShutdown != nil {
			// Tell the loop to shut down
			r.libimageEventsShutdown <- true
			// Wait for close to signal shutdown
			<-r.libimageEventsShutdown
		}

		// Note that the libimage runtime shuts down the store.
		if err := r.libimageRuntime.Shutdown(force); err != nil {
			lastError = fmt.Errorf("shutting down container storage: %w", err)
		}
	}
	if err := r.state.Close(); err != nil {
		if lastError != nil {
			logrus.Error(lastError)
		}
		lastError = err
	}

	return lastError
}

// Reconfigures the runtime after a reboot.
// Refreshes the state, recreating temporary files.
// Does not check validity as the runtime is not valid until after this has run.
func (r *Runtime) refresh(ctx context.Context, alivePath string) error {
	logrus.Debugf("Podman detected system restart - performing state refresh")

	// Clear the state in the database if we are not running in a container
	if !graphRootMounted() {
		// First clear the state in the database
		if err := r.state.Refresh(); err != nil {
			return err
		}
	}

	// Next refresh the state of all containers to recreate dirs and
	// namespaces, and all the pods to recreate cgroups.
	// Containers, pods, and volumes must also reacquire their locks.
	ctrs, err := r.state.AllContainers(false)
	if err != nil {
		return fmt.Errorf("retrieving all containers from state: %w", err)
	}
	pods, err := r.state.AllPods()
	if err != nil {
		return fmt.Errorf("retrieving all pods from state: %w", err)
	}
	vols, err := r.state.AllVolumes()
	if err != nil {
		return fmt.Errorf("retrieving all volumes from state: %w", err)
	}
	// No locks are taken during pod, volume, and container refresh.
	// Furthermore, the pod/volume/container refresh() functions are not
	// allowed to take locks themselves.
	// We cannot assume that any pod/volume/container has a valid lock until
	// after this function has returned.
	// The runtime alive lock should suffice to provide mutual exclusion
	// until this has run.
	for _, ctr := range ctrs {
		if err := ctr.refresh(); err != nil {
			logrus.Errorf("Refreshing container %s: %v", ctr.ID(), err)
		}
		// This is the only place it's safe to use ctr.state.State unlocked.
		// We're holding the alive lock, guaranteed to be the only Libpod on the system right now.
		if (ctr.AutoRemove() && ctr.state.State == define.ContainerStateExited) || ctr.state.State == define.ContainerStateRemoving {
			opts := ctrRmOpts{
				// Don't force-remove, we're supposed to be fresh off a reboot.
				// If we have to force, something is seriously wrong.
				Force:        false,
				RemoveVolume: true,
			}
			// This container should have autoremoved before the
			// reboot but did not.
			// Get rid of it.
			if _, _, err := r.removeContainer(ctx, ctr, opts); err != nil {
				logrus.Errorf("Unable to remove container %s which should have autoremoved: %v", ctr.ID(), err)
			}
		}
	}
	for _, pod := range pods {
		if err := pod.refresh(); err != nil {
			logrus.Errorf("Refreshing pod %s: %v", pod.ID(), err)
		}
	}
	for _, vol := range vols {
		if err := vol.refresh(); err != nil {
			logrus.Errorf("Refreshing volume %s: %v", vol.Name(), err)
		}
	}

	// Create a file indicating the runtime is alive and ready
	file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
	if err != nil {
		return fmt.Errorf("creating runtime status file: %w", err)
	}
	defer file.Close()

	r.NewSystemEvent(events.Refresh)

	return nil
}

// Info returns the store and host information.
func (r *Runtime) Info() (*define.Info, error) {
	return r.info()
}

// generateName generates a unique name for a container or pod.
func (r *Runtime) generateName() (string, error) {
	for {
		name := namesgenerator.GetRandomName(0)
		// Make sure a container with this name does not exist
		if _, err := r.state.LookupContainer(name); err == nil {
			continue
		} else if !errors.Is(err, define.ErrNoSuchCtr) {
			return "", err
		}
		// Make sure a pod with this name does not exist
		if _, err := r.state.LookupPod(name); err == nil {
			continue
		} else if !errors.Is(err, define.ErrNoSuchPod) {
			return "", err
		}
		return name, nil
	}
	// The code should never reach here.
}

// Configure the store and image runtime.
func (r *Runtime) configureStore() error {
	store, err := storage.GetStore(r.storageConfig)
	if err != nil {
		return err
	}

	r.store = store
	is.Transport.SetStore(store)

	// Set up a storage service for creating container root filesystems from
	// images
	r.storageService = getStorageService(r.store)

	runtimeOptions := &libimage.RuntimeOptions{
		SystemContext: r.imageContext,
	}
	libimageRuntime, err := libimage.RuntimeFromStore(store, runtimeOptions)
	if err != nil {
		return err
	}
	r.libimageRuntime = libimageRuntime
	// Run the libimage events routine.
	r.libimageEvents()

	return nil
}

// LibimageRuntime ... to allow for a step-by-step migration to libimage.
func (r *Runtime) LibimageRuntime() *libimage.Runtime {
	return r.libimageRuntime
}

// SystemContext returns the image context.
func (r *Runtime) SystemContext() *types.SystemContext {
	// Return the context from the libimage runtime.  libimage is sensitive
	// to a number of env vars.
	return r.libimageRuntime.SystemContext()
}

// GetOCIRuntimePath retrieves the path of the default OCI runtime.
func (r *Runtime) GetOCIRuntimePath() string {
	return r.defaultOCIRuntime.Path()
}

// DefaultOCIRuntime returns the default OCI runtime.
func (r *Runtime) DefaultOCIRuntime() OCIRuntime {
	return r.defaultOCIRuntime
}

// StorageConfig retrieves the storage options for the container runtime.
func (r *Runtime) StorageConfig() storage.StoreOptions {
	return r.storageConfig
}

// GarbageCollect removes unreferenced data from the container store.
func (r *Runtime) GarbageCollect() error {
	return r.store.GarbageCollect()
}

// RunRoot retrieves the current c/storage temporary directory in use by Libpod.
func (r *Runtime) RunRoot() string {
	if r.store == nil {
		return ""
	}
	return r.store.RunRoot()
}

// GraphRoot retrieves the current c/storage directory in use by Libpod.
func (r *Runtime) GraphRoot() string {
	if r.store == nil {
		return ""
	}
	return r.store.GraphRoot()
}

// GetPodName retrieves the pod name associated with a given full ID.
// If the given ID does not correspond to any existing Pod or Container,
// ErrNoSuchPod is returned.
func (r *Runtime) GetPodName(id string) (string, error) {
	if !r.valid {
		return "", define.ErrRuntimeStopped
	}

	return r.state.GetPodName(id)
}

// DBConfig is a set of Libpod runtime configuration settings that are saved in
// a State when it is first created, and can subsequently be retrieved.
type DBConfig struct {
	LibpodRoot  string
	LibpodTmp   string
	StorageRoot string
	StorageTmp  string
	GraphDriver string
	VolumePath  string
}

// mergeDBConfig merges the configuration from the database.
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) {
	c := &r.config.Engine
	if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" {
		if r.storageConfig.RunRoot != dbConfig.StorageTmp &&
			r.storageConfig.RunRoot != "" {
			logrus.Debugf("Overriding run root %q with %q from database",
				r.storageConfig.RunRoot, dbConfig.StorageTmp)
		}
		r.storageConfig.RunRoot = dbConfig.StorageTmp
	}

	if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" {
		if r.storageConfig.GraphRoot != dbConfig.StorageRoot &&
			r.storageConfig.GraphRoot != "" {
			logrus.Debugf("Overriding graph root %q with %q from database",
				r.storageConfig.GraphRoot, dbConfig.StorageRoot)
		}
		r.storageConfig.GraphRoot = dbConfig.StorageRoot
	}

	if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" {
		if r.storageConfig.GraphDriverName != dbConfig.GraphDriver &&
			r.storageConfig.GraphDriverName != "" {
			logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files (%q) to resolve.  May prevent use of images created by other tools",
				r.storageConfig.GraphDriverName, dbConfig.GraphDriver, r.storageConfig.GraphRoot)
		}
		r.storageConfig.GraphDriverName = dbConfig.GraphDriver
	}

	if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" {
		if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" {
			logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot)
		}
		c.StaticDir = dbConfig.LibpodRoot
	}

	if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" {
		if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" {
			logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp)
		}
		c.TmpDir = dbConfig.LibpodTmp
	}

	if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" {
		if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" {
			logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath)
		}
		c.VolumePath = dbConfig.VolumePath
	}
}

// EnableLabeling reports whether container labeling (e.g. SELinux) is enabled
// in the configuration.
func (r *Runtime) EnableLabeling() bool {
	return r.config.Containers.EnableLabeling
}

// Reload reloads the configuration files.
func (r *Runtime) Reload() error {
	if err := r.reloadContainersConf(); err != nil {
		return err
	}
	if err := r.reloadStorageConf(); err != nil {
		return err
	}
	// Invalidate the registries.conf cache. The next invocation will
	// reload all data.
	sysregistriesv2.InvalidateCache()
	return nil
}

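// A caller might wire Reload to SIGHUP so a long-running process picks up
// configuration changes; this is an assumed pattern, not something this file
// sets up (it requires the os/signal package):
//
//	sigs := make(chan os.Signal, 1)
//	signal.Notify(sigs, syscall.SIGHUP)
//	go func() {
//		for range sigs {
//			if err := rt.Reload(); err != nil {
//				logrus.Errorf("Reloading configuration: %v", err)
//			}
//		}
//	}()
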
// reloadContainersConf reloads the containers.conf.
func (r *Runtime) reloadContainersConf() error {
	config, err := config.Reload()
	if err != nil {
		return err
	}
	r.config = config
	logrus.Infof("Applied new containers configuration: %v", config)
	return nil
}

// reloadStorageConf reloads the storage.conf.
func (r *Runtime) reloadStorageConf() error {
	configFile, err := storage.DefaultConfigFile()
	if err != nil {
		return err
	}
	storage.ReloadConfigurationFile(configFile, &r.storageConfig)
	logrus.Infof("Applied new storage configuration: %v", r.storageConfig)
	return nil
}

// getVolumePlugin gets a specific volume plugin.
func (r *Runtime) getVolumePlugin(volConfig *VolumeConfig) (*plugin.VolumePlugin, error) {
	// There is no plugin for local.
	name := volConfig.Driver
	timeout := volConfig.Timeout
	if name == define.VolumeDriverLocal || name == "" {
		return nil, nil
	}

	pluginPath, ok := r.config.Engine.VolumePlugins[name]
	if !ok {
		if name == define.VolumeDriverImage {
			return nil, nil
		}
		return nil, fmt.Errorf("no volume plugin with name %s available: %w", name, define.ErrMissingPlugin)
	}

	return plugin.GetVolumePlugin(name, pluginPath, timeout, r.config)
}

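// Plugins are looked up in the [engine.volume_plugins] table of
// containers.conf, which maps a plugin name to the path of its Unix socket
// (a sketch; the path is an example):
//
//	[engine.volume_plugins]
//	testplugin = "/run/docker/plugins/testplugin.sock"
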
// GetSecretsStorageDir returns the directory in which secrets are stored.
func (r *Runtime) GetSecretsStorageDir() string {
	return filepath.Join(r.store.GraphRoot(), "secrets")
}

// SecretsManager returns the runtime's secrets manager, creating it on
// first use.
func (r *Runtime) SecretsManager() (*secrets.SecretsManager, error) {
	if r.secretsManager == nil {
		manager, err := secrets.NewManager(r.GetSecretsStorageDir())
		if err != nil {
			return nil, err
		}
		r.secretsManager = manager
	}
	return r.secretsManager, nil
}

// graphRootMounted reports whether we are running in a container with the
// graph root mounted in from the host, as signaled via /run/.containerenv.
func graphRootMounted() bool {
	f, err := os.OpenFile("/run/.containerenv", os.O_RDONLY, os.ModePerm)
	if err != nil {
		return false
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		if scanner.Text() == "graphRootMounted=1" {
			return true
		}
	}
	return false
}

// graphRootMountedFlag returns the containerenv flag to set when the graph
// root lies under one of the given mounts, or "" otherwise.
func (r *Runtime) graphRootMountedFlag(mounts []spec.Mount) string {
	root := r.store.GraphRoot()
	for _, val := range mounts {
		if strings.HasPrefix(root, val.Source) {
			return "graphRootMounted=1"
		}
	}
	return ""
}

// getRuntimeAliveLock returns the runtime's alive lockfile.
func (r *Runtime) getRuntimeAliveLock() (*lockfile.LockFile, error) {
	return lockfile.GetLockFile(filepath.Join(r.config.Engine.TmpDir, "alive.lck"))
}

// Network returns the network interface which is used by the runtime.
func (r *Runtime) Network() nettypes.ContainerNetwork {
	return r.network
}

// GetDefaultNetworkName returns the name of the default network used by the
// runtime.
func (r *Runtime) GetDefaultNetworkName() string {
	return r.config.Network.DefaultNetwork
}

// RemoteURI returns the API server URI.
func (r *Runtime) RemoteURI() string {
	return r.config.Engine.RemoteURI
}

// SetRemoteURI records the API server URI.
func (r *Runtime) SetRemoteURI(uri string) {
	r.config.Engine.RemoteURI = uri
}

// LockConflicts returns information on potential lock conflicts: a map of
// lock number to the object(s) using the lock, formatted as "container <id>",
// "volume <id>", or "pod <id>", and an array of the locks that are currently
// being held, formatted as []uint32.
// If the returned map is not empty, you should immediately renumber locks on
// the runtime, because you have a deadlock waiting to happen.
func (r *Runtime) LockConflicts() (map[uint32][]string, []uint32, error) {
	// Make an internal map to store what lock is associated with what
	locksInUse := make(map[uint32][]string)

	ctrs, err := r.state.AllContainers(false)
	if err != nil {
		return nil, nil, err
	}
	for _, ctr := range ctrs {
		lockNum := ctr.lock.ID()
		ctrString := fmt.Sprintf("container %s", ctr.ID())
		locksInUse[lockNum] = append(locksInUse[lockNum], ctrString)
	}

	pods, err := r.state.AllPods()
	if err != nil {
		return nil, nil, err
	}
	for _, pod := range pods {
		lockNum := pod.lock.ID()
		podString := fmt.Sprintf("pod %s", pod.ID())
		locksInUse[lockNum] = append(locksInUse[lockNum], podString)
	}

	volumes, err := r.state.AllVolumes()
	if err != nil {
		return nil, nil, err
	}
	for _, vol := range volumes {
		lockNum := vol.lock.ID()
		volString := fmt.Sprintf("volume %s", vol.Name())
		locksInUse[lockNum] = append(locksInUse[lockNum], volString)
	}

	// Now go through and find any entries with >1 item associated
	toReturn := make(map[uint32][]string)
	for lockNum, objects := range locksInUse {
		// If debug logging is requested, just spit out *every* lock in
		// use.
		logrus.Debugf("Lock number %d is in use by %v", lockNum, objects)

		if len(objects) > 1 {
			toReturn[lockNum] = objects
		}
	}

	locksHeld, err := r.lockManager.LocksHeld()
	if err != nil {
		if errors.Is(err, define.ErrNotImplemented) {
			logrus.Warnf("Could not retrieve currently taken locks as the lock backend does not support this operation")
			return toReturn, []uint32{}, nil
		}

		return nil, nil, err
	}

	return toReturn, locksHeld, nil
}
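
// Sketch of how a caller might act on the results (illustrative; the actual
// renumbering is performed by `podman system renumber`):
//
//	conflicts, held, err := rt.LockConflicts()
//	if err != nil {
//		return err
//	}
//	if len(conflicts) > 0 {
//		logrus.Warnf("Lock conflicts detected (%d locks currently held); renumber locks now", len(held))
//	}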