podman
1386 lines · 42.8 KB
1//go:build linux
2// +build linux
3
4package buildah
5
6import (
7"context"
8"errors"
9"fmt"
10"os"
11"path/filepath"
12"strings"
13"syscall"
14
15"github.com/containers/buildah/bind"
16"github.com/containers/buildah/chroot"
17"github.com/containers/buildah/copier"
18"github.com/containers/buildah/define"
19"github.com/containers/buildah/internal"
20"github.com/containers/buildah/internal/tmpdir"
21"github.com/containers/buildah/internal/volumes"
22"github.com/containers/buildah/pkg/overlay"
23"github.com/containers/buildah/pkg/parse"
24butil "github.com/containers/buildah/pkg/util"
25"github.com/containers/buildah/util"
26"github.com/containers/common/libnetwork/etchosts"
27"github.com/containers/common/libnetwork/pasta"
28"github.com/containers/common/libnetwork/resolvconf"
29"github.com/containers/common/libnetwork/slirp4netns"
30nettypes "github.com/containers/common/libnetwork/types"
31netUtil "github.com/containers/common/libnetwork/util"
32"github.com/containers/common/pkg/capabilities"
33"github.com/containers/common/pkg/chown"
34"github.com/containers/common/pkg/config"
35"github.com/containers/common/pkg/hooks"
36hooksExec "github.com/containers/common/pkg/hooks/exec"
37"github.com/containers/storage/pkg/fileutils"
38"github.com/containers/storage/pkg/idtools"
39"github.com/containers/storage/pkg/ioutils"
40"github.com/containers/storage/pkg/lockfile"
41"github.com/containers/storage/pkg/stringid"
42"github.com/containers/storage/pkg/unshare"
43"github.com/docker/go-units"
44"github.com/opencontainers/runtime-spec/specs-go"
45"github.com/opencontainers/runtime-tools/generate"
46"github.com/sirupsen/logrus"
47"golang.org/x/exp/slices"
48"golang.org/x/sys/unix"
49"tags.cncf.io/container-device-interface/pkg/cdi"
50)
51
var (
	// nonCleanablePrefixes lists destination prefixes that must never be
	// removed during cleanup: the rootfs already contains /etc, /dev,
	// /sys, and /proc, and removing overlapping files from these
	// directories would make unionfs create whiteout (`.wh`) files.
	// Everything outside these prefixes may be cleaned up.
	nonCleanablePrefixes = []string{
		"/etc", "/dev", "/sys", "/proc",
	}
)
62
63func setChildProcess() error {
64if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0); err != nil {
65fmt.Fprintf(os.Stderr, "prctl(PR_SET_CHILD_SUBREAPER, 1): %v\n", err)
66return err
67}
68return nil
69}
70
71func (b *Builder) cdiSetupDevicesInSpec(deviceSpecs []string, configDir string, spec *specs.Spec) ([]string, error) {
72leftoverDevices := deviceSpecs
73registry, err := cdi.NewCache()
74if err != nil {
75return nil, fmt.Errorf("creating CDI registry: %w", err)
76}
77var configDirs []string
78if b.CDIConfigDir != "" {
79configDirs = append(configDirs, b.CDIConfigDir)
80}
81if configDir != "" {
82configDirs = append(configDirs, configDir)
83}
84// TODO: CdiSpecDirs will be in containers/common v0.59.0 or later?
85// defConfig, err := config.Default()
86// if err != nil {
87// return nil, fmt.Errorf("failed to get container config: %w", err)
88// }
89// configDirs = append(configDirs, defConfig.Engine.CdiSpecDirs.Get()...)
90if len(configDirs) > 0 {
91if err := registry.Configure(cdi.WithSpecDirs(configDirs...)); err != nil {
92return nil, fmt.Errorf("CDI registry ignored configured directories %v: %w", configDirs, err)
93}
94}
95if err := registry.Refresh(); err != nil {
96logrus.Warnf("CDI registry refresh: %v", err)
97} else {
98leftoverDevices, err = registry.InjectDevices(spec, deviceSpecs...)
99if err != nil {
100logrus.Debugf("CDI device injection: %v, unresolved list %v", err, leftoverDevices)
101}
102}
103removed := slices.DeleteFunc(slices.Clone(deviceSpecs), func(t string) bool { return slices.Contains(leftoverDevices, t) })
104logrus.Debugf("CDI taking care of devices %v, leaving devices %v", removed, leftoverDevices)
105return leftoverDevices, nil
106}
107
108// Extract the device list so that we can still try to make it work if
109// we're running rootless and can't just mknod() the device nodes.
110func separateDevicesFromRuntimeSpec(g *generate.Generator) define.ContainerDevices {
111var result define.ContainerDevices
112if g.Config != nil && g.Config.Linux != nil {
113for _, device := range g.Config.Linux.Devices {
114var bDevice define.BuildahDevice
115bDevice.Path = device.Path
116switch device.Type {
117case "b":
118bDevice.Type = 'b'
119case "c":
120bDevice.Type = 'c'
121case "u":
122bDevice.Type = 'u'
123case "p":
124bDevice.Type = 'p'
125}
126bDevice.Major = device.Major
127bDevice.Minor = device.Minor
128if device.FileMode != nil {
129bDevice.FileMode = *device.FileMode
130}
131if device.UID != nil {
132bDevice.Uid = *device.UID
133}
134if device.GID != nil {
135bDevice.Gid = *device.GID
136}
137bDevice.Source = device.Path
138bDevice.Destination = device.Path
139result = append(result, bDevice)
140}
141}
142g.ClearLinuxDevices()
143return result
144}
145
// Run runs the specified command in the container's root filesystem.  It
// builds an OCI runtime spec for the requested isolation mode, mounts the
// container, wires up devices, namespaces, networking-related files
// (/etc/hosts, /etc/hostname, /etc/resolv.conf, /run/.containerenv), OCI
// hooks, and volume/run mounts, and then dispatches to the configured
// runtime (OCI, rootless OCI, or chroot).  Temporary bundle data and mounts
// are cleaned up before returning.
func (b *Builder) Run(command []string, options RunOptions) error {
	// Create a per-run bundle directory to hold generated files.
	p, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
	if err != nil {
		return err
	}
	// On some hosts like AH, /tmp is a symlink and we need an
	// absolute path.
	path, err := filepath.EvalSymlinks(p)
	if err != nil {
		return err
	}
	logrus.Debugf("using %q to hold bundle data", path)
	defer func() {
		if err2 := os.RemoveAll(path); err2 != nil {
			options.Logger.Error(err2)
		}
	}()

	gp, err := generate.New("linux")
	if err != nil {
		return fmt.Errorf("generating new 'linux' runtime spec: %w", err)
	}
	g := &gp

	// Resolve the effective isolation: per-run option, then the builder's
	// setting, then the parsed default, falling back to OCI.
	isolation := options.Isolation
	if isolation == define.IsolationDefault {
		isolation = b.Isolation
		if isolation == define.IsolationDefault {
			isolation, err = parse.IsolationOption("")
			if err != nil {
				logrus.Debugf("got %v while trying to determine default isolation, guessing OCI", err)
				isolation = IsolationOCI
			} else if isolation == IsolationDefault {
				isolation = IsolationOCI
			}
		}
	}
	if err := checkAndOverrideIsolationOptions(isolation, &options); err != nil {
		return err
	}

	// hardwire the environment to match docker build to avoid subtle and hard-to-debug differences due to containers.conf
	b.configureEnvironment(g, options, []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"})

	if b.CommonBuildOpts == nil {
		return fmt.Errorf("invalid format on container you must recreate the container")
	}

	if err := addCommonOptsToSpec(b.CommonBuildOpts, g); err != nil {
		return err
	}

	// Working directory: the per-run option wins over the builder's.
	workDir := b.WorkDir()
	if options.WorkingDir != "" {
		g.SetProcessCwd(options.WorkingDir)
		workDir = options.WorkingDir
	} else if b.WorkDir() != "" {
		g.SetProcessCwd(b.WorkDir())
	}
	setupSelinux(g, b.ProcessLabel, b.MountLabel)
	mountPoint, err := b.Mount(b.MountLabel)
	if err != nil {
		return fmt.Errorf("mounting container %q: %w", b.ContainerID, err)
	}
	defer func() {
		if err := b.Unmount(); err != nil {
			options.Logger.Errorf("error unmounting container: %v", err)
		}
	}()
	g.SetRootPath(mountPoint)
	if len(command) > 0 {
		command = runLookupPath(g, command)
		g.SetProcessArgs(command)
	} else {
		g.SetProcessArgs(nil)
	}

	// Combine the working container's set of devices with the ones for just this run.
	deviceSpecs := append(append([]string{}, options.DeviceSpecs...), b.DeviceSpecs...)
	deviceSpecs, err = b.cdiSetupDevicesInSpec(deviceSpecs, options.CDIConfigDir, g.Config) // makes changes to more than just the device list
	if err != nil {
		return err
	}
	devices := separateDevicesFromRuntimeSpec(g)
	for _, deviceSpec := range deviceSpecs {
		device, err := parse.DeviceFromPath(deviceSpec)
		if err != nil {
			return fmt.Errorf("setting up device %q: %w", deviceSpec, err)
		}
		devices = append(devices, device...)
	}
	devices = append(append(devices, options.Devices...), b.Devices...)

	// Mount devices, if any, and if we're rootless attempt to work around not
	// being able to create device nodes by bind-mounting them from the host, like podman does.
	if unshare.IsRootless() {
		// We are going to create bind mounts for devices
		// but we need to make sure that we don't override
		// anything which is already in OCI spec.
		mounts := make(map[string]interface{})
		for _, m := range g.Mounts() {
			mounts[m.Destination] = true
		}
		newMounts := []specs.Mount{}
		for _, d := range devices {
			// Default permission is read-only.
			perm := "ro"
			// Get permission configured for this device but only process `write`
			// permission in rootless since `mknod` is not supported anyways.
			if strings.Contains(string(d.Rule.Permissions), "w") {
				perm = "rw"
			}
			devMnt := specs.Mount{
				Destination: d.Destination,
				Type:        parse.TypeBind,
				Source:      d.Source,
				Options:     []string{"slave", "nosuid", "noexec", perm, "rbind"},
			}
			// Podman parity: podman skips these two devices hence we do the same.
			if d.Path == "/dev/ptmx" || strings.HasPrefix(d.Path, "/dev/tty") {
				continue
			}
			// Device is already in OCI spec do not re-mount.
			if _, found := mounts[d.Path]; found {
				continue
			}
			newMounts = append(newMounts, devMnt)
		}
		g.Config.Mounts = append(newMounts, g.Config.Mounts...)
	} else {
		// Running as root: add real device nodes and the matching
		// cgroup device-access rules.
		for _, d := range devices {
			sDev := specs.LinuxDevice{
				Type:     string(d.Type),
				Path:     d.Path,
				Major:    d.Major,
				Minor:    d.Minor,
				FileMode: &d.FileMode,
				UID:      &d.Uid,
				GID:      &d.Gid,
			}
			g.AddDevice(sDev)
			g.AddLinuxResourcesDevice(true, string(d.Type), &d.Major, &d.Minor, string(d.Permissions))
		}
	}

	setupMaskedPaths(g)
	setupReadOnlyPaths(g)

	setupTerminal(g, options.Terminal, options.TerminalSize)

	configureNetwork, networkString, err := b.configureNamespaces(g, &options)
	if err != nil {
		return err
	}

	homeDir, err := b.configureUIDGID(g, mountPoint, options)
	if err != nil {
		return err
	}

	g.SetProcessNoNewPrivileges(b.CommonBuildOpts.NoNewPrivileges)

	g.SetProcessApparmorProfile(b.CommonBuildOpts.ApparmorProfile)

	// Now grab the spec from the generator. Set the generator to nil so that future contributors
	// will quickly be able to tell that they're supposed to be modifying the spec directly from here.
	spec := g.Config
	g = nil

	// Set the seccomp configuration using the specified profile name. Some syscalls are
	// allowed if certain capabilities are to be granted (example: CAP_SYS_CHROOT and chroot),
	// so we sorted out the capabilities lists first.
	if err = setupSeccomp(spec, b.CommonBuildOpts.SeccompProfilePath); err != nil {
		return err
	}

	// Translate the container's process UID/GID to host IDs when ID
	// mappings are in play.
	uid, gid := spec.Process.User.UID, spec.Process.User.GID
	if spec.Linux != nil {
		uid, gid, err = util.GetHostIDs(spec.Linux.UIDMappings, spec.Linux.GIDMappings, uid, gid)
		if err != nil {
			return err
		}
	}

	idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}

	// Make sure the working directory exists inside the rootfs, owned by
	// the container process's user.
	mode := os.FileMode(0755)
	coptions := copier.MkdirOptions{
		ChownNew: idPair,
		ChmodNew: &mode,
	}
	if err := copier.Mkdir(mountPoint, filepath.Join(mountPoint, spec.Process.Cwd), coptions); err != nil {
		return err
	}

	bindFiles := make(map[string]string)
	volumes := b.Volumes()

	// Figure out who owns files that will appear to be owned by UID/GID 0 in the container.
	rootUID, rootGID, err := util.GetHostRootIDs(spec)
	if err != nil {
		return err
	}
	rootIDPair := &idtools.IDPair{UID: int(rootUID), GID: int(rootGID)}

	// Generate /etc/hosts unless disabled, overridden by a volume, or
	// networking is off.
	hostsFile := ""
	if !options.NoHosts && !slices.Contains(volumes, config.DefaultHostsFile) && options.ConfigureNetwork != define.NetworkDisabled {
		hostsFile, err = b.createHostsFile(path, rootIDPair)
		if err != nil {
			return err
		}
		bindFiles[config.DefaultHostsFile] = hostsFile

		// Only add entries here if we do not have to do setup network,
		// if we do we have to do it much later after the network setup.
		if !configureNetwork {
			var entries etchosts.HostEntries
			isHost := true
			if spec.Linux != nil {
				for _, ns := range spec.Linux.Namespaces {
					if ns.Type == specs.NetworkNamespace {
						isHost = false
						break
					}
				}
			}
			// add host entry for local ip when running in host network
			if spec.Hostname != "" && isHost {
				ip := netUtil.GetLocalIP()
				if ip != "" {
					entries = append(entries, etchosts.HostEntry{
						Names: []string{spec.Hostname},
						IP:    ip,
					})
				}
			}
			err = b.addHostsEntries(hostsFile, mountPoint, entries, nil)
			if err != nil {
				return err
			}
		}
	}

	if !options.NoHostname && !(slices.Contains(volumes, "/etc/hostname")) {
		hostnameFile, err := b.generateHostname(path, spec.Hostname, rootIDPair)
		if err != nil {
			return err
		}
		// Bind /etc/hostname
		bindFiles["/etc/hostname"] = hostnameFile
	}

	// Generate /etc/resolv.conf unless overridden by a volume, networking
	// is off, or DNS is explicitly set to "none".
	resolvFile := ""
	if !slices.Contains(volumes, resolvconf.DefaultResolvConf) && options.ConfigureNetwork != define.NetworkDisabled && !(len(b.CommonBuildOpts.DNSServers) == 1 && strings.ToLower(b.CommonBuildOpts.DNSServers[0]) == "none") {
		resolvFile, err = b.createResolvConf(path, rootIDPair)
		if err != nil {
			return err
		}
		bindFiles[resolvconf.DefaultResolvConf] = resolvFile

		// Only add entries here if we do not have to do setup network,
		// if we do we have to do it much later after the network setup.
		if !configureNetwork {
			// NOTE(review): spec.Linux is dereferenced here without the
			// nil check used elsewhere in this function — confirm that
			// spec.Linux is always non-nil on this path.
			err = b.addResolvConfEntries(resolvFile, nil, spec.Linux.Namespaces, false, true)
			if err != nil {
				return err
			}
		}
	}
	// Empty file, so no need to recreate if it exists
	if _, ok := bindFiles["/run/.containerenv"]; !ok {
		containerenvPath := filepath.Join(path, "/run/.containerenv")
		if err = os.MkdirAll(filepath.Dir(containerenvPath), 0755); err != nil {
			return err
		}

		rootless := 0
		if unshare.IsRootless() {
			rootless = 1
		}
		// Populate the .containerenv with container information
		containerenv := fmt.Sprintf(`
engine="buildah-%s"
name=%q
id=%q
image=%q
imageid=%q
rootless=%d
`, define.Version, b.Container, b.ContainerID, b.FromImage, b.FromImageID, rootless)

		if err = ioutils.AtomicWriteFile(containerenvPath, []byte(containerenv), 0755); err != nil {
			return err
		}
		if err := relabel(containerenvPath, b.MountLabel, false); err != nil {
			return err
		}

		bindFiles["/run/.containerenv"] = containerenvPath
	}

	// Setup OCI hooks
	_, err = b.setupOCIHooks(spec, (len(options.Mounts) > 0 || len(volumes) > 0))
	if err != nil {
		return fmt.Errorf("unable to setup OCI hooks: %w", err)
	}

	runMountInfo := runMountInfo{
		WorkDir:          workDir,
		ContextDir:       options.ContextDir,
		Secrets:          options.Secrets,
		SSHSources:       options.SSHSources,
		StageMountPoints: options.StageMountPoints,
		SystemContext:    options.SystemContext,
	}

	runArtifacts, err := b.setupMounts(mountPoint, spec, path, options.Mounts, bindFiles, volumes, b.CommonBuildOpts.Volumes, options.RunMounts, runMountInfo)
	if err != nil {
		return fmt.Errorf("resolving mountpoints for container %q: %w", b.ContainerID, err)
	}
	if runArtifacts.SSHAuthSock != "" {
		sshenv := "SSH_AUTH_SOCK=" + runArtifacts.SSHAuthSock
		spec.Process.Env = append(spec.Process.Env, sshenv)
	}

	// following run was called from `buildah run`
	// and some images were mounted for this run
	// add them to cleanup artifacts
	if len(options.ExternalImageMounts) > 0 {
		runArtifacts.MountedImages = append(runArtifacts.MountedImages, options.ExternalImageMounts...)
	}

	defer func() {
		if err := b.cleanupRunMounts(options.SystemContext, mountPoint, runArtifacts); err != nil {
			options.Logger.Errorf("unable to cleanup run mounts %v", err)
		}
	}()

	defer b.cleanupTempVolumes()

	// Finally, dispatch to the runtime selected by the isolation mode.
	switch isolation {
	case define.IsolationOCI:
		var moreCreateArgs []string
		if options.NoPivot {
			moreCreateArgs = append(moreCreateArgs, "--no-pivot")
		}
		err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec,
			mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostsFile, resolvFile)
	case IsolationChroot:
		err = chroot.RunUsingChroot(spec, path, homeDir, options.Stdin, options.Stdout, options.Stderr)
	case IsolationOCIRootless:
		moreCreateArgs := []string{"--no-new-keyring"}
		if options.NoPivot {
			moreCreateArgs = append(moreCreateArgs, "--no-pivot")
		}
		err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, networkString, moreCreateArgs, spec,
			mountPoint, path, define.Package+"-"+filepath.Base(path), b.Container, hostsFile, resolvFile)
	default:
		err = errors.New("don't know how to run this command")
	}
	return err
}
508
// setupOCIHooks collects OCI hooks that match config.  When no hook
// directories are configured, the implicit default and override directories
// are consulted (root only; rootless gets no implicit hooks).  Any matched
// "precreate" hooks are executed against the spec before returning all
// matched hooks grouped by stage name.
func (b *Builder) setupOCIHooks(config *specs.Spec, hasVolumes bool) (map[string][]specs.Hook, error) {
	allHooks := make(map[string][]specs.Hook)
	if len(b.CommonBuildOpts.OCIHooksDir) == 0 {
		// No explicit hook directories: rootless users get no implicit ones.
		if unshare.IsRootless() {
			return nil, nil
		}
		for _, hDir := range []string{hooks.DefaultDir, hooks.OverrideDir} {
			manager, err := hooks.New(context.Background(), []string{hDir}, []string{})
			if err != nil {
				// A missing implicit directory is fine; skip it.
				if errors.Is(err, os.ErrNotExist) {
					continue
				}
				return nil, err
			}
			ociHooks, err := manager.Hooks(config, b.ImageAnnotations, hasVolumes)
			if err != nil {
				return nil, err
			}
			if len(ociHooks) > 0 || config.Hooks != nil {
				logrus.Warnf("Implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
			}
			for i, hook := range ociHooks {
				allHooks[i] = hook
			}
		}
	} else {
		manager, err := hooks.New(context.Background(), b.CommonBuildOpts.OCIHooksDir, []string{})
		if err != nil {
			return nil, err
		}

		allHooks, err = manager.Hooks(config, b.ImageAnnotations, hasVolumes)
		if err != nil {
			return nil, err
		}
	}

	// Run the precreate hooks now; the hook's own error is only logged,
	// while the execution error is returned.
	hookErr, err := hooksExec.RuntimeConfigFilter(context.Background(), allHooks["precreate"], config, hooksExec.DefaultPostKillTimeout) //nolint:staticcheck
	if err != nil {
		logrus.Warnf("Container: precreate hook: %v", err)
		if hookErr != nil && hookErr != err {
			logrus.Debugf("container: precreate hook (hook error): %v", hookErr)
		}
		return nil, err
	}
	return allHooks, nil
}
556
557func addCommonOptsToSpec(commonOpts *define.CommonBuildOptions, g *generate.Generator) error {
558// Resources - CPU
559if commonOpts.CPUPeriod != 0 {
560g.SetLinuxResourcesCPUPeriod(commonOpts.CPUPeriod)
561}
562if commonOpts.CPUQuota != 0 {
563g.SetLinuxResourcesCPUQuota(commonOpts.CPUQuota)
564}
565if commonOpts.CPUShares != 0 {
566g.SetLinuxResourcesCPUShares(commonOpts.CPUShares)
567}
568if commonOpts.CPUSetCPUs != "" {
569g.SetLinuxResourcesCPUCpus(commonOpts.CPUSetCPUs)
570}
571if commonOpts.CPUSetMems != "" {
572g.SetLinuxResourcesCPUMems(commonOpts.CPUSetMems)
573}
574
575// Resources - Memory
576if commonOpts.Memory != 0 {
577g.SetLinuxResourcesMemoryLimit(commonOpts.Memory)
578}
579if commonOpts.MemorySwap != 0 {
580g.SetLinuxResourcesMemorySwap(commonOpts.MemorySwap)
581}
582
583// cgroup membership
584if commonOpts.CgroupParent != "" {
585g.SetLinuxCgroupsPath(commonOpts.CgroupParent)
586}
587
588defaultContainerConfig, err := config.Default()
589if err != nil {
590return fmt.Errorf("failed to get container config: %w", err)
591}
592// Other process resource limits
593if err := addRlimits(commonOpts.Ulimit, g, defaultContainerConfig.Containers.DefaultUlimits.Get()); err != nil {
594return err
595}
596
597logrus.Debugf("Resources: %#v", commonOpts)
598return nil
599}
600
601func setupSlirp4netnsNetwork(config *config.Config, netns, cid string, options, hostnames []string) (func(), *netResult, error) {
602// we need the TmpDir for the slirp4netns code
603if err := os.MkdirAll(config.Engine.TmpDir, 0o751); err != nil {
604return nil, nil, fmt.Errorf("failed to create tempdir: %w", err)
605}
606res, err := slirp4netns.Setup(&slirp4netns.SetupOptions{
607Config: config,
608ContainerID: cid,
609Netns: netns,
610ExtraOptions: options,
611Pdeathsig: syscall.SIGKILL,
612})
613if err != nil {
614return nil, nil, err
615}
616
617ip, err := slirp4netns.GetIP(res.Subnet)
618if err != nil {
619return nil, nil, fmt.Errorf("get slirp4netns ip: %w", err)
620}
621
622dns, err := slirp4netns.GetDNS(res.Subnet)
623if err != nil {
624return nil, nil, fmt.Errorf("get slirp4netns dns ip: %w", err)
625}
626
627result := &netResult{
628entries: etchosts.HostEntries{{IP: ip.String(), Names: hostnames}},
629dnsServers: []string{dns.String()},
630ipv6: res.IPv6,
631keepHostResolvers: true,
632}
633
634return func() {
635syscall.Kill(res.Pid, syscall.SIGKILL) // nolint:errcheck
636var status syscall.WaitStatus
637syscall.Wait4(res.Pid, &status, 0, nil) // nolint:errcheck
638}, result, nil
639}
640
641func setupPasta(config *config.Config, netns string, options, hostnames []string) (func(), *netResult, error) {
642res, err := pasta.Setup2(&pasta.SetupOptions{
643Config: config,
644Netns: netns,
645ExtraOptions: options,
646})
647if err != nil {
648return nil, nil, err
649}
650
651var entries etchosts.HostEntries
652if len(res.IPAddresses) > 0 {
653entries = etchosts.HostEntries{{IP: res.IPAddresses[0].String(), Names: hostnames}}
654}
655
656result := &netResult{
657entries: entries,
658dnsServers: res.DNSForwardIPs,
659excludeIPs: res.IPAddresses,
660ipv6: res.IPv6,
661keepHostResolvers: true,
662}
663
664return nil, result, nil
665}
666
// runConfigureNetwork sets up networking for the container whose init
// process has the given pid.  Depending on the network argument and the
// isolation mode, it hands off to slirp4netns or pasta, or configures the
// named (or default) networks via b.NetworkInterface.  It returns a teardown
// function (may be nil) and the resulting host entries/DNS configuration.
func (b *Builder) runConfigureNetwork(pid int, isolation define.Isolation, options RunOptions, network, containerName string, hostnames []string) (func(), *netResult, error) {
	netns := fmt.Sprintf("/proc/%d/ns/net", pid)
	var configureNetworks []string
	defConfig, err := config.Default()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get container config: %w", err)
	}

	// The network value may carry comma-separated options after a colon,
	// e.g. "slirp4netns:port_handler=...".
	name, networkOpts, hasOpts := strings.Cut(network, ":")
	var netOpts []string
	if hasOpts {
		netOpts = strings.Split(networkOpts, ",")
	}
	// Rootless with no explicit network: use the configured rootless
	// network command (slirp4netns when unset).
	if isolation == IsolationOCIRootless && name == "" {
		switch defConfig.Network.DefaultRootlessNetworkCmd {
		case slirp4netns.BinaryName, "":
			name = slirp4netns.BinaryName
		case pasta.BinaryName:
			name = pasta.BinaryName
		default:
			return nil, nil, fmt.Errorf("invalid default_rootless_network_cmd option %q",
				defConfig.Network.DefaultRootlessNetworkCmd)
		}
	}

	switch {
	case name == slirp4netns.BinaryName:
		return setupSlirp4netnsNetwork(defConfig, netns, containerName, netOpts, hostnames)
	case name == pasta.BinaryName:
		return setupPasta(defConfig, netns, netOpts, hostnames)

	// Basically default case except we make sure to not split an empty
	// name as this would return a slice with one empty string which is
	// not a valid network name.
	case len(network) > 0:
		// old syntax allow comma separated network names
		configureNetworks = strings.Split(network, ",")
	}

	if isolation == IsolationOCIRootless {
		return nil, nil, errors.New("cannot use networks as rootless")
	}

	if len(configureNetworks) == 0 {
		configureNetworks = []string{b.NetworkInterface.DefaultNetworkName()}
	}

	// Make sure we can access the container's network namespace,
	// even after it exits, to successfully tear down the
	// interfaces. Ensure this by opening a handle to the network
	// namespace, and using our copy to both configure and
	// deconfigure it.
	// NOTE(review): netFD is never closed here or in the teardown
	// callback — confirm whether leaving it open for the process's
	// lifetime is intentional.
	netFD, err := unix.Open(netns, unix.O_RDONLY, 0)
	if err != nil {
		return nil, nil, fmt.Errorf("opening network namespace: %w", err)
	}
	mynetns := fmt.Sprintf("/proc/%d/fd/%d", unix.Getpid(), netFD)

	// Assign eth0, eth1, ... in the order the networks were listed.
	networks := make(map[string]nettypes.PerNetworkOptions, len(configureNetworks))
	for i, network := range configureNetworks {
		networks[network] = nettypes.PerNetworkOptions{
			InterfaceName: fmt.Sprintf("eth%d", i),
		}
	}

	opts := nettypes.NetworkOptions{
		ContainerID:   containerName,
		ContainerName: containerName,
		Networks:      networks,
	}
	netStatus, err := b.NetworkInterface.Setup(mynetns, nettypes.SetupOptions{NetworkOptions: opts})
	if err != nil {
		return nil, nil, err
	}

	teardown := func() {
		err := b.NetworkInterface.Teardown(mynetns, nettypes.TeardownOptions{NetworkOptions: opts})
		if err != nil {
			options.Logger.Errorf("failed to cleanup network: %v", err)
		}
	}

	return teardown, netStatusToNetResult(netStatus, hostnames), nil
}
751
752// Create pipes to use for relaying stdio.
753func runMakeStdioPipe(uid, gid int) ([][]int, error) {
754stdioPipe := make([][]int, 3)
755for i := range stdioPipe {
756stdioPipe[i] = make([]int, 2)
757if err := unix.Pipe(stdioPipe[i]); err != nil {
758return nil, fmt.Errorf("creating pipe for container FD %d: %w", i, err)
759}
760}
761if err := unix.Fchown(stdioPipe[unix.Stdin][0], uid, gid); err != nil {
762return nil, fmt.Errorf("setting owner of stdin pipe descriptor: %w", err)
763}
764if err := unix.Fchown(stdioPipe[unix.Stdout][1], uid, gid); err != nil {
765return nil, fmt.Errorf("setting owner of stdout pipe descriptor: %w", err)
766}
767if err := unix.Fchown(stdioPipe[unix.Stderr][1], uid, gid); err != nil {
768return nil, fmt.Errorf("setting owner of stderr pipe descriptor: %w", err)
769}
770return stdioPipe, nil
771}
772
773func setupNamespaces(logger *logrus.Logger, g *generate.Generator, namespaceOptions define.NamespaceOptions, idmapOptions define.IDMappingOptions, policy define.NetworkConfigurationPolicy) (configureNetwork bool, networkString string, configureUTS bool, err error) {
774defaultContainerConfig, err := config.Default()
775if err != nil {
776return false, "", false, fmt.Errorf("failed to get container config: %w", err)
777}
778
779addSysctl := func(prefixes []string) error {
780for _, sysctl := range defaultContainerConfig.Sysctls() {
781splitn := strings.SplitN(sysctl, "=", 2)
782if len(splitn) > 2 {
783return fmt.Errorf("sysctl %q defined in containers.conf must be formatted name=value", sysctl)
784}
785for _, prefix := range prefixes {
786if strings.HasPrefix(splitn[0], prefix) {
787g.AddLinuxSysctl(splitn[0], splitn[1])
788}
789}
790}
791return nil
792}
793
794// Set namespace options in the container configuration.
795configureUserns := false
796specifiedNetwork := false
797for _, namespaceOption := range namespaceOptions {
798switch namespaceOption.Name {
799case string(specs.IPCNamespace):
800if !namespaceOption.Host {
801if err := addSysctl([]string{"fs.mqueue"}); err != nil {
802return false, "", false, err
803}
804}
805case string(specs.UserNamespace):
806configureUserns = false
807if !namespaceOption.Host && namespaceOption.Path == "" {
808configureUserns = true
809}
810case string(specs.NetworkNamespace):
811specifiedNetwork = true
812configureNetwork = false
813if !namespaceOption.Host && (namespaceOption.Path == "" || !filepath.IsAbs(namespaceOption.Path)) {
814if namespaceOption.Path != "" && !filepath.IsAbs(namespaceOption.Path) {
815networkString = namespaceOption.Path
816namespaceOption.Path = ""
817}
818configureNetwork = (policy != define.NetworkDisabled)
819}
820case string(specs.UTSNamespace):
821configureUTS = false
822if !namespaceOption.Host {
823if namespaceOption.Path == "" {
824configureUTS = true
825}
826if err := addSysctl([]string{"kernel.hostname", "kernel.domainame"}); err != nil {
827return false, "", false, err
828}
829}
830}
831if namespaceOption.Host {
832if err := g.RemoveLinuxNamespace(namespaceOption.Name); err != nil {
833return false, "", false, fmt.Errorf("removing %q namespace for run: %w", namespaceOption.Name, err)
834}
835} else if err := g.AddOrReplaceLinuxNamespace(namespaceOption.Name, namespaceOption.Path); err != nil {
836if namespaceOption.Path == "" {
837return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", namespaceOption.Name, err)
838}
839return false, "", false, fmt.Errorf("adding %q namespace %q for run: %w", namespaceOption.Name, namespaceOption.Path, err)
840}
841}
842
843// If we've got mappings, we're going to have to create a user namespace.
844if len(idmapOptions.UIDMap) > 0 || len(idmapOptions.GIDMap) > 0 || configureUserns {
845if err := g.AddOrReplaceLinuxNamespace(string(specs.UserNamespace), ""); err != nil {
846return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.UserNamespace), err)
847}
848hostUidmap, hostGidmap, err := unshare.GetHostIDMappings("")
849if err != nil {
850return false, "", false, err
851}
852for _, m := range idmapOptions.UIDMap {
853g.AddLinuxUIDMapping(m.HostID, m.ContainerID, m.Size)
854}
855if len(idmapOptions.UIDMap) == 0 {
856for _, m := range hostUidmap {
857g.AddLinuxUIDMapping(m.ContainerID, m.ContainerID, m.Size)
858}
859}
860for _, m := range idmapOptions.GIDMap {
861g.AddLinuxGIDMapping(m.HostID, m.ContainerID, m.Size)
862}
863if len(idmapOptions.GIDMap) == 0 {
864for _, m := range hostGidmap {
865g.AddLinuxGIDMapping(m.ContainerID, m.ContainerID, m.Size)
866}
867}
868if !specifiedNetwork {
869if err := g.AddOrReplaceLinuxNamespace(string(specs.NetworkNamespace), ""); err != nil {
870return false, "", false, fmt.Errorf("adding new %q namespace for run: %w", string(specs.NetworkNamespace), err)
871}
872configureNetwork = (policy != define.NetworkDisabled)
873}
874} else {
875if err := g.RemoveLinuxNamespace(string(specs.UserNamespace)); err != nil {
876return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.UserNamespace), err)
877}
878if !specifiedNetwork {
879if err := g.RemoveLinuxNamespace(string(specs.NetworkNamespace)); err != nil {
880return false, "", false, fmt.Errorf("removing %q namespace for run: %w", string(specs.NetworkNamespace), err)
881}
882}
883}
884if configureNetwork {
885if err := addSysctl([]string{"net"}); err != nil {
886return false, "", false, err
887}
888}
889return configureNetwork, networkString, configureUTS, nil
890}
891
// configureNamespaces combines the default, builder-level, and per-run
// namespace options (later ones winning), applies the effective network
// policy, and configures the namespaces in g.  It returns whether the caller
// must set up networking and the network "name" string parsed from the
// namespace options, if any.
func (b *Builder) configureNamespaces(g *generate.Generator, options *RunOptions) (bool, string, error) {
	defaultNamespaceOptions, err := DefaultNamespaceOptions()
	if err != nil {
		return false, "", err
	}

	namespaceOptions := defaultNamespaceOptions
	namespaceOptions.AddOrReplace(b.NamespaceOptions...)
	namespaceOptions.AddOrReplace(options.NamespaceOptions...)

	networkPolicy := options.ConfigureNetwork
	// Nothing was specified explicitly so network policy should be inherited from builder
	if networkPolicy == NetworkDefault {
		networkPolicy = b.ConfigureNetwork

		// If builder policy was NetworkDisabled and
		// we want to disable network for this run.
		// reset options.ConfigureNetwork to NetworkDisabled
		// since it will be treated as source of truth later.
		if networkPolicy == NetworkDisabled {
			options.ConfigureNetwork = networkPolicy
		}
	}
	if networkPolicy == NetworkDisabled {
		namespaceOptions.AddOrReplace(define.NamespaceOptions{{Name: string(specs.NetworkNamespace), Host: false}}...)
	}
	configureNetwork, networkString, configureUTS, err := setupNamespaces(options.Logger, g, namespaceOptions, b.IDMappingOptions, networkPolicy)
	if err != nil {
		return false, "", err
	}

	// Pick the hostname: the per-run option, then the builder's, then a
	// truncated container ID; clear it when the UTS namespace is shared.
	if configureUTS {
		if options.Hostname != "" {
			g.SetHostname(options.Hostname)
		} else if b.Hostname() != "" {
			g.SetHostname(b.Hostname())
		} else {
			g.SetHostname(stringid.TruncateID(b.ContainerID))
		}
	} else {
		g.SetHostname("")
	}

	// Mirror the hostname into the environment unless the caller already
	// provided a HOSTNAME value.
	found := false
	spec := g.Config
	for i := range spec.Process.Env {
		if strings.HasPrefix(spec.Process.Env[i], "HOSTNAME=") {
			found = true
			break
		}
	}
	if !found {
		spec.Process.Env = append(spec.Process.Env, fmt.Sprintf("HOSTNAME=%s", spec.Hostname))
	}

	return configureNetwork, networkString, nil
}
949
950func runSetupBoundFiles(bundlePath string, bindFiles map[string]string) (mounts []specs.Mount) {
951for dest, src := range bindFiles {
952options := []string{"rbind"}
953if strings.HasPrefix(src, bundlePath) {
954options = append(options, bind.NoBindOption)
955}
956mounts = append(mounts, specs.Mount{
957Source: src,
958Destination: dest,
959Type: "bind",
960Options: options,
961})
962}
963return mounts
964}
965
966func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) error {
967var (
968ul *units.Ulimit
969err error
970// setup rlimits
971nofileSet bool
972nprocSet bool
973)
974
975ulimit = append(defaultUlimits, ulimit...)
976for _, u := range ulimit {
977if ul, err = butil.ParseUlimit(u); err != nil {
978return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err)
979}
980
981if strings.ToUpper(ul.Name) == "NOFILE" {
982nofileSet = true
983}
984if strings.ToUpper(ul.Name) == "NPROC" {
985nprocSet = true
986}
987g.AddProcessRlimits("RLIMIT_"+strings.ToUpper(ul.Name), uint64(ul.Hard), uint64(ul.Soft))
988}
989if !nofileSet {
990max := define.RLimitDefaultValue
991var rlimit unix.Rlimit
992if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit); err == nil {
993if max < rlimit.Max || unshare.IsRootless() {
994max = rlimit.Max
995}
996} else {
997logrus.Warnf("Failed to return RLIMIT_NOFILE ulimit %q", err)
998}
999g.AddProcessRlimits("RLIMIT_NOFILE", max, max)
1000}
1001if !nprocSet {
1002max := define.RLimitDefaultValue
1003var rlimit unix.Rlimit
1004if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlimit); err == nil {
1005if max < rlimit.Max || unshare.IsRootless() {
1006max = rlimit.Max
1007}
1008} else {
1009logrus.Warnf("Failed to return RLIMIT_NPROC ulimit %q", err)
1010}
1011g.AddProcessRlimits("RLIMIT_NPROC", max, max)
1012}
1013
1014return nil
1015}
1016
// runSetupVolumeMounts converts the mounts requested for this particular
// Run() invocation (optionMounts) and the volumes recorded when the
// container was created (volumeMounts) into spec mounts, performing any
// side effects (SELinux relabeling, ownership changes, overlay setup)
// that the per-mount options call for.
func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, idMaps IDMaps) (mounts []specs.Mount, Err error) {
	// Make sure the overlay directory is clean before running
	containerDir, err := b.store.ContainerDirectory(b.ContainerID)
	if err != nil {
		return nil, fmt.Errorf("looking up container directory for %s: %w", b.ContainerID, err)
	}
	if err := overlay.CleanupContent(containerDir); err != nil {
		return nil, fmt.Errorf("cleaning up overlay content for %s: %w", b.ContainerID, err)
	}

	// parseMount turns a single mount description into a spec mount,
	// applying the side effects its options request along the way.
	parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
		// Flags recognized in the option list.
		var foundrw, foundro, foundz, foundZ, foundO, foundU bool
		var rootProp, upperDir, workDir string
		for _, opt := range options {
			switch opt {
			case "rw":
				foundrw = true
			case "ro":
				foundro = true
			case "z":
				foundz = true
			case "Z":
				foundZ = true
			case "O":
				foundO = true
			case "U":
				foundU = true
			case "private", "rprivate", "slave", "rslave", "shared", "rshared":
				rootProp = opt
			}

			// "upperdir=..." and "workdir=..." carry values used by
			// overlay ("O") mounts below.
			if strings.HasPrefix(opt, "upperdir") {
				splitOpt := strings.SplitN(opt, "=", 2)
				if len(splitOpt) > 1 {
					upperDir = splitOpt[1]
				}
			}
			if strings.HasPrefix(opt, "workdir") {
				splitOpt := strings.SplitN(opt, "=", 2)
				if len(splitOpt) > 1 {
					workDir = splitOpt[1]
				}
			}
		}
		// Default to read-write when neither "rw" nor "ro" was given.
		if !foundrw && !foundro {
			options = append(options, "rw")
		}
		// "z" relabels shared; "Z" relabels private.
		if foundz {
			if err := relabel(host, mountLabel, true); err != nil {
				return specs.Mount{}, err
			}
		}
		if foundZ {
			if err := relabel(host, mountLabel, false); err != nil {
				return specs.Mount{}, err
			}
		}
		// "U" chowns the host path to the container process's UID/GID.
		if foundU {
			if err := chown.ChangeHostPathOwnership(host, true, idMaps.processUID, idMaps.processGID); err != nil {
				return specs.Mount{}, err
			}
		}
		// "O" mounts an overlay with the host path as the lower layer.
		if foundO {
			// upperdir and workdir only make sense together.
			if (upperDir != "" && workDir == "") || (workDir != "" && upperDir == "") {
				return specs.Mount{}, errors.New("if specifying upperdir then workdir must be specified or vice versa")
			}

			containerDir, err := b.store.ContainerDirectory(b.ContainerID)
			if err != nil {
				return specs.Mount{}, err
			}

			contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID)
			if err != nil {
				return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err)
			}

			overlayOpts := overlay.Options{
				RootUID:                idMaps.rootUID,
				RootGID:                idMaps.rootGID,
				UpperDirOptionFragment: upperDir,
				WorkDirOptionFragment:  workDir,
				GraphOpts:              b.store.GraphOptions(),
			}

			overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
			if err == nil {
				// Record the temporary directory so it can be
				// cleaned up later.
				b.TempVolumes[contentDir] = true
			}

			// If chown true, add correct ownership to the overlay temp directories.
			if foundU {
				if err := chown.ChangeHostPathOwnership(contentDir, true, idMaps.processUID, idMaps.processGID); err != nil {
					return specs.Mount{}, err
				}
			}

			return overlayMount, err
		}
		// Default to private propagation when none was requested.
		if rootProp == "" {
			options = append(options, "private")
		}
		// Anything other than a tmpfs is treated as a bind mount.
		if mountType != "tmpfs" {
			mountType = "bind"
			options = append(options, "rbind")
		}
		return specs.Mount{
			Destination: container,
			Type:        mountType,
			Source:      host,
			Options:     options,
		}, nil
	}

	// Bind mount volumes specified for this particular Run() invocation
	for _, i := range optionMounts {
		logrus.Debugf("setting up mounted volume at %q", i.Destination)
		mount, err := parseMount(i.Type, i.Source, i.Destination, i.Options)
		if err != nil {
			return nil, err
		}
		mounts = append(mounts, mount)
	}
	// Bind mount volumes given by the user when the container was created
	for _, i := range volumeMounts {
		var options []string
		// Volumes are "host:container[:options]" with colon escaping.
		spliti := parse.SplitStringWithColonEscape(i)
		if len(spliti) > 2 {
			options = strings.Split(spliti[2], ",")
		}
		options = append(options, "rbind")
		mount, err := parseMount("bind", spliti[0], spliti[1], options)
		if err != nil {
			return nil, err
		}
		mounts = append(mounts, mount)
	}
	return mounts, nil
}
1156
1157func setupMaskedPaths(g *generate.Generator) {
1158for _, mp := range config.DefaultMaskedPaths {
1159g.AddLinuxMaskedPaths(mp)
1160}
1161}
1162
1163func setupReadOnlyPaths(g *generate.Generator) {
1164for _, rp := range config.DefaultReadOnlyPaths {
1165g.AddLinuxReadonlyPaths(rp)
1166}
1167}
1168
1169func setupCapAdd(g *generate.Generator, caps ...string) error {
1170for _, cap := range caps {
1171if err := g.AddProcessCapabilityBounding(cap); err != nil {
1172return fmt.Errorf("adding %q to the bounding capability set: %w", cap, err)
1173}
1174if err := g.AddProcessCapabilityEffective(cap); err != nil {
1175return fmt.Errorf("adding %q to the effective capability set: %w", cap, err)
1176}
1177if err := g.AddProcessCapabilityPermitted(cap); err != nil {
1178return fmt.Errorf("adding %q to the permitted capability set: %w", cap, err)
1179}
1180if err := g.AddProcessCapabilityAmbient(cap); err != nil {
1181return fmt.Errorf("adding %q to the ambient capability set: %w", cap, err)
1182}
1183}
1184return nil
1185}
1186
1187func setupCapDrop(g *generate.Generator, caps ...string) error {
1188for _, cap := range caps {
1189if err := g.DropProcessCapabilityBounding(cap); err != nil {
1190return fmt.Errorf("removing %q from the bounding capability set: %w", cap, err)
1191}
1192if err := g.DropProcessCapabilityEffective(cap); err != nil {
1193return fmt.Errorf("removing %q from the effective capability set: %w", cap, err)
1194}
1195if err := g.DropProcessCapabilityPermitted(cap); err != nil {
1196return fmt.Errorf("removing %q from the permitted capability set: %w", cap, err)
1197}
1198if err := g.DropProcessCapabilityAmbient(cap); err != nil {
1199return fmt.Errorf("removing %q from the ambient capability set: %w", cap, err)
1200}
1201}
1202return nil
1203}
1204
1205func setupCapabilities(g *generate.Generator, defaultCapabilities, adds, drops []string) error {
1206g.ClearProcessCapabilities()
1207if err := setupCapAdd(g, defaultCapabilities...); err != nil {
1208return err
1209}
1210for _, c := range adds {
1211if strings.ToLower(c) == "all" {
1212adds = capabilities.AllCapabilities()
1213break
1214}
1215}
1216for _, c := range drops {
1217if strings.ToLower(c) == "all" {
1218g.ClearProcessCapabilities()
1219return nil
1220}
1221}
1222if err := setupCapAdd(g, adds...); err != nil {
1223return err
1224}
1225return setupCapDrop(g, drops...)
1226}
1227
1228func addOrReplaceMount(mounts []specs.Mount, mount specs.Mount) []specs.Mount {
1229for i := range mounts {
1230if mounts[i].Destination == mount.Destination {
1231mounts[i] = mount
1232return mounts
1233}
1234}
1235return append(mounts, mount)
1236}
1237
// setupSpecialMountSpecChanges creates special mounts for depending on the namespaces
// logic taken from podman and adapted for buildah
// https://github.com/containers/podman/blob/4ba71f955a944790edda6e007e6d074009d437a7/pkg/specgen/generate/oci.go#L178
func setupSpecialMountSpecChanges(spec *specs.Spec, shmSize string) ([]specs.Mount, error) {
	mounts := spec.Mounts
	isRootless := unshare.IsRootless()
	isNewUserns := false
	isNetns := false
	isPidns := false
	isIpcns := false

	// Figure out which new namespaces the spec requests.
	for _, namespace := range spec.Linux.Namespaces {
		switch namespace.Type {
		case specs.NetworkNamespace:
			isNetns = true
		case specs.UserNamespace:
			isNewUserns = true
		case specs.PIDNamespace:
			isPidns = true
		case specs.IPCNamespace:
			isIpcns = true
		}
	}

	addCgroup := true
	// mount sys when root and no userns or when a new netns is created
	canMountSys := (!isRootless && !isNewUserns) || isNetns
	if !canMountSys {
		// We can't mount a fresh sysfs, so bind mount the host's /sys
		// read-only instead, and skip the cgroup mount that would
		// normally live under it.
		addCgroup = false
		sys := "/sys"
		sysMnt := specs.Mount{
			Destination: sys,
			Type:        "bind",
			Source:      sys,
			Options:     []string{bind.NoBindOption, "rprivate", "nosuid", "noexec", "nodev", "ro", "rbind"},
		}
		mounts = addOrReplaceMount(mounts, sysMnt)
	}

	// Determine whether GID 5 is mapped, since the devpts mount's default
	// gid=5 option fails when it isn't (see the comment below).
	gid5Available := true
	if isRootless {
		_, gids, err := unshare.GetHostIDMappings("")
		if err != nil {
			return nil, err
		}
		gid5Available = checkIdsGreaterThan5(gids)
	}
	if gid5Available && len(spec.Linux.GIDMappings) > 0 {
		gid5Available = checkIdsGreaterThan5(spec.Linux.GIDMappings)
	}
	if !gid5Available {
		// If we have no GID mappings, the gid=5 default option would fail, so drop it.
		devPts := specs.Mount{
			Destination: "/dev/pts",
			Type:        "devpts",
			Source:      "devpts",
			Options:     []string{"rprivate", "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620"},
		}
		mounts = addOrReplaceMount(mounts, devPts)
	}

	isUserns := isNewUserns || isRootless

	// In a user namespace while sharing the host's IPC namespace: bind
	// mount the host's /dev/mqueue instead of mounting a new one.
	if isUserns && !isIpcns {
		devMqueue := "/dev/mqueue"
		devMqueueMnt := specs.Mount{
			Destination: devMqueue,
			Type:        "bind",
			Source:      devMqueue,
			Options:     []string{bind.NoBindOption, "bind", "nosuid", "noexec", "nodev"},
		}
		mounts = addOrReplaceMount(mounts, devMqueueMnt)
	}
	// In a user namespace while sharing the host's PID namespace: bind
	// mount the host's /proc instead of mounting a new one.
	if isUserns && !isPidns {
		proc := "/proc"
		procMount := specs.Mount{
			Destination: proc,
			Type:        "bind",
			Source:      proc,
			Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
		}
		mounts = addOrReplaceMount(mounts, procMount)
	}

	if addCgroup {
		cgroupMnt := specs.Mount{
			Destination: "/sys/fs/cgroup",
			Type:        "cgroup",
			Source:      "cgroup",
			Options:     []string{"rprivate", "nosuid", "noexec", "nodev", "relatime", "rw"},
		}
		mounts = addOrReplaceMount(mounts, cgroupMnt)
	}

	// if userns and host ipc bind mount shm
	if isUserns && !isIpcns {
		// bind mount /dev/shm when it exists
		if err := fileutils.Exists("/dev/shm"); err == nil {
			shmMount := specs.Mount{
				Source:      "/dev/shm",
				Type:        "bind",
				Destination: "/dev/shm",
				Options:     []string{bind.NoBindOption, "rbind", "nosuid", "noexec", "nodev"},
			}
			mounts = addOrReplaceMount(mounts, shmMount)
		}
	} else if shmSize != "" {
		// Otherwise mount a fresh tmpfs at /dev/shm with the requested size.
		shmMount := specs.Mount{
			Source:      "shm",
			Destination: "/dev/shm",
			Type:        "tmpfs",
			Options:     []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=" + shmSize},
		}
		mounts = addOrReplaceMount(mounts, shmMount)
	}

	return mounts, nil
}
1356
1357func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool {
1358for _, r := range ids {
1359if r.ContainerID <= 5 && 5 < r.ContainerID+r.Size {
1360return true
1361}
1362}
1363return false
1364}
1365
1366// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??).
1367func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, *lockfile.LockFile, error) {
1368var optionMounts []specs.Mount
1369mount, targetLock, err := volumes.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir)
1370if err != nil {
1371return nil, nil, err
1372}
1373succeeded := false
1374defer func() {
1375if !succeeded && targetLock != nil {
1376targetLock.Unlock()
1377}
1378}()
1379optionMounts = append(optionMounts, mount)
1380volumes, err := b.runSetupVolumeMounts(b.MountLabel, nil, optionMounts, idMaps)
1381if err != nil {
1382return nil, nil, err
1383}
1384succeeded = true
1385return &volumes[0], targetLock, nil
1386}
1387