podman
726 lines · 23.2 KB
1package buildah
2
3import (
4"archive/tar"
5"errors"
6"fmt"
7"io"
8"net/http"
9"net/url"
10"os"
11"path"
12"path/filepath"
13"strconv"
14"strings"
15"sync"
16"syscall"
17"time"
18
19"github.com/containers/buildah/copier"
20"github.com/containers/buildah/define"
21"github.com/containers/buildah/pkg/chrootuser"
22"github.com/containers/storage/pkg/fileutils"
23"github.com/containers/storage/pkg/idtools"
24"github.com/hashicorp/go-multierror"
25digest "github.com/opencontainers/go-digest"
26"github.com/opencontainers/runtime-spec/specs-go"
27"github.com/sirupsen/logrus"
28)
29
// AddAndCopyOptions holds options for add and copy commands.
type AddAndCopyOptions struct {
	// Chmod sets the access permissions of the destination content.
	Chmod string
	// Chown is a spec for the user who should be given ownership over the
	// newly-added content, potentially overriding permissions which would
	// otherwise be set to 0:0.
	Chown string
	// Checksum is a standard container digest string (e.g. <algorithm>:<digest>)
	// and is the expected hash of the content being copied.
	Checksum string
	// PreserveOwnership, if Chown is not set, tells us to avoid setting
	// ownership of copied items to 0:0, instead using whatever ownership
	// information is already set. Not meaningful for remote sources or
	// local archives that we extract.
	PreserveOwnership bool
	// All of the data being copied will pass through Hasher, if set.
	// If the sources are URLs or files, their contents will be passed to
	// Hasher.
	// If the sources include directory trees, Hasher will be passed
	// tar-format archives of the directory trees.
	Hasher io.Writer
	// Excludes is the contents of the .containerignore file.
	Excludes []string
	// IgnoreFile is the path to the .containerignore file.
	IgnoreFile string
	// ContextDir is the base directory for content being copied and
	// Excludes patterns.
	ContextDir string
	// IDMappingOptions are the ID mapping options to use when contents to
	// be copied are part of another container, and need ownerships to be
	// mapped from the host to that container's values before copying them
	// into the container.
	IDMappingOptions *define.IDMappingOptions
	// DryRun indicates that the content should be digested, but not actually
	// copied into the container.
	DryRun bool
	// StripSetuidBit clears the setuid bit on items being copied. Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripSetuidBit bool
	// StripSetgidBit clears the setgid bit on items being copied. Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripSetgidBit bool
	// StripStickyBit clears the sticky bit on items being copied. Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripStickyBit bool
}
76
// sourceIsRemote reports whether "source" names a remote (HTTP or HTTPS)
// location rather than a local path.
func sourceIsRemote(source string) bool {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(source, scheme) {
			return true
		}
	}
	return false
}
81
82// getURL writes a tar archive containing the named content
83func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest) error {
84url, err := url.Parse(src)
85if err != nil {
86return err
87}
88response, err := http.Get(src)
89if err != nil {
90return err
91}
92defer response.Body.Close()
93
94if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusBadRequest {
95return fmt.Errorf("invalid response status %d", response.StatusCode)
96}
97
98// Figure out what to name the new content.
99name := renameTarget
100if name == "" {
101name = path.Base(url.Path)
102}
103// If there's a date on the content, use it. If not, use the Unix epoch
104// for compatibility.
105date := time.Unix(0, 0).UTC()
106lastModified := response.Header.Get("Last-Modified")
107if lastModified != "" {
108d, err := time.Parse(time.RFC1123, lastModified)
109if err != nil {
110return fmt.Errorf("parsing last-modified time: %w", err)
111}
112date = d
113}
114// Figure out the size of the content.
115size := response.ContentLength
116var responseBody io.Reader = response.Body
117if size < 0 {
118// Create a temporary file and copy the content to it, so that
119// we can figure out how much content there is.
120f, err := os.CreateTemp(mountpoint, "download")
121if err != nil {
122return fmt.Errorf("creating temporary file to hold %q: %w", src, err)
123}
124defer os.Remove(f.Name())
125defer f.Close()
126size, err = io.Copy(f, response.Body)
127if err != nil {
128return fmt.Errorf("writing %q to temporary file %q: %w", src, f.Name(), err)
129}
130_, err = f.Seek(0, io.SeekStart)
131if err != nil {
132return fmt.Errorf("setting up to read %q from temporary file %q: %w", src, f.Name(), err)
133}
134responseBody = f
135}
136var digester digest.Digester
137if srcDigest != "" {
138digester = srcDigest.Algorithm().Digester()
139responseBody = io.TeeReader(responseBody, digester.Hash())
140}
141// Write the output archive. Set permissions for compatibility.
142tw := tar.NewWriter(writer)
143defer tw.Close()
144uid := 0
145gid := 0
146if chown != nil {
147uid = chown.UID
148gid = chown.GID
149}
150var mode int64 = 0600
151if chmod != nil {
152mode = int64(*chmod)
153}
154hdr := tar.Header{
155Typeflag: tar.TypeReg,
156Name: name,
157Size: size,
158Uid: uid,
159Gid: gid,
160Mode: mode,
161ModTime: date,
162}
163err = tw.WriteHeader(&hdr)
164if err != nil {
165return fmt.Errorf("writing header: %w", err)
166}
167
168if _, err := io.Copy(tw, responseBody); err != nil {
169return fmt.Errorf("writing content from %q to tar stream: %w", src, err)
170}
171
172if digester != nil {
173if responseDigest := digester.Digest(); responseDigest != srcDigest {
174return fmt.Errorf("unexpected response digest for %q: %s, want %s", src, responseDigest, srcDigest)
175}
176}
177
178return nil
179}
180
181// includeDirectoryAnyway returns true if "path" is a prefix for an exception
182// known to "pm". If "path" is a directory that "pm" claims matches its list
183// of patterns, but "pm"'s list of exclusions contains a pattern for which
184// "path" is a prefix, then IncludeDirectoryAnyway() will return true.
185// This is not always correct, because it relies on the directory part of any
186// exception paths to be specified without wildcards.
187func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool {
188if !pm.Exclusions() {
189return false
190}
191prefix := strings.TrimPrefix(path, string(os.PathSeparator)) + string(os.PathSeparator)
192for _, pattern := range pm.Patterns() {
193if !pattern.Exclusion() {
194continue
195}
196spec := strings.TrimPrefix(pattern.String(), string(os.PathSeparator))
197if strings.HasPrefix(spec, prefix) {
198return true
199}
200}
201return false
202}
203
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
//
// Remote (http/https) sources are downloaded and added as single files;
// local sources are resolved relative to options.ContextDir (or the current
// working directory when no context directory is set), globbed, filtered
// against options.Excludes, and streamed into the container through a
// tar pipe. All copied content is fed through b.ContentDigester (and
// options.Hasher, if set); with options.DryRun only the digesting happens.
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error {
	// Mount the container's root filesystem for the duration of the copy;
	// unmount is best-effort and only logged on failure.
	mountPoint, err := b.Mount(b.MountLabel)
	if err != nil {
		return err
	}
	defer func() {
		if err2 := b.Unmount(); err2 != nil {
			logrus.Errorf("error unmounting container: %v", err2)
		}
	}()

	// Resolve the context directory: default to "/" (with relative sources
	// anchored at the process's current directory) when none was given,
	// otherwise make the configured one absolute.
	contextDir := options.ContextDir
	currentDir := options.ContextDir
	if options.ContextDir == "" {
		contextDir = string(os.PathSeparator)
		currentDir, err = os.Getwd()
		if err != nil {
			return fmt.Errorf("determining current working directory: %w", err)
		}
	} else {
		if !filepath.IsAbs(options.ContextDir) {
			contextDir, err = filepath.Abs(options.ContextDir)
			if err != nil {
				return fmt.Errorf("converting context directory path %q to an absolute path: %w", options.ContextDir, err)
			}
		}
	}

	// Figure out what sorts of sources we have.
	var localSources, remoteSources []string
	for i, src := range sources {
		if sourceIsRemote(src) {
			remoteSources = append(remoteSources, src)
			continue
		}
		// Relative local paths are only rewritten against the current
		// directory when no explicit context directory was provided.
		if !filepath.IsAbs(src) && options.ContextDir == "" {
			sources[i] = filepath.Join(currentDir, src)
		}
		localSources = append(localSources, sources[i])
	}

	// Check how many items our local source specs matched. Each spec
	// should have matched at least one item, otherwise we consider it an
	// error.
	var localSourceStats []*copier.StatsForGlob
	if len(localSources) > 0 {
		statOptions := copier.StatOptions{
			CheckForArchives: extract,
		}
		localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
		if err != nil {
			return fmt.Errorf("checking on sources under %q: %w", contextDir, err)
		}
	}
	numLocalSourceItems := 0
	for _, localSourceStat := range localSourceStats {
		if localSourceStat.Error != "" {
			errorText := localSourceStat.Error
			rel, err := filepath.Rel(contextDir, localSourceStat.Glob)
			if err != nil {
				errorText = fmt.Sprintf("%v; %s", err, errorText)
			}
			// A "../" prefix means the glob pointed outside the context
			// directory; call that out specifically.
			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
				errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
			}
			return fmt.Errorf("checking on sources under %q: %v", contextDir, errorText)
		}
		if len(localSourceStat.Globbed) == 0 {
			return fmt.Errorf("checking source under %q: no glob matches: %w", contextDir, syscall.ENOENT)
		}
		numLocalSourceItems += len(localSourceStat.Globbed)
	}
	if numLocalSourceItems+len(remoteSources) == 0 {
		return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT)
	}

	// Find out which user (and group) the destination should belong to.
	var chownDirs, chownFiles *idtools.IDPair
	var userUID, userGID uint32
	if options.Chown != "" {
		userUID, userGID, err = b.userForCopy(mountPoint, options.Chown)
		if err != nil {
			return fmt.Errorf("looking up UID/GID for %q: %w", options.Chown, err)
		}
	}
	var chmodDirsFiles *os.FileMode
	if options.Chmod != "" {
		// Chmod is an octal string, e.g. "0755".
		p, err := strconv.ParseUint(options.Chmod, 8, 32)
		if err != nil {
			return fmt.Errorf("parsing chmod %q: %w", options.Chmod, err)
		}
		perm := os.FileMode(p)
		chmodDirsFiles = &perm
	}

	// Default ownership is 0:0 (userUID/userGID are zero unless Chown was
	// set above); PreserveOwnership suppresses the forced chown entirely.
	chownDirs = &idtools.IDPair{UID: int(userUID), GID: int(userGID)}
	chownFiles = &idtools.IDPair{UID: int(userUID), GID: int(userGID)}
	if options.Chown == "" && options.PreserveOwnership {
		chownDirs = nil
		chownFiles = nil
	}

	// If we have a single source archive to extract, or more than one
	// source item, or the destination has a path separator at the end of
	// it, and it's not a remote URL, the destination needs to be a
	// directory.
	if destination == "" || !filepath.IsAbs(destination) {
		// Make relative destinations absolute against the working directory,
		// preserving any trailing separator (it signals "directory").
		tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
		if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
			destination = tmpDestination + string(os.PathSeparator)
		} else {
			destination = tmpDestination
		}
	}
	destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator)) || destination == b.WorkDir()
	destCanBeFile := false
	if len(sources) == 1 {
		if len(remoteSources) == 1 {
			destCanBeFile = sourceIsRemote(sources[0])
		}
		if len(localSources) == 1 {
			item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]]
			if item.IsDir || (item.IsArchive && extract) {
				destMustBeDirectory = true
			}
			if item.IsRegular {
				destCanBeFile = true
			}
		}
	}

	// We care if the destination either doesn't exist, or exists and is a
	// file. If the source can be a single file, for those cases we treat
	// the destination as a file rather than as a directory tree.
	renameTarget := ""
	extractDirectory := filepath.Join(mountPoint, destination)
	statOptions := copier.StatOptions{
		CheckForArchives: extract,
	}
	destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
	if err != nil {
		return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
	}
	if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
		// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
		renameTarget = filepath.Base(extractDirectory)
		extractDirectory = filepath.Dir(extractDirectory)
	}

	// if the destination is a directory that doesn't yet exist, let's copy it.
	// NOTE(review): when len(destStats) == 0, the second operand indexes
	// destStats[0] and would panic; this appears to rely on copier.Stat
	// returning one entry per requested glob — confirm that invariant.
	newDestDirFound := false
	if (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile {
		newDestDirFound = true
	}

	if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
		if destMustBeDirectory {
			return fmt.Errorf("destination %v already exists but is not a directory", destination)
		}
		// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
		renameTarget = filepath.Base(extractDirectory)
		extractDirectory = filepath.Dir(extractDirectory)
	}

	// Build the dockerignore-style matcher used to filter local sources.
	pm, err := fileutils.NewPatternMatcher(options.Excludes)
	if err != nil {
		return fmt.Errorf("processing excludes list %v: %w", options.Excludes, err)
	}

	// Make sure that, if it's a symlink, we'll chroot to the target of the link;
	// knowing that target requires that we resolve it within the chroot.
	evalOptions := copier.EvalOptions{}
	evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions)
	if err != nil {
		return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
	}
	extractDirectory = evaluated

	// Set up ID maps.
	var srcUIDMap, srcGIDMap []idtools.IDMap
	if options.IDMappingOptions != nil {
		srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
	}
	destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)

	// Create the target directory if it doesn't exist yet.
	mkdirOptions := copier.MkdirOptions{
		UIDMap:   destUIDMap,
		GIDMap:   destGIDMap,
		ChownNew: chownDirs,
	}
	if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
		return fmt.Errorf("ensuring target directory exists: %w", err)
	}

	// Copy each source in turn. Each source is handled by a pair of
	// goroutines joined over an io.Pipe: a producer that writes a tar
	// stream, and a consumer that digests it and (unless DryRun) extracts
	// it into the container. wg.Wait() inside the loop body means the
	// goroutines never outlive their iteration, so capturing the loop
	// variables is safe even before Go 1.22 semantics.
	for _, src := range sources {
		var multiErr *multierror.Error
		var getErr, closeErr, renameErr, putErr error
		var wg sync.WaitGroup
		if sourceIsRemote(src) {
			pipeReader, pipeWriter := io.Pipe()
			var srcDigest digest.Digest
			if options.Checksum != "" {
				srcDigest, err = digest.Parse(options.Checksum)
				if err != nil {
					return fmt.Errorf("invalid checksum flag: %w", err)
				}
			}
			// Producer: download the URL and write it as a tar stream.
			wg.Add(1)
			go func() {
				getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest)
				pipeWriter.Close()
				wg.Done()
			}()
			// Consumer: digest the stream and extract it into the container.
			wg.Add(1)
			go func() {
				b.ContentDigester.Start("")
				hashCloser := b.ContentDigester.Hash()
				hasher := io.Writer(hashCloser)
				if options.Hasher != nil {
					hasher = io.MultiWriter(hasher, options.Hasher)
				}
				if options.DryRun {
					_, putErr = io.Copy(hasher, pipeReader)
				} else {
					// Ownership/permissions were already baked into the tar
					// headers by getURL, so no chown/chmod overrides here.
					putOptions := copier.PutOptions{
						UIDMap:        destUIDMap,
						GIDMap:        destGIDMap,
						ChownDirs:     nil,
						ChmodDirs:     nil,
						ChownFiles:    nil,
						ChmodFiles:    nil,
						IgnoreDevices: runningInUserNS(),
					}
					putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
				}
				hashCloser.Close()
				pipeReader.Close()
				wg.Done()
			}()
			wg.Wait()
			if getErr != nil {
				getErr = fmt.Errorf("reading %q: %w", src, getErr)
			}
			if putErr != nil {
				putErr = fmt.Errorf("storing %q: %w", src, putErr)
			}
			multiErr = multierror.Append(getErr, putErr)
			if multiErr != nil && multiErr.ErrorOrNil() != nil {
				// Unwrap a single error so callers can errors.Is/As it.
				if len(multiErr.Errors) > 1 {
					return multiErr.ErrorOrNil()
				}
				return multiErr.Errors[0]
			}
			continue
		}

		if options.Checksum != "" {
			return fmt.Errorf("checksum flag is not supported for local sources")
		}

		// Dig out the result of running glob+stat on this source spec.
		var localSourceStat *copier.StatsForGlob
		for _, st := range localSourceStats {
			if st.Glob == src {
				localSourceStat = st
				break
			}
		}
		if localSourceStat == nil {
			continue
		}

		// Iterate through every item that matched the glob.
		itemsCopied := 0
		for _, glob := range localSourceStat.Globbed {
			rel := glob
			if filepath.IsAbs(glob) {
				if rel, err = filepath.Rel(contextDir, glob); err != nil {
					return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
				}
			}
			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
				return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
			}
			// Check for dockerignore-style exclusion of this item.
			if rel != "." {
				excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
				if err != nil {
					return fmt.Errorf("checking if %q(%q) is excluded: %w", glob, rel, err)
				}
				if excluded {
					// non-directories that are excluded are excluded, no question, but
					// directories can only be skipped if we don't have to allow for the
					// possibility of finding things to include under them
					globInfo := localSourceStat.Results[glob]
					if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) {
						continue
					}
				} else {
					// if the destination is a directory that doesn't yet exist, and is not excluded, let's copy it.
					if newDestDirFound {
						itemsCopied++
					}
				}
			} else {
				// Make sure we don't trigger a "copied nothing" error for an empty context
				// directory if we were told to copy the context directory itself. We won't
				// actually copy it, but we need to make sure that we don't produce an error
				// due to potentially not having anything in the tarstream that we passed.
				itemsCopied++
			}
			st := localSourceStat.Results[glob]
			pipeReader, pipeWriter := io.Pipe()
			// Producer: read the item from the context directory as a tar
			// stream, optionally renaming the single entry and counting
			// entries via tar-filtering shims.
			wg.Add(1)
			go func() {
				renamedItems := 0
				writer := io.WriteCloser(pipeWriter)
				if renameTarget != "" {
					writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
						hdr.Name = renameTarget
						renamedItems++
						return false, false, nil
					})
				}
				// Count every header that passes through; itemsCopied is
				// only read after wg.Wait(), so this cross-goroutine
				// increment is ordered safely.
				writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
					itemsCopied++
					return false, false, nil
				})
				getOptions := copier.GetOptions{
					UIDMap:         srcUIDMap,
					GIDMap:         srcGIDMap,
					Excludes:       options.Excludes,
					ExpandArchives: extract,
					ChownDirs:      chownDirs,
					ChmodDirs:      chmodDirsFiles,
					ChownFiles:     chownFiles,
					ChmodFiles:     chmodDirsFiles,
					StripSetuidBit: options.StripSetuidBit,
					StripSetgidBit: options.StripSetgidBit,
					StripStickyBit: options.StripStickyBit,
				}
				getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
				closeErr = writer.Close()
				if renameTarget != "" && renamedItems > 1 {
					renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
				}
				wg.Done()
			}()
			// Consumer: digest (keyed by "dir" or "file") and extract.
			wg.Add(1)
			go func() {
				if st.IsDir {
					b.ContentDigester.Start("dir")
				} else {
					b.ContentDigester.Start("file")
				}
				hashCloser := b.ContentDigester.Hash()
				hasher := io.Writer(hashCloser)
				if options.Hasher != nil {
					hasher = io.MultiWriter(hasher, options.Hasher)
				}
				if options.DryRun {
					_, putErr = io.Copy(hasher, pipeReader)
				} else {
					putOptions := copier.PutOptions{
						UIDMap:          destUIDMap,
						GIDMap:          destGIDMap,
						DefaultDirOwner: chownDirs,
						DefaultDirMode:  nil,
						ChownDirs:       nil,
						ChmodDirs:       nil,
						ChownFiles:      nil,
						ChmodFiles:      nil,
						IgnoreDevices:   runningInUserNS(),
					}
					putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
				}
				hashCloser.Close()
				pipeReader.Close()
				wg.Done()
			}()
			wg.Wait()
			if getErr != nil {
				getErr = fmt.Errorf("reading %q: %w", src, getErr)
			}
			if closeErr != nil {
				closeErr = fmt.Errorf("closing %q: %w", src, closeErr)
			}
			if renameErr != nil {
				renameErr = fmt.Errorf("renaming %q: %w", src, renameErr)
			}
			if putErr != nil {
				putErr = fmt.Errorf("storing %q: %w", src, putErr)
			}
			multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
			if multiErr != nil && multiErr.ErrorOrNil() != nil {
				// Unwrap a single error so callers can errors.Is/As it.
				if len(multiErr.Errors) > 1 {
					return multiErr.ErrorOrNil()
				}
				return multiErr.Errors[0]
			}
		}
		// Everything the glob matched was filtered out: report it,
		// mentioning the ignore file if one was configured.
		if itemsCopied == 0 {
			excludesFile := ""
			if options.IgnoreFile != "" {
				excludesFile = " using " + options.IgnoreFile
			}
			return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT)
		}
	}
	return nil
}
620
621// userForRun returns the user (and group) information which we should use for
622// running commands
623func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, string, error) {
624if userspec == "" {
625userspec = b.User()
626}
627
628uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
629u := specs.User{
630UID: uid,
631GID: gid,
632Username: userspec,
633}
634if !strings.Contains(userspec, ":") {
635groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
636if err2 != nil {
637if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
638err = err2
639}
640} else {
641u.AdditionalGids = groups
642}
643
644}
645return u, homeDir, err
646}
647
648// userForCopy returns the user (and group) information which we should use for
649// setting ownership of contents being copied. It's just like what
650// userForRun() does, except for the case where we're passed a single numeric
651// value, where we need to use that value for both the UID and the GID.
652func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint32, error) {
653var (
654user, group string
655uid, gid uint64
656err error
657)
658
659split := strings.SplitN(userspec, ":", 2)
660user = split[0]
661if len(split) > 1 {
662group = split[1]
663}
664
665// If userspec did not specify any values for user or group, then fail
666if user == "" && group == "" {
667return 0, 0, fmt.Errorf("can't find uid for user %s", userspec)
668}
669
670// If userspec specifies values for user or group, check for numeric values
671// and return early. If not, then translate username/groupname
672if user != "" {
673uid, err = strconv.ParseUint(user, 10, 32)
674}
675if err == nil {
676// default gid to uid
677gid = uid
678if group != "" {
679gid, err = strconv.ParseUint(group, 10, 32)
680}
681}
682// If err != nil, then user or group not numeric, check filesystem
683if err == nil {
684return uint32(uid), uint32(gid), nil
685}
686
687owner, _, err := b.userForRun(mountPoint, userspec)
688if err != nil {
689return 0xffffffff, 0xffffffff, err
690}
691return owner.UID, owner.GID, nil
692}
693
694// EnsureContainerPathAs creates the specified directory owned by USER
695// with the file mode set to MODE.
696func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
697mountPoint, err := b.Mount(b.MountLabel)
698if err != nil {
699return err
700}
701defer func() {
702if err2 := b.Unmount(); err2 != nil {
703logrus.Errorf("error unmounting container: %v", err2)
704}
705}()
706
707uid, gid := uint32(0), uint32(0)
708if user != "" {
709if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil {
710uid = uidForCopy
711gid = gidForCopy
712}
713}
714
715destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
716
717idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
718opts := copier.MkdirOptions{
719ChmodNew: mode,
720ChownNew: idPair,
721UIDMap: destUIDMap,
722GIDMap: destGIDMap,
723}
724return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts)
725
726}
727