podman

Форк
0
726 строк · 23.2 Кб
1
package buildah
2

3
import (
4
	"archive/tar"
5
	"errors"
6
	"fmt"
7
	"io"
8
	"net/http"
9
	"net/url"
10
	"os"
11
	"path"
12
	"path/filepath"
13
	"strconv"
14
	"strings"
15
	"sync"
16
	"syscall"
17
	"time"
18

19
	"github.com/containers/buildah/copier"
20
	"github.com/containers/buildah/define"
21
	"github.com/containers/buildah/pkg/chrootuser"
22
	"github.com/containers/storage/pkg/fileutils"
23
	"github.com/containers/storage/pkg/idtools"
24
	"github.com/hashicorp/go-multierror"
25
	digest "github.com/opencontainers/go-digest"
26
	"github.com/opencontainers/runtime-spec/specs-go"
27
	"github.com/sirupsen/logrus"
28
)
29

30
// AddAndCopyOptions holds options for add and copy commands.
type AddAndCopyOptions struct {
	// Chmod sets the access permissions of the destination content.
	Chmod string
	// Chown is a spec for the user who should be given ownership over the
	// newly-added content, potentially overriding permissions which would
	// otherwise be set to 0:0.
	Chown string
	// Checksum is a standard container digest string (e.g. <algorithm>:<digest>)
	// and is the expected hash of the content being copied.
	Checksum string
	// PreserveOwnership, if Chown is not set, tells us to avoid setting
	// ownership of copied items to 0:0, instead using whatever ownership
	// information is already set.  Not meaningful for remote sources or
	// local archives that we extract.
	PreserveOwnership bool
	// All of the data being copied will pass through Hasher, if set.
	// If the sources are URLs or files, their contents will be passed to
	// Hasher.
	// If the sources include directory trees, Hasher will be passed
	// tar-format archives of the directory trees.
	Hasher io.Writer
	// Excludes is the contents of the .containerignore file.
	Excludes []string
	// IgnoreFile is the path to the .containerignore file.
	IgnoreFile string
	// ContextDir is the base directory for content being copied and
	// Excludes patterns.
	ContextDir string
	// IDMappingOptions are ID mapping options to use when contents to be
	// copied are part of another container, and need ownerships to be
	// mapped from the host to that container's values before copying them
	// into the container.
	IDMappingOptions *define.IDMappingOptions
	// DryRun indicates that the content should be digested, but not actually
	// copied into the container.
	DryRun bool
	// StripSetuidBit clears the setuid bit on items being copied.  Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripSetuidBit bool
	// StripSetgidBit clears the setgid bit on items being copied.  Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripSetgidBit bool
	// StripStickyBit clears the sticky bit on items being copied.  Has no
	// effect on archives being extracted, where the bit is always preserved.
	StripStickyBit bool
}
76

77
// sourceIsRemote returns true if "source" is a remote location,
// i.e. it begins with an http:// or https:// scheme prefix.
func sourceIsRemote(source string) bool {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(source, scheme) {
			return true
		}
	}
	return false
}
81

82
// getURL writes a tar archive containing the named content
83
func getURL(src string, chown *idtools.IDPair, mountpoint, renameTarget string, writer io.Writer, chmod *os.FileMode, srcDigest digest.Digest) error {
84
	url, err := url.Parse(src)
85
	if err != nil {
86
		return err
87
	}
88
	response, err := http.Get(src)
89
	if err != nil {
90
		return err
91
	}
92
	defer response.Body.Close()
93

94
	if response.StatusCode < http.StatusOK || response.StatusCode >= http.StatusBadRequest {
95
		return fmt.Errorf("invalid response status %d", response.StatusCode)
96
	}
97

98
	// Figure out what to name the new content.
99
	name := renameTarget
100
	if name == "" {
101
		name = path.Base(url.Path)
102
	}
103
	// If there's a date on the content, use it.  If not, use the Unix epoch
104
	// for compatibility.
105
	date := time.Unix(0, 0).UTC()
106
	lastModified := response.Header.Get("Last-Modified")
107
	if lastModified != "" {
108
		d, err := time.Parse(time.RFC1123, lastModified)
109
		if err != nil {
110
			return fmt.Errorf("parsing last-modified time: %w", err)
111
		}
112
		date = d
113
	}
114
	// Figure out the size of the content.
115
	size := response.ContentLength
116
	var responseBody io.Reader = response.Body
117
	if size < 0 {
118
		// Create a temporary file and copy the content to it, so that
119
		// we can figure out how much content there is.
120
		f, err := os.CreateTemp(mountpoint, "download")
121
		if err != nil {
122
			return fmt.Errorf("creating temporary file to hold %q: %w", src, err)
123
		}
124
		defer os.Remove(f.Name())
125
		defer f.Close()
126
		size, err = io.Copy(f, response.Body)
127
		if err != nil {
128
			return fmt.Errorf("writing %q to temporary file %q: %w", src, f.Name(), err)
129
		}
130
		_, err = f.Seek(0, io.SeekStart)
131
		if err != nil {
132
			return fmt.Errorf("setting up to read %q from temporary file %q: %w", src, f.Name(), err)
133
		}
134
		responseBody = f
135
	}
136
	var digester digest.Digester
137
	if srcDigest != "" {
138
		digester = srcDigest.Algorithm().Digester()
139
		responseBody = io.TeeReader(responseBody, digester.Hash())
140
	}
141
	// Write the output archive.  Set permissions for compatibility.
142
	tw := tar.NewWriter(writer)
143
	defer tw.Close()
144
	uid := 0
145
	gid := 0
146
	if chown != nil {
147
		uid = chown.UID
148
		gid = chown.GID
149
	}
150
	var mode int64 = 0600
151
	if chmod != nil {
152
		mode = int64(*chmod)
153
	}
154
	hdr := tar.Header{
155
		Typeflag: tar.TypeReg,
156
		Name:     name,
157
		Size:     size,
158
		Uid:      uid,
159
		Gid:      gid,
160
		Mode:     mode,
161
		ModTime:  date,
162
	}
163
	err = tw.WriteHeader(&hdr)
164
	if err != nil {
165
		return fmt.Errorf("writing header: %w", err)
166
	}
167

168
	if _, err := io.Copy(tw, responseBody); err != nil {
169
		return fmt.Errorf("writing content from %q to tar stream: %w", src, err)
170
	}
171

172
	if digester != nil {
173
		if responseDigest := digester.Digest(); responseDigest != srcDigest {
174
			return fmt.Errorf("unexpected response digest for %q: %s, want %s", src, responseDigest, srcDigest)
175
		}
176
	}
177

178
	return nil
179
}
180

181
// includeDirectoryAnyway returns true if "path" is a prefix for an exception
182
// known to "pm".  If "path" is a directory that "pm" claims matches its list
183
// of patterns, but "pm"'s list of exclusions contains a pattern for which
184
// "path" is a prefix, then IncludeDirectoryAnyway() will return true.
185
// This is not always correct, because it relies on the directory part of any
186
// exception paths to be specified without wildcards.
187
func includeDirectoryAnyway(path string, pm *fileutils.PatternMatcher) bool {
188
	if !pm.Exclusions() {
189
		return false
190
	}
191
	prefix := strings.TrimPrefix(path, string(os.PathSeparator)) + string(os.PathSeparator)
192
	for _, pattern := range pm.Patterns() {
193
		if !pattern.Exclusion() {
194
			continue
195
		}
196
		spec := strings.TrimPrefix(pattern.String(), string(os.PathSeparator))
197
		if strings.HasPrefix(spec, prefix) {
198
			return true
199
		}
200
	}
201
	return false
202
}
203

204
// Add copies the contents of the specified sources into the container's root
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.  Remote (http/https) sources are downloaded; local
// sources are resolved relative to options.ContextDir (or the current
// working directory if none is set) and filtered through options.Excludes.
// The container stays mounted for the duration of the call.
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, sources ...string) error {
	mountPoint, err := b.Mount(b.MountLabel)
	if err != nil {
		return err
	}
	defer func() {
		if err2 := b.Unmount(); err2 != nil {
			logrus.Errorf("error unmounting container: %v", err2)
		}
	}()

	// Establish the context directory (defaults to "/") and the directory
	// used to resolve relative local sources.
	contextDir := options.ContextDir
	currentDir := options.ContextDir
	if options.ContextDir == "" {
		contextDir = string(os.PathSeparator)
		currentDir, err = os.Getwd()
		if err != nil {
			return fmt.Errorf("determining current working directory: %w", err)
		}
	} else {
		if !filepath.IsAbs(options.ContextDir) {
			contextDir, err = filepath.Abs(options.ContextDir)
			if err != nil {
				return fmt.Errorf("converting context directory path %q to an absolute path: %w", options.ContextDir, err)
			}
		}
	}

	// Figure out what sorts of sources we have.
	var localSources, remoteSources []string
	for i, src := range sources {
		if sourceIsRemote(src) {
			remoteSources = append(remoteSources, src)
			continue
		}
		// NOTE: relative local sources are rewritten in place so the
		// copy loop below sees absolute paths.
		if !filepath.IsAbs(src) && options.ContextDir == "" {
			sources[i] = filepath.Join(currentDir, src)
		}
		localSources = append(localSources, sources[i])
	}

	// Check how many items our local source specs matched.  Each spec
	// should have matched at least one item, otherwise we consider it an
	// error.
	var localSourceStats []*copier.StatsForGlob
	if len(localSources) > 0 {
		statOptions := copier.StatOptions{
			CheckForArchives: extract,
		}
		localSourceStats, err = copier.Stat(contextDir, contextDir, statOptions, localSources)
		if err != nil {
			return fmt.Errorf("checking on sources under %q: %w", contextDir, err)
		}
	}
	numLocalSourceItems := 0
	for _, localSourceStat := range localSourceStats {
		if localSourceStat.Error != "" {
			errorText := localSourceStat.Error
			rel, err := filepath.Rel(contextDir, localSourceStat.Glob)
			if err != nil {
				errorText = fmt.Sprintf("%v; %s", err, errorText)
			}
			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
				errorText = fmt.Sprintf("possible escaping context directory error: %s", errorText)
			}
			return fmt.Errorf("checking on sources under %q: %v", contextDir, errorText)
		}
		if len(localSourceStat.Globbed) == 0 {
			return fmt.Errorf("checking source under %q: no glob matches: %w", contextDir, syscall.ENOENT)
		}
		numLocalSourceItems += len(localSourceStat.Globbed)
	}
	if numLocalSourceItems+len(remoteSources) == 0 {
		return fmt.Errorf("no sources %v found: %w", sources, syscall.ENOENT)
	}

	// Find out which user (and group) the destination should belong to.
	var chownDirs, chownFiles *idtools.IDPair
	var userUID, userGID uint32
	if options.Chown != "" {
		userUID, userGID, err = b.userForCopy(mountPoint, options.Chown)
		if err != nil {
			return fmt.Errorf("looking up UID/GID for %q: %w", options.Chown, err)
		}
	}
	var chmodDirsFiles *os.FileMode
	if options.Chmod != "" {
		p, err := strconv.ParseUint(options.Chmod, 8, 32)
		if err != nil {
			return fmt.Errorf("parsing chmod %q: %w", options.Chmod, err)
		}
		perm := os.FileMode(p)
		chmodDirsFiles = &perm
	}

	// Default ownership is userUID:userGID (0:0 when no Chown was given),
	// unless the caller asked to preserve existing ownership.
	chownDirs = &idtools.IDPair{UID: int(userUID), GID: int(userGID)}
	chownFiles = &idtools.IDPair{UID: int(userUID), GID: int(userGID)}
	if options.Chown == "" && options.PreserveOwnership {
		chownDirs = nil
		chownFiles = nil
	}

	// If we have a single source archive to extract, or more than one
	// source item, or the destination has a path separator at the end of
	// it, and it's not a remote URL, the destination needs to be a
	// directory.
	if destination == "" || !filepath.IsAbs(destination) {
		// Relative destinations are resolved against the working directory,
		// preserving any trailing separator (it signals "directory").
		tmpDestination := filepath.Join(string(os.PathSeparator)+b.WorkDir(), destination)
		if destination == "" || strings.HasSuffix(destination, string(os.PathSeparator)) {
			destination = tmpDestination + string(os.PathSeparator)
		} else {
			destination = tmpDestination
		}
	}
	destMustBeDirectory := (len(sources) > 1) || strings.HasSuffix(destination, string(os.PathSeparator)) || destination == b.WorkDir()
	destCanBeFile := false
	if len(sources) == 1 {
		if len(remoteSources) == 1 {
			destCanBeFile = sourceIsRemote(sources[0])
		}
		if len(localSources) == 1 {
			item := localSourceStats[0].Results[localSourceStats[0].Globbed[0]]
			if item.IsDir || (item.IsArchive && extract) {
				destMustBeDirectory = true
			}
			if item.IsRegular {
				destCanBeFile = true
			}
		}
	}

	// We care if the destination either doesn't exist, or exists and is a
	// file.  If the source can be a single file, for those cases we treat
	// the destination as a file rather than as a directory tree.
	renameTarget := ""
	extractDirectory := filepath.Join(mountPoint, destination)
	statOptions := copier.StatOptions{
		CheckForArchives: extract,
	}
	destStats, err := copier.Stat(mountPoint, filepath.Join(mountPoint, b.WorkDir()), statOptions, []string{extractDirectory})
	if err != nil {
		return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
	}
	if (len(destStats) == 0 || len(destStats[0].Globbed) == 0) && !destMustBeDirectory && destCanBeFile {
		// destination doesn't exist - extract to parent and rename the incoming file to the destination's name
		renameTarget = filepath.Base(extractDirectory)
		extractDirectory = filepath.Dir(extractDirectory)
	}

	// if the destination is a directory that doesn't yet exist, let's copy it.
	newDestDirFound := false
	if (len(destStats) == 1 || len(destStats[0].Globbed) == 0) && destMustBeDirectory && !destCanBeFile {
		newDestDirFound = true
	}

	if len(destStats) == 1 && len(destStats[0].Globbed) == 1 && destStats[0].Results[destStats[0].Globbed[0]].IsRegular {
		if destMustBeDirectory {
			return fmt.Errorf("destination %v already exists but is not a directory", destination)
		}
		// destination exists - it's a file, we need to extract to parent and rename the incoming file to the destination's name
		renameTarget = filepath.Base(extractDirectory)
		extractDirectory = filepath.Dir(extractDirectory)
	}

	pm, err := fileutils.NewPatternMatcher(options.Excludes)
	if err != nil {
		return fmt.Errorf("processing excludes list %v: %w", options.Excludes, err)
	}

	// Make sure that, if it's a symlink, we'll chroot to the target of the link;
	// knowing that target requires that we resolve it within the chroot.
	evalOptions := copier.EvalOptions{}
	evaluated, err := copier.Eval(mountPoint, extractDirectory, evalOptions)
	if err != nil {
		return fmt.Errorf("checking on destination %v: %w", extractDirectory, err)
	}
	extractDirectory = evaluated

	// Set up ID maps.
	var srcUIDMap, srcGIDMap []idtools.IDMap
	if options.IDMappingOptions != nil {
		srcUIDMap, srcGIDMap = convertRuntimeIDMaps(options.IDMappingOptions.UIDMap, options.IDMappingOptions.GIDMap)
	}
	destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)

	// Create the target directory if it doesn't exist yet.
	mkdirOptions := copier.MkdirOptions{
		UIDMap:   destUIDMap,
		GIDMap:   destGIDMap,
		ChownNew: chownDirs,
	}
	if err := copier.Mkdir(mountPoint, extractDirectory, mkdirOptions); err != nil {
		return fmt.Errorf("ensuring target directory exists: %w", err)
	}

	// Copy each source in turn.  Each source is handled by a pair of
	// goroutines connected by an io.Pipe: a producer that writes a tar
	// stream, and a consumer that digests it and (unless DryRun) extracts
	// it into the container.  The error variables the goroutines assign to
	// are only read after wg.Wait(), which provides the synchronization.
	for _, src := range sources {
		var multiErr *multierror.Error
		var getErr, closeErr, renameErr, putErr error
		var wg sync.WaitGroup
		if sourceIsRemote(src) {
			pipeReader, pipeWriter := io.Pipe()
			var srcDigest digest.Digest
			if options.Checksum != "" {
				srcDigest, err = digest.Parse(options.Checksum)
				if err != nil {
					return fmt.Errorf("invalid checksum flag: %w", err)
				}
			}
			wg.Add(1)
			go func() {
				// Producer: download the URL as a single-entry tar stream.
				getErr = getURL(src, chownFiles, mountPoint, renameTarget, pipeWriter, chmodDirsFiles, srcDigest)
				pipeWriter.Close()
				wg.Done()
			}()
			wg.Add(1)
			go func() {
				// Consumer: digest the stream and extract it unless this
				// is a dry run.
				b.ContentDigester.Start("")
				hashCloser := b.ContentDigester.Hash()
				hasher := io.Writer(hashCloser)
				if options.Hasher != nil {
					hasher = io.MultiWriter(hasher, options.Hasher)
				}
				if options.DryRun {
					_, putErr = io.Copy(hasher, pipeReader)
				} else {
					putOptions := copier.PutOptions{
						UIDMap:        destUIDMap,
						GIDMap:        destGIDMap,
						ChownDirs:     nil,
						ChmodDirs:     nil,
						ChownFiles:    nil,
						ChmodFiles:    nil,
						IgnoreDevices: runningInUserNS(),
					}
					putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
				}
				hashCloser.Close()
				pipeReader.Close()
				wg.Done()
			}()
			wg.Wait()
			if getErr != nil {
				getErr = fmt.Errorf("reading %q: %w", src, getErr)
			}
			if putErr != nil {
				putErr = fmt.Errorf("storing %q: %w", src, putErr)
			}
			multiErr = multierror.Append(getErr, putErr)
			if multiErr != nil && multiErr.ErrorOrNil() != nil {
				if len(multiErr.Errors) > 1 {
					return multiErr.ErrorOrNil()
				}
				return multiErr.Errors[0]
			}
			continue
		}

		// Checksums are only verified for remote sources.
		if options.Checksum != "" {
			return fmt.Errorf("checksum flag is not supported for local sources")
		}

		// Dig out the result of running glob+stat on this source spec.
		var localSourceStat *copier.StatsForGlob
		for _, st := range localSourceStats {
			if st.Glob == src {
				localSourceStat = st
				break
			}
		}
		if localSourceStat == nil {
			continue
		}

		// Iterate through every item that matched the glob.
		itemsCopied := 0
		for _, glob := range localSourceStat.Globbed {
			rel := glob
			if filepath.IsAbs(glob) {
				if rel, err = filepath.Rel(contextDir, glob); err != nil {
					return fmt.Errorf("computing path of %q relative to %q: %w", glob, contextDir, err)
				}
			}
			if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
				return fmt.Errorf("possible escaping context directory error: %q is outside of %q", glob, contextDir)
			}
			// Check for dockerignore-style exclusion of this item.
			if rel != "." {
				excluded, err := pm.Matches(filepath.ToSlash(rel)) // nolint:staticcheck
				if err != nil {
					return fmt.Errorf("checking if %q(%q) is excluded: %w", glob, rel, err)
				}
				if excluded {
					// non-directories that are excluded are excluded, no question, but
					// directories can only be skipped if we don't have to allow for the
					// possibility of finding things to include under them
					globInfo := localSourceStat.Results[glob]
					if !globInfo.IsDir || !includeDirectoryAnyway(rel, pm) {
						continue
					}
				} else {
					// if the destination is a directory that doesn't yet exist, and is not excluded, let's copy it.
					if newDestDirFound {
						itemsCopied++
					}
				}
			} else {
				// Make sure we don't trigger a "copied nothing" error for an empty context
				// directory if we were told to copy the context directory itself.  We won't
				// actually copy it, but we need to make sure that we don't produce an error
				// due to potentially not having anything in the tarstream that we passed.
				itemsCopied++
			}
			st := localSourceStat.Results[glob]
			pipeReader, pipeWriter := io.Pipe()
			wg.Add(1)
			go func() {
				// Producer: stream the item as a tar archive, optionally
				// renaming a single entry, and counting entries as they
				// pass through the filterer.
				renamedItems := 0
				writer := io.WriteCloser(pipeWriter)
				if renameTarget != "" {
					writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
						hdr.Name = renameTarget
						renamedItems++
						return false, false, nil
					})
				}
				writer = newTarFilterer(writer, func(hdr *tar.Header) (bool, bool, io.Reader) {
					itemsCopied++
					return false, false, nil
				})
				getOptions := copier.GetOptions{
					UIDMap:         srcUIDMap,
					GIDMap:         srcGIDMap,
					Excludes:       options.Excludes,
					ExpandArchives: extract,
					ChownDirs:      chownDirs,
					ChmodDirs:      chmodDirsFiles,
					ChownFiles:     chownFiles,
					ChmodFiles:     chmodDirsFiles,
					StripSetuidBit: options.StripSetuidBit,
					StripSetgidBit: options.StripSetgidBit,
					StripStickyBit: options.StripStickyBit,
				}
				getErr = copier.Get(contextDir, contextDir, getOptions, []string{glob}, writer)
				closeErr = writer.Close()
				if renameTarget != "" && renamedItems > 1 {
					renameErr = fmt.Errorf("internal error: renamed %d items when we expected to only rename 1", renamedItems)
				}
				wg.Done()
			}()
			wg.Add(1)
			go func() {
				// Consumer: digest the stream (keyed by "dir" or "file")
				// and extract it unless this is a dry run.
				if st.IsDir {
					b.ContentDigester.Start("dir")
				} else {
					b.ContentDigester.Start("file")
				}
				hashCloser := b.ContentDigester.Hash()
				hasher := io.Writer(hashCloser)
				if options.Hasher != nil {
					hasher = io.MultiWriter(hasher, options.Hasher)
				}
				if options.DryRun {
					_, putErr = io.Copy(hasher, pipeReader)
				} else {
					putOptions := copier.PutOptions{
						UIDMap:          destUIDMap,
						GIDMap:          destGIDMap,
						DefaultDirOwner: chownDirs,
						DefaultDirMode:  nil,
						ChownDirs:       nil,
						ChmodDirs:       nil,
						ChownFiles:      nil,
						ChmodFiles:      nil,
						IgnoreDevices:   runningInUserNS(),
					}
					putErr = copier.Put(extractDirectory, extractDirectory, putOptions, io.TeeReader(pipeReader, hasher))
				}
				hashCloser.Close()
				pipeReader.Close()
				wg.Done()
			}()
			wg.Wait()
			if getErr != nil {
				getErr = fmt.Errorf("reading %q: %w", src, getErr)
			}
			if closeErr != nil {
				closeErr = fmt.Errorf("closing %q: %w", src, closeErr)
			}
			if renameErr != nil {
				renameErr = fmt.Errorf("renaming %q: %w", src, renameErr)
			}
			if putErr != nil {
				putErr = fmt.Errorf("storing %q: %w", src, putErr)
			}
			multiErr = multierror.Append(getErr, closeErr, renameErr, putErr)
			if multiErr != nil && multiErr.ErrorOrNil() != nil {
				if len(multiErr.Errors) > 1 {
					return multiErr.ErrorOrNil()
				}
				return multiErr.Errors[0]
			}
		}
		// A spec whose matches were all filtered out is an error, same as
		// a spec that matched nothing at all.
		if itemsCopied == 0 {
			excludesFile := ""
			if options.IgnoreFile != "" {
				excludesFile = " using " + options.IgnoreFile
			}
			return fmt.Errorf("no items matching glob %q copied (%d filtered out%s): %w", localSourceStat.Glob, len(localSourceStat.Globbed), excludesFile, syscall.ENOENT)
		}
	}
	return nil
}
620

621
// userForRun returns the user (and group) information which we should use for
622
// running commands
623
func (b *Builder) userForRun(mountPoint string, userspec string) (specs.User, string, error) {
624
	if userspec == "" {
625
		userspec = b.User()
626
	}
627

628
	uid, gid, homeDir, err := chrootuser.GetUser(mountPoint, userspec)
629
	u := specs.User{
630
		UID:      uid,
631
		GID:      gid,
632
		Username: userspec,
633
	}
634
	if !strings.Contains(userspec, ":") {
635
		groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
636
		if err2 != nil {
637
			if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
638
				err = err2
639
			}
640
		} else {
641
			u.AdditionalGids = groups
642
		}
643

644
	}
645
	return u, homeDir, err
646
}
647

648
// userForCopy returns the user (and group) information which we should use for
649
// setting ownership of contents being copied.  It's just like what
650
// userForRun() does, except for the case where we're passed a single numeric
651
// value, where we need to use that value for both the UID and the GID.
652
func (b *Builder) userForCopy(mountPoint string, userspec string) (uint32, uint32, error) {
653
	var (
654
		user, group string
655
		uid, gid    uint64
656
		err         error
657
	)
658

659
	split := strings.SplitN(userspec, ":", 2)
660
	user = split[0]
661
	if len(split) > 1 {
662
		group = split[1]
663
	}
664

665
	// If userspec did not specify any values for user or group, then fail
666
	if user == "" && group == "" {
667
		return 0, 0, fmt.Errorf("can't find uid for user %s", userspec)
668
	}
669

670
	// If userspec specifies values for user or group, check for numeric values
671
	// and return early.  If not, then translate username/groupname
672
	if user != "" {
673
		uid, err = strconv.ParseUint(user, 10, 32)
674
	}
675
	if err == nil {
676
		// default gid to uid
677
		gid = uid
678
		if group != "" {
679
			gid, err = strconv.ParseUint(group, 10, 32)
680
		}
681
	}
682
	// If err != nil, then user or group not numeric, check filesystem
683
	if err == nil {
684
		return uint32(uid), uint32(gid), nil
685
	}
686

687
	owner, _, err := b.userForRun(mountPoint, userspec)
688
	if err != nil {
689
		return 0xffffffff, 0xffffffff, err
690
	}
691
	return owner.UID, owner.GID, nil
692
}
693

694
// EnsureContainerPathAs creates the specified directory owned by USER
695
// with the file mode set to MODE.
696
func (b *Builder) EnsureContainerPathAs(path, user string, mode *os.FileMode) error {
697
	mountPoint, err := b.Mount(b.MountLabel)
698
	if err != nil {
699
		return err
700
	}
701
	defer func() {
702
		if err2 := b.Unmount(); err2 != nil {
703
			logrus.Errorf("error unmounting container: %v", err2)
704
		}
705
	}()
706

707
	uid, gid := uint32(0), uint32(0)
708
	if user != "" {
709
		if uidForCopy, gidForCopy, err := b.userForCopy(mountPoint, user); err == nil {
710
			uid = uidForCopy
711
			gid = gidForCopy
712
		}
713
	}
714

715
	destUIDMap, destGIDMap := convertRuntimeIDMaps(b.IDMappingOptions.UIDMap, b.IDMappingOptions.GIDMap)
716

717
	idPair := &idtools.IDPair{UID: int(uid), GID: int(gid)}
718
	opts := copier.MkdirOptions{
719
		ChmodNew: mode,
720
		ChownNew: idPair,
721
		UIDMap:   destUIDMap,
722
		GIDMap:   destGIDMap,
723
	}
724
	return copier.Mkdir(mountPoint, filepath.Join(mountPoint, path), opts)
725

726
}
727

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.