// Vendored from the podman/buildah project (original file: 1108 lines, 37.6 KB).
1
package buildah
2

3
import (
4
	"archive/tar"
5
	"bytes"
6
	"context"
7
	"encoding/json"
8
	"errors"
9
	"fmt"
10
	"io"
11
	"os"
12
	"path/filepath"
13
	"strings"
14
	"time"
15

16
	"github.com/containers/buildah/copier"
17
	"github.com/containers/buildah/define"
18
	"github.com/containers/buildah/docker"
19
	"github.com/containers/buildah/internal/config"
20
	"github.com/containers/buildah/internal/mkcw"
21
	"github.com/containers/buildah/internal/tmpdir"
22
	"github.com/containers/image/v5/docker/reference"
23
	"github.com/containers/image/v5/image"
24
	"github.com/containers/image/v5/manifest"
25
	is "github.com/containers/image/v5/storage"
26
	"github.com/containers/image/v5/types"
27
	"github.com/containers/storage"
28
	"github.com/containers/storage/pkg/archive"
29
	"github.com/containers/storage/pkg/idtools"
30
	"github.com/containers/storage/pkg/ioutils"
31
	digest "github.com/opencontainers/go-digest"
32
	specs "github.com/opencontainers/image-spec/specs-go"
33
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
34
	"github.com/sirupsen/logrus"
35
)
36

37
const (
38
	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
39
	// suitable for specifying as a value of the PreferredManifestType
40
	// member of a CommitOptions structure.  It is also the default.
41
	OCIv1ImageManifest = define.OCIv1ImageManifest
42
	// Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
43
	// manifest, suitable for specifying as a value of the
44
	// PreferredManifestType member of a CommitOptions structure.
45
	Dockerv2ImageManifest = define.Dockerv2ImageManifest
46
)
47

48
// ExtractRootfsOptions is consumed by ExtractRootfs(), and controls whether
// information such as setuid/setgid bits and extended attributes is preserved
// when extracting file system objects.
type ExtractRootfsOptions struct {
	StripSetuidBit bool // strip the setuid bit off of items being extracted.
	StripSetgidBit bool // strip the setgid bit off of items being extracted.
	StripXattrs    bool // don't record extended attributes of items being extracted.
}
56

57
type containerImageRef struct {
58
	fromImageName         string
59
	fromImageID           string
60
	store                 storage.Store
61
	compression           archive.Compression
62
	name                  reference.Named
63
	names                 []string
64
	containerID           string
65
	mountLabel            string
66
	layerID               string
67
	oconfig               []byte
68
	dconfig               []byte
69
	created               *time.Time
70
	createdBy             string
71
	historyComment        string
72
	annotations           map[string]string
73
	preferredManifestType string
74
	squash                bool
75
	confidentialWorkload  ConfidentialWorkloadOptions
76
	omitHistory           bool
77
	emptyLayer            bool
78
	idMappingOptions      *define.IDMappingOptions
79
	parent                string
80
	blobDirectory         string
81
	preEmptyLayers        []v1.History
82
	postEmptyLayers       []v1.History
83
	overrideChanges       []string
84
	overrideConfig        *manifest.Schema2Config
85
	extraImageContent     map[string]string
86
}
87

88
// blobLayerInfo records the storage layer ID and size for a blob that can be
// served directly from a layer in local storage.
type blobLayerInfo struct {
	ID   string
	Size int64
}
92

93
type containerImageSource struct {
94
	path          string
95
	ref           *containerImageRef
96
	store         storage.Store
97
	containerID   string
98
	mountLabel    string
99
	layerID       string
100
	names         []string
101
	compression   archive.Compression
102
	config        []byte
103
	configDigest  digest.Digest
104
	manifest      []byte
105
	manifestType  string
106
	blobDirectory string
107
	blobLayers    map[digest.Digest]blobLayerInfo
108
}
109

110
// NewImage builds an image source for this reference and wraps it in a
// types.ImageCloser via the image library.
func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) {
	source, err := i.NewImageSource(ctx, sc)
	if err != nil {
		return nil, err
	}
	return image.FromSource(ctx, sc, source)
}
117

118
// expectedOCIDiffIDs returns the number of history entries in the OCI image
// configuration which correspond to actual layers (i.e. are not marked as
// empty layers).
func expectedOCIDiffIDs(image v1.Image) int {
	count := 0
	for _, entry := range image.History {
		if !entry.EmptyLayer {
			count++
		}
	}
	return count
}
127

128
// expectedDockerDiffIDs returns the number of history entries in the Docker
// image configuration which correspond to actual layers (i.e. are not marked
// as empty layers).
func expectedDockerDiffIDs(image docker.V2Image) int {
	count := 0
	for _, entry := range image.History {
		if !entry.EmptyLayer {
			count++
		}
	}
	return count
}
137

138
// Compute the media types which we need to attach to a layer, given the type of
139
// compression that we'll be applying.
140
func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
141
	omediaType = v1.MediaTypeImageLayer
142
	dmediaType = docker.V2S2MediaTypeUncompressedLayer
143
	if layerCompression != archive.Uncompressed {
144
		switch layerCompression {
145
		case archive.Gzip:
146
			omediaType = v1.MediaTypeImageLayerGzip
147
			dmediaType = manifest.DockerV2Schema2LayerMediaType
148
			logrus.Debugf("compressing %s with gzip", what)
149
		case archive.Bzip2:
150
			// Until the image specs define a media type for bzip2-compressed layers, even if we know
151
			// how to decompress them, we can't try to compress layers with bzip2.
152
			return "", "", errors.New("media type for bzip2-compressed layers is not defined")
153
		case archive.Xz:
154
			// Until the image specs define a media type for xz-compressed layers, even if we know
155
			// how to decompress them, we can't try to compress layers with xz.
156
			return "", "", errors.New("media type for xz-compressed layers is not defined")
157
		case archive.Zstd:
158
			// Until the image specs define a media type for zstd-compressed layers, even if we know
159
			// how to decompress them, we can't try to compress layers with zstd.
160
			return "", "", errors.New("media type for zstd-compressed layers is not defined")
161
		default:
162
			logrus.Debugf("compressing %s with unknown compressor(?)", what)
163
		}
164
	}
165
	return omediaType, dmediaType, nil
166
}
167

168
// Extract the container's whole filesystem as a filesystem image, wrapped
169
// in LUKS-compatible encryption.
170
func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) {
171
	var image v1.Image
172
	if err := json.Unmarshal(i.oconfig, &image); err != nil {
173
		return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err)
174
	}
175
	if options.TempDir == "" {
176
		cdir, err := i.store.ContainerDirectory(i.containerID)
177
		if err != nil {
178
			return nil, fmt.Errorf("getting the per-container data directory for %q: %w", i.containerID, err)
179
		}
180
		tempdir, err := os.MkdirTemp(cdir, "buildah-rootfs")
181
		if err != nil {
182
			return nil, fmt.Errorf("creating a temporary data directory to hold a rootfs image for %q: %w", i.containerID, err)
183
		}
184
		defer func() {
185
			if err := os.RemoveAll(tempdir); err != nil {
186
				logrus.Warnf("removing temporary directory %q: %v", tempdir, err)
187
			}
188
		}()
189
		options.TempDir = tempdir
190
	}
191
	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
192
	if err != nil {
193
		return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
194
	}
195
	archiveOptions := mkcw.ArchiveOptions{
196
		AttestationURL:           options.AttestationURL,
197
		CPUs:                     options.CPUs,
198
		Memory:                   options.Memory,
199
		TempDir:                  options.TempDir,
200
		TeeType:                  options.TeeType,
201
		IgnoreAttestationErrors:  options.IgnoreAttestationErrors,
202
		WorkloadID:               options.WorkloadID,
203
		DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
204
		Slop:                     options.Slop,
205
		FirmwareLibrary:          options.FirmwareLibrary,
206
		GraphOptions:             i.store.GraphOptions(),
207
		ExtraImageContent:        i.extraImageContent,
208
	}
209
	rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions)
210
	if err != nil {
211
		if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil {
212
			logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
213
		}
214
		return nil, fmt.Errorf("converting rootfs %q: %w", i.containerID, err)
215
	}
216
	return ioutils.NewReadCloserWrapper(rc, func() error {
217
		if err = rc.Close(); err != nil {
218
			err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
219
		}
220
		if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
221
			if err2 != nil {
222
				err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
223
			}
224
			err = err2
225
		} else {
226
			logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
227
		}
228
		return err
229
	}), nil
230
}
231

232
// Extract the container's whole filesystem as if it were a single layer.
233
// The ExtractRootfsOptions control whether or not to preserve setuid and
234
// setgid bits and extended attributes on contents.
235
func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
236
	var uidMap, gidMap []idtools.IDMap
237
	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
238
	if err != nil {
239
		return nil, nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
240
	}
241
	pipeReader, pipeWriter := io.Pipe()
242
	errChan := make(chan error, 1)
243
	go func() {
244
		defer close(errChan)
245
		if len(i.extraImageContent) > 0 {
246
			// Abuse the tar format and _prepend_ the synthesized
247
			// data items to the archive we'll get from
248
			// copier.Get(), in a way that looks right to a reader
249
			// as long as we DON'T Close() the tar Writer.
250
			filename, _, _, err := i.makeExtraImageContentDiff(false)
251
			if err != nil {
252
				errChan <- err
253
				return
254
			}
255
			file, err := os.Open(filename)
256
			if err != nil {
257
				errChan <- err
258
				return
259
			}
260
			defer file.Close()
261
			if _, err = io.Copy(pipeWriter, file); err != nil {
262
				errChan <- err
263
				return
264
			}
265
		}
266
		if i.idMappingOptions != nil {
267
			uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
268
		}
269
		copierOptions := copier.GetOptions{
270
			UIDMap:         uidMap,
271
			GIDMap:         gidMap,
272
			StripSetuidBit: opts.StripSetuidBit,
273
			StripSetgidBit: opts.StripSetgidBit,
274
			StripXattrs:    opts.StripXattrs,
275
		}
276
		err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
277
		errChan <- err
278
		pipeWriter.Close()
279

280
	}()
281
	return ioutils.NewReadCloserWrapper(pipeReader, func() error {
282
		if err = pipeReader.Close(); err != nil {
283
			err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
284
		}
285
		if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
286
			if err2 != nil {
287
				err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
288
			}
289
			err = err2
290
		}
291
		return err
292
	}), errChan, nil
293
}
294

295
// Build fresh copies of the container configuration structures so that we can edit them
296
// without making unintended changes to the original Builder.
297
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
298
	created := time.Now().UTC()
299
	if i.created != nil {
300
		created = *i.created
301
	}
302

303
	// Build an empty image, and then decode over it.
304
	oimage := v1.Image{}
305
	if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
306
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
307
	}
308
	// Always replace this value, since we're newer than our base image.
309
	oimage.Created = &created
310
	// Clear the list of diffIDs, since we always repopulate it.
311
	oimage.RootFS.Type = docker.TypeLayers
312
	oimage.RootFS.DiffIDs = []digest.Digest{}
313
	// Only clear the history if we're squashing, otherwise leave it be so that we can append
314
	// entries to it.
315
	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
316
		oimage.History = []v1.History{}
317
	}
318

319
	// Build an empty image, and then decode over it.
320
	dimage := docker.V2Image{}
321
	if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
322
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
323
	}
324
	dimage.Parent = docker.ID(i.parent)
325
	dimage.Container = i.containerID
326
	if dimage.Config != nil {
327
		dimage.ContainerConfig = *dimage.Config
328
	}
329
	// Always replace this value, since we're newer than our base image.
330
	dimage.Created = created
331
	// Clear the list of diffIDs, since we always repopulate it.
332
	dimage.RootFS = &docker.V2S2RootFS{}
333
	dimage.RootFS.Type = docker.TypeLayers
334
	dimage.RootFS.DiffIDs = []digest.Digest{}
335
	// Only clear the history if we're squashing, otherwise leave it be so
336
	// that we can append entries to it.  Clear the parent, too, to reflect
337
	// that we no longer include its layers and history.
338
	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
339
		dimage.Parent = ""
340
		dimage.History = []docker.V2S2History{}
341
	}
342

343
	// If we were supplied with a configuration, copy fields from it to
344
	// matching fields in both formats.
345
	if err := config.Override(dimage.Config, &oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
346
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, fmt.Errorf("applying changes: %w", err)
347
	}
348

349
	// If we're producing a confidential workload, override the command and
350
	// assorted other settings that aren't expected to work correctly.
351
	if i.confidentialWorkload.Convert {
352
		dimage.Config.Entrypoint = []string{"/entrypoint"}
353
		oimage.Config.Entrypoint = []string{"/entrypoint"}
354
		dimage.Config.Cmd = nil
355
		oimage.Config.Cmd = nil
356
		dimage.Config.User = ""
357
		oimage.Config.User = ""
358
		dimage.Config.WorkingDir = ""
359
		oimage.Config.WorkingDir = ""
360
		dimage.Config.Healthcheck = nil
361
		dimage.Config.Shell = nil
362
		dimage.Config.Volumes = nil
363
		oimage.Config.Volumes = nil
364
		dimage.Config.ExposedPorts = nil
365
		oimage.Config.ExposedPorts = nil
366
	}
367

368
	// Build empty manifests.  The Layers lists will be populated later.
369
	omanifest := v1.Manifest{
370
		Versioned: specs.Versioned{
371
			SchemaVersion: 2,
372
		},
373
		MediaType: v1.MediaTypeImageManifest,
374
		Config: v1.Descriptor{
375
			MediaType: v1.MediaTypeImageConfig,
376
		},
377
		Layers:      []v1.Descriptor{},
378
		Annotations: i.annotations,
379
	}
380

381
	dmanifest := docker.V2S2Manifest{
382
		V2Versioned: docker.V2Versioned{
383
			SchemaVersion: 2,
384
			MediaType:     manifest.DockerV2Schema2MediaType,
385
		},
386
		Config: docker.V2S2Descriptor{
387
			MediaType: manifest.DockerV2Schema2ConfigMediaType,
388
		},
389
		Layers: []docker.V2S2Descriptor{},
390
	}
391

392
	return oimage, omanifest, dimage, dmanifest, nil
393
}
394

395
func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) {
396
	// Decide which type of manifest and configuration output we're going to provide.
397
	manifestType := i.preferredManifestType
398
	// If it's not a format we support, return an error.
399
	if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
400
		return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
401
			manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
402
	}
403
	// Start building the list of layers using the read-write layer.
404
	layers := []string{}
405
	layerID := i.layerID
406
	layer, err := i.store.Layer(layerID)
407
	if err != nil {
408
		return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
409
	}
410
	// Walk the list of parent layers, prepending each as we go.  If we're squashing
411
	// or making a confidential workload, we're only producing one layer, so stop at
412
	// the layer ID of the top layer, which we won't really be using anyway.
413
	for layer != nil {
414
		layers = append(append([]string{}, layerID), layers...)
415
		layerID = layer.Parent
416
		if layerID == "" || i.confidentialWorkload.Convert || i.squash {
417
			err = nil
418
			break
419
		}
420
		layer, err = i.store.Layer(layerID)
421
		if err != nil {
422
			return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
423
		}
424
	}
425
	layer = nil
426

427
	// If we're slipping in a synthesized layer, we need to add a placeholder for it
428
	// to the list.
429
	const synthesizedLayerID = "(synthesized layer)"
430
	if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash {
431
		layers = append(layers, synthesizedLayerID)
432
	}
433
	logrus.Debugf("layer list: %q", layers)
434

435
	// Make a temporary directory to hold blobs.
436
	path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
437
	if err != nil {
438
		return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
439
	}
440
	logrus.Debugf("using %q to hold temporary data", path)
441
	defer func() {
442
		if src == nil {
443
			err2 := os.RemoveAll(path)
444
			if err2 != nil {
445
				logrus.Errorf("error removing layer blob directory: %v", err)
446
			}
447
		}
448
	}()
449

450
	// Build fresh copies of the configurations and manifest so that we don't mess with any
451
	// values in the Builder object itself.
452
	oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
453
	if err != nil {
454
		return nil, err
455
	}
456

457
	// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
458
	var extraImageContentDiff string
459
	var extraImageContentDiffDigest digest.Digest
460
	blobLayers := make(map[digest.Digest]blobLayerInfo)
461
	for _, layerID := range layers {
462
		what := fmt.Sprintf("layer %q", layerID)
463
		if i.confidentialWorkload.Convert || i.squash {
464
			what = fmt.Sprintf("container %q", i.containerID)
465
		}
466
		// The default layer media type assumes no compression.
467
		omediaType := v1.MediaTypeImageLayer
468
		dmediaType := docker.V2S2MediaTypeUncompressedLayer
469
		// Look up this layer.
470
		var layerUncompressedDigest digest.Digest
471
		var layerUncompressedSize int64
472
		if layerID != synthesizedLayerID {
473
			layer, err := i.store.Layer(layerID)
474
			if err != nil {
475
				return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
476
			}
477
			layerID = layer.ID
478
			layerUncompressedDigest = layer.UncompressedDigest
479
			layerUncompressedSize = layer.UncompressedSize
480
		} else {
481
			diffFilename, digest, size, err := i.makeExtraImageContentDiff(true)
482
			if err != nil {
483
				return nil, fmt.Errorf("unable to generate layer for additional content: %w", err)
484
			}
485
			extraImageContentDiff = diffFilename
486
			extraImageContentDiffDigest = digest
487
			layerUncompressedDigest = digest
488
			layerUncompressedSize = size
489
		}
490
		// If we already know the digest of the contents of parent
491
		// layers, reuse their blobsums, diff IDs, and sizes.
492
		if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" {
493
			layerBlobSum := layerUncompressedDigest
494
			layerBlobSize := layerUncompressedSize
495
			diffID := layerUncompressedDigest
496
			// Note this layer in the manifest, using the appropriate blobsum.
497
			olayerDescriptor := v1.Descriptor{
498
				MediaType: omediaType,
499
				Digest:    layerBlobSum,
500
				Size:      layerBlobSize,
501
			}
502
			omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
503
			dlayerDescriptor := docker.V2S2Descriptor{
504
				MediaType: dmediaType,
505
				Digest:    layerBlobSum,
506
				Size:      layerBlobSize,
507
			}
508
			dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
509
			// Note this layer in the list of diffIDs, again using the uncompressed digest.
510
			oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
511
			dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
512
			blobLayers[diffID] = blobLayerInfo{
513
				ID:   layerID,
514
				Size: layerBlobSize,
515
			}
516
			continue
517
		}
518
		// Figure out if we need to change the media type, in case we've changed the compression.
519
		omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
520
		if err != nil {
521
			return nil, err
522
		}
523
		// Start reading either the layer or the whole container rootfs.
524
		noCompression := archive.Uncompressed
525
		diffOptions := &storage.DiffOptions{
526
			Compression: &noCompression,
527
		}
528
		var rc io.ReadCloser
529
		var errChan chan error
530
		if i.confidentialWorkload.Convert {
531
			// Convert the root filesystem into an encrypted disk image.
532
			rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload)
533
			if err != nil {
534
				return nil, err
535
			}
536
		} else if i.squash {
537
			// Extract the root filesystem as a single layer.
538
			rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{})
539
			if err != nil {
540
				return nil, err
541
			}
542
		} else {
543
			if layerID != synthesizedLayerID {
544
				// If we're up to the final layer, but we don't want to
545
				// include a diff for it, we're done.
546
				if i.emptyLayer && layerID == i.layerID {
547
					continue
548
				}
549
				// Extract this layer, one of possibly many.
550
				rc, err = i.store.Diff("", layerID, diffOptions)
551
				if err != nil {
552
					return nil, fmt.Errorf("extracting %s: %w", what, err)
553
				}
554
			} else {
555
				// Slip in additional content as an additional layer.
556
				if rc, err = os.Open(extraImageContentDiff); err != nil {
557
					return nil, err
558
				}
559
			}
560
		}
561
		srcHasher := digest.Canonical.Digester()
562
		// Set up to write the possibly-recompressed blob.
563
		layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
564
		if err != nil {
565
			rc.Close()
566
			return nil, fmt.Errorf("opening file for %s: %w", what, err)
567
		}
568

569
		counter := ioutils.NewWriteCounter(layerFile)
570
		var destHasher digest.Digester
571
		var multiWriter io.Writer
572
		// Avoid rehashing when we do not compress.
573
		if i.compression != archive.Uncompressed {
574
			destHasher = digest.Canonical.Digester()
575
			multiWriter = io.MultiWriter(counter, destHasher.Hash())
576
		} else {
577
			destHasher = srcHasher
578
			multiWriter = counter
579
		}
580
		// Compress the layer, if we're recompressing it.
581
		writeCloser, err := archive.CompressStream(multiWriter, i.compression)
582
		if err != nil {
583
			layerFile.Close()
584
			rc.Close()
585
			return nil, fmt.Errorf("compressing %s: %w", what, err)
586
		}
587
		writer := io.MultiWriter(writeCloser, srcHasher.Hash())
588
		// Scrub any local user names that might correspond to UIDs or GIDs of
589
		// files in this layer.
590
		{
591
			nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
592
			writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
593
				hdr.Uname, hdr.Gname = "", ""
594
				return false, false, nil
595
			})
596
			writer = io.Writer(writeCloser)
597
		}
598
		// Use specified timestamps in the layer, if we're doing that for
599
		// history entries.
600
		if i.created != nil {
601
			nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
602
			writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
603
				// Changing a zeroed field to a non-zero field
604
				// can affect the format that the library uses
605
				// for writing the header, so only change
606
				// fields that are already set to avoid
607
				// changing the format (and as a result,
608
				// changing the length) of the header that we
609
				// write.
610
				if !hdr.ModTime.IsZero() {
611
					hdr.ModTime = *i.created
612
				}
613
				if !hdr.AccessTime.IsZero() {
614
					hdr.AccessTime = *i.created
615
				}
616
				if !hdr.ChangeTime.IsZero() {
617
					hdr.ChangeTime = *i.created
618
				}
619
				return false, false, nil
620
			})
621
			writer = io.Writer(writeCloser)
622
		}
623
		size, err := io.Copy(writer, rc)
624
		writeCloser.Close()
625
		layerFile.Close()
626
		rc.Close()
627

628
		if errChan != nil {
629
			err = <-errChan
630
			if err != nil {
631
				return nil, err
632
			}
633
		}
634

635
		if err != nil {
636
			return nil, fmt.Errorf("storing %s to file: %w", what, err)
637
		}
638
		if i.compression == archive.Uncompressed {
639
			if size != counter.Count {
640
				return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
641
			}
642
		} else {
643
			size = counter.Count
644
		}
645
		logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
646
		// Rename the layer so that we can more easily find it by digest later.
647
		finalBlobName := filepath.Join(path, destHasher.Digest().String())
648
		if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
649
			return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
650
		}
651
		// Add a note in the manifest about the layer.  The blobs are identified by their possibly-
652
		// compressed blob digests.
653
		olayerDescriptor := v1.Descriptor{
654
			MediaType: omediaType,
655
			Digest:    destHasher.Digest(),
656
			Size:      size,
657
		}
658
		omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
659
		dlayerDescriptor := docker.V2S2Descriptor{
660
			MediaType: dmediaType,
661
			Digest:    destHasher.Digest(),
662
			Size:      size,
663
		}
664
		dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
665
		// Add a note about the diffID, which is always the layer's uncompressed digest.
666
		oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
667
		dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
668
	}
669

670
	// Build history notes in the image configurations.
671
	appendHistory := func(history []v1.History) {
672
		for i := range history {
673
			var created *time.Time
674
			if history[i].Created != nil {
675
				copiedTimestamp := *history[i].Created
676
				created = &copiedTimestamp
677
			}
678
			onews := v1.History{
679
				Created:    created,
680
				CreatedBy:  history[i].CreatedBy,
681
				Author:     history[i].Author,
682
				Comment:    history[i].Comment,
683
				EmptyLayer: true,
684
			}
685
			oimage.History = append(oimage.History, onews)
686
			if created == nil {
687
				created = &time.Time{}
688
			}
689
			dnews := docker.V2S2History{
690
				Created:    *created,
691
				CreatedBy:  history[i].CreatedBy,
692
				Author:     history[i].Author,
693
				Comment:    history[i].Comment,
694
				EmptyLayer: true,
695
			}
696
			dimage.History = append(dimage.History, dnews)
697
		}
698
	}
699

700
	// Only attempt to append history if history was not disabled explicitly.
701
	if !i.omitHistory {
702
		// Keep track of how many entries the base image's history had
703
		// before we started adding to it.
704
		baseImageHistoryLen := len(oimage.History)
705
		appendHistory(i.preEmptyLayers)
706
		created := time.Now().UTC()
707
		if i.created != nil {
708
			created = (*i.created).UTC()
709
		}
710
		comment := i.historyComment
711
		// Add a comment indicating which base image was used, if it wasn't
712
		// just an image ID.
713
		if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
714
			comment += "FROM " + i.fromImageName
715
		}
716
		onews := v1.History{
717
			Created:    &created,
718
			CreatedBy:  i.createdBy,
719
			Author:     oimage.Author,
720
			Comment:    comment,
721
			EmptyLayer: i.emptyLayer,
722
		}
723
		oimage.History = append(oimage.History, onews)
724
		dnews := docker.V2S2History{
725
			Created:    created,
726
			CreatedBy:  i.createdBy,
727
			Author:     dimage.Author,
728
			Comment:    comment,
729
			EmptyLayer: i.emptyLayer,
730
		}
731
		dimage.History = append(dimage.History, dnews)
732
		appendHistory(i.postEmptyLayers)
733

734
		// Add a history entry for the extra image content if we added a layer for it.
735
		if extraImageContentDiff != "" {
736
			createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
737
			onews := v1.History{
738
				Created:   &created,
739
				CreatedBy: createdBy,
740
			}
741
			oimage.History = append(oimage.History, onews)
742
			dnews := docker.V2S2History{
743
				Created:   created,
744
				CreatedBy: createdBy,
745
			}
746
			dimage.History = append(dimage.History, dnews)
747
		}
748

749
		// Confidence check that we didn't just create a mismatch between non-empty layers in the
750
		// history and the number of diffIDs.  Only applicable if the base image (if there was
751
		// one) provided us at least one entry to use as a starting point.
752
		if baseImageHistoryLen != 0 {
753
			expectedDiffIDs := expectedOCIDiffIDs(oimage)
754
			if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
755
				return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
756
			}
757
			expectedDiffIDs = expectedDockerDiffIDs(dimage)
758
			if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
759
				return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
760
			}
761
		}
762
	}
763

764
	// Encode the image configuration blob.
765
	oconfig, err := json.Marshal(&oimage)
766
	if err != nil {
767
		return nil, fmt.Errorf("encoding %#v as json: %w", oimage, err)
768
	}
769
	logrus.Debugf("OCIv1 config = %s", oconfig)
770

771
	// Add the configuration blob to the manifest.
772
	omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
773
	omanifest.Config.Size = int64(len(oconfig))
774
	omanifest.Config.MediaType = v1.MediaTypeImageConfig
775

776
	// Encode the manifest.
777
	omanifestbytes, err := json.Marshal(&omanifest)
778
	if err != nil {
779
		return nil, fmt.Errorf("encoding %#v as json: %w", omanifest, err)
780
	}
781
	logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
782

783
	// Encode the image configuration blob.
784
	dconfig, err := json.Marshal(&dimage)
785
	if err != nil {
786
		return nil, fmt.Errorf("encoding %#v as json: %w", dimage, err)
787
	}
788
	logrus.Debugf("Docker v2s2 config = %s", dconfig)
789

790
	// Add the configuration blob to the manifest.
791
	dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
792
	dmanifest.Config.Size = int64(len(dconfig))
793
	dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType
794

795
	// Encode the manifest.
796
	dmanifestbytes, err := json.Marshal(&dmanifest)
797
	if err != nil {
798
		return nil, fmt.Errorf("encoding %#v as json: %w", dmanifest, err)
799
	}
800
	logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
801

802
	// Decide which manifest and configuration blobs we'll actually output.
803
	var config []byte
804
	var imageManifest []byte
805
	switch manifestType {
806
	case v1.MediaTypeImageManifest:
807
		imageManifest = omanifestbytes
808
		config = oconfig
809
	case manifest.DockerV2Schema2MediaType:
810
		imageManifest = dmanifestbytes
811
		config = dconfig
812
	default:
813
		panic("unreachable code: unsupported manifest type")
814
	}
815
	src = &containerImageSource{
816
		path:          path,
817
		ref:           i,
818
		store:         i.store,
819
		containerID:   i.containerID,
820
		mountLabel:    i.mountLabel,
821
		layerID:       i.layerID,
822
		names:         i.names,
823
		compression:   i.compression,
824
		config:        config,
825
		configDigest:  digest.Canonical.FromBytes(config),
826
		manifest:      imageManifest,
827
		manifestType:  manifestType,
828
		blobDirectory: i.blobDirectory,
829
		blobLayers:    blobLayers,
830
	}
831
	return src, nil
832
}
833

834
// NewImageDestination always fails: this reference only supports being used
// as a source image, never as a destination.
func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) {
	return nil, errors.New("can't write to a container")
}
837

838
// DockerReference returns the parsed form of the container's first name, if
// one could be parsed when this reference was built, or nil.
func (i *containerImageRef) DockerReference() reference.Named {
	return i.name
}
841

842
// StringWithinTransport returns the first name recorded for the container,
// or an empty string if the container has no names.
func (i *containerImageRef) StringWithinTransport() string {
	if len(i.names) == 0 {
		return ""
	}
	return i.names[0]
}
848

849
// DeleteImage is a no-op that reports success: nothing is ever stored for
// this reference, so there is nothing to delete.
func (i *containerImageRef) DeleteImage(context.Context, *types.SystemContext) error {
	// we were never here
	return nil
}
853

854
// PolicyConfigurationIdentity returns an empty string: this reference has no
// identity for signature-policy lookup purposes.
func (i *containerImageRef) PolicyConfigurationIdentity() string {
	return ""
}
857

858
// PolicyConfigurationNamespaces returns nil: there are no namespaces to
// consult for signature-policy lookup for this reference.
func (i *containerImageRef) PolicyConfigurationNamespaces() []string {
	return nil
}
861

862
// Transport returns the containers-storage transport, which this reference
// nominally belongs to.
func (i *containerImageRef) Transport() types.ImageTransport {
	return is.Transport
}
865

866
// Close discards the temporary directory that holds this source's layer
// blobs.
func (i *containerImageSource) Close() error {
	if err := os.RemoveAll(i.path); err != nil {
		return fmt.Errorf("removing layer blob directory: %w", err)
	}
	return nil
}
873

874
// Reference returns the image reference that this source was created from.
func (i *containerImageSource) Reference() types.ImageReference {
	return i.ref
}
877

878
// GetSignatures reports that there are no signatures associated with this
// source image.
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
	return nil, nil
}
881

882
// GetManifest returns the in-memory manifest and its MIME type.  The
// instanceDigest parameter is unused.
func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
	return i.manifest, i.manifestType, nil
}
885

886
// LayerInfosForCopy returns nil, which per the types.ImageSource contract
// tells callers to use the layer information from the manifest as-is.
func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
	return nil, nil
}
889

890
// HasThreadSafeGetBlob reports that GetBlob must not be called concurrently.
func (i *containerImageSource) HasThreadSafeGetBlob() bool {
	return false
}
893

894
// GetBlob returns a reader for the blob with the given digest, along with its
// size.  The configuration blob is served from memory; layer blobs are read
// either as diffs straight from the layer store or from files in the blob
// directories.
func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
	// The configuration blob is kept in memory, so serve it directly.
	if blob.Digest == i.configDigest {
		logrus.Debugf("start reading config")
		reader := bytes.NewReader(i.config)
		closer := func() error {
			logrus.Debugf("finished reading config")
			return nil
		}
		return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
	}
	var layerReadCloser io.ReadCloser
	// size == -1 below means "we never found the blob".
	size = -1
	if blobLayerInfo, ok := i.blobLayers[blob.Digest]; ok {
		// The digest maps to a layer in the store: stream its
		// uncompressed diff.
		noCompression := archive.Uncompressed
		diffOptions := &storage.DiffOptions{
			Compression: &noCompression,
		}
		layerReadCloser, err = i.store.Diff("", blobLayerInfo.ID, diffOptions)
		size = blobLayerInfo.Size
	} else {
		// Otherwise look for a blob file, first in the configured blob
		// directory, then in our own temporary directory.
		for _, blobDir := range []string{i.blobDirectory, i.path} {
			var layerFile *os.File
			layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0600)
			if err == nil {
				st, err := layerFile.Stat() // deliberately shadows the outer err
				if err != nil {
					logrus.Warnf("error reading size of layer file %q: %v", blob.Digest.String(), err)
				} else {
					size = st.Size()
					layerReadCloser = layerFile
					break
				}
				layerFile.Close()
			}
			// Checks the outer err from OpenFile; anything other than
			// "not found" is worth a debug note before trying the next
			// directory.
			if !errors.Is(err, os.ErrNotExist) {
				logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), blobDir, err)
			}
		}
	}
	if err != nil || layerReadCloser == nil || size == -1 {
		logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
		return nil, -1, fmt.Errorf("opening layer blob: %w", err)
	}
	logrus.Debugf("reading layer %q", blob.Digest.String())
	// Wrap the reader so we log when the consumer has finished with it.
	closer := func() error {
		logrus.Debugf("finished reading layer %q", blob.Digest.String())
		if err := layerReadCloser.Close(); err != nil {
			return fmt.Errorf("closing layer %q after reading: %w", blob.Digest.String(), err)
		}
		return nil
	}
	return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil
}
947

948
// makeExtraImageContentDiff creates an archive file containing the contents of
949
// files named in i.extraImageContent.  The footer that marks the end of the
950
// archive may be omitted.
951
func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (string, digest.Digest, int64, error) {
952
	cdir, err := i.store.ContainerDirectory(i.containerID)
953
	if err != nil {
954
		return "", "", -1, err
955
	}
956
	diff, err := os.CreateTemp(cdir, "extradiff")
957
	if err != nil {
958
		return "", "", -1, err
959
	}
960
	defer diff.Close()
961
	digester := digest.Canonical.Digester()
962
	counter := ioutils.NewWriteCounter(digester.Hash())
963
	tw := tar.NewWriter(io.MultiWriter(diff, counter))
964
	created := time.Now()
965
	if i.created != nil {
966
		created = *i.created
967
	}
968
	for path, contents := range i.extraImageContent {
969
		if err := func() error {
970
			content, err := os.Open(contents)
971
			if err != nil {
972
				return err
973
			}
974
			defer content.Close()
975
			st, err := content.Stat()
976
			if err != nil {
977
				return err
978
			}
979
			if err := tw.WriteHeader(&tar.Header{
980
				Name:     path,
981
				Typeflag: tar.TypeReg,
982
				Mode:     0o644,
983
				ModTime:  created,
984
				Size:     st.Size(),
985
			}); err != nil {
986
				return err
987
			}
988
			if _, err := io.Copy(tw, content); err != nil {
989
				return err
990
			}
991
			if err := tw.Flush(); err != nil {
992
				return err
993
			}
994
			return nil
995
		}(); err != nil {
996
			return "", "", -1, err
997
		}
998
	}
999
	if !includeFooter {
1000
		return diff.Name(), "", -1, err
1001
	}
1002
	tw.Close()
1003
	return diff.Name(), digester.Digest(), counter.Count, err
1004
}
1005

1006
// makeContainerImageRef creates a containers/image/v5/types.ImageReference
// which is mainly used for representing the working container as a source
// image that can be copied, which is how we commit the container to create
// the image.
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) {
	var name reference.Named
	// Look up the working container's record in the storage library.
	container, err := b.store.Container(b.ContainerID)
	if err != nil {
		return nil, fmt.Errorf("locating container %q: %w", b.ContainerID, err)
	}
	// If the container's first name parses as an image reference, keep it.
	if len(container.Names) > 0 {
		if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
			name = parsed
		}
	}
	// Default to producing an OCI image manifest unless told otherwise.
	manifestType := options.PreferredManifestType
	if manifestType == "" {
		manifestType = define.OCIv1ImageManifest
	}

	// Apply requested environment removals before the configuration blobs
	// are serialized below.
	for _, u := range options.UnsetEnvs {
		b.UnsetEnv(u)
	}
	// Serialize both the OCI- and Docker-format configurations now.
	oconfig, err := json.Marshal(&b.OCIv1)
	if err != nil {
		return nil, fmt.Errorf("encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
	}
	dconfig, err := json.Marshal(&b.Docker)
	if err != nil {
		return nil, fmt.Errorf("encoding docker-format image configuration %#v: %w", b.Docker, err)
	}
	// A caller-supplied history timestamp, normalized to UTC, overrides
	// the default of "now".
	var created *time.Time
	if options.HistoryTimestamp != nil {
		historyTimestampUTC := options.HistoryTimestamp.UTC()
		created = &historyTimestampUTC
	}
	// Fall back to the configured shell (or /bin/sh) for the history
	// entry's created-by value if none was set explicitly.
	createdBy := b.CreatedBy()
	if createdBy == "" {
		createdBy = strings.Join(b.Shell(), " ")
		if createdBy == "" {
			createdBy = "/bin/sh"
		}
	}

	parent := ""
	forceOmitHistory := false
	if b.FromImageID != "" {
		// Record the base image's ID as our parent if it validates as a
		// canonical digest.
		parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
		if parentDigest.Validate() == nil {
			parent = parentDigest.String()
		}
		if !options.OmitHistory && len(b.OCIv1.History) == 0 && len(b.OCIv1.RootFS.DiffIDs) != 0 {
			// Parent had layers, but no history.  We shouldn't confuse
			// our own confidence checks by adding history for layers
			// that we're adding, creating an image with multiple layers,
			// only some of which have history entries, which would be
			// broken in confusing ways.
			b.Logger.Debugf("parent image %q had no history but had %d layers, assuming OmitHistory", b.FromImageID, len(b.OCIv1.RootFS.DiffIDs))
			forceOmitHistory = true
		}
	}

	// Bundle everything that committing will need into the reference.
	ref := &containerImageRef{
		fromImageName:         b.FromImage,
		fromImageID:           b.FromImageID,
		store:                 b.store,
		compression:           options.Compression,
		name:                  name,
		names:                 container.Names,
		containerID:           container.ID,
		mountLabel:            b.MountLabel,
		layerID:               container.LayerID,
		oconfig:               oconfig,
		dconfig:               dconfig,
		created:               created,
		createdBy:             createdBy,
		historyComment:        b.HistoryComment(),
		annotations:           b.Annotations(),
		preferredManifestType: manifestType,
		squash:                options.Squash,
		confidentialWorkload:  options.ConfidentialWorkloadOptions,
		omitHistory:           options.OmitHistory || forceOmitHistory,
		emptyLayer:            options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert,
		idMappingOptions:      &b.IDMappingOptions,
		parent:                parent,
		blobDirectory:         options.BlobDirectory,
		preEmptyLayers:        b.PrependedEmptyLayers,
		postEmptyLayers:       b.AppendedEmptyLayers,
		overrideChanges:       options.OverrideChanges,
		overrideConfig:        options.OverrideConfig,
		extraImageContent:     copyStringStringMap(options.ExtraImageContent),
	}
	return ref, nil
}
1100

1101
// Extract the container's whole filesystem as if it were a single layer from current builder instance
1102
func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
1103
	src, err := b.makeContainerImageRef(options)
1104
	if err != nil {
1105
		return nil, nil, fmt.Errorf("creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
1106
	}
1107
	return src.extractRootfs(opts)
1108
}
1109

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.