podman
1108 строк · 37.6 Кб
1package buildah
2
3import (
4"archive/tar"
5"bytes"
6"context"
7"encoding/json"
8"errors"
9"fmt"
10"io"
11"os"
12"path/filepath"
13"strings"
14"time"
15
16"github.com/containers/buildah/copier"
17"github.com/containers/buildah/define"
18"github.com/containers/buildah/docker"
19"github.com/containers/buildah/internal/config"
20"github.com/containers/buildah/internal/mkcw"
21"github.com/containers/buildah/internal/tmpdir"
22"github.com/containers/image/v5/docker/reference"
23"github.com/containers/image/v5/image"
24"github.com/containers/image/v5/manifest"
25is "github.com/containers/image/v5/storage"
26"github.com/containers/image/v5/types"
27"github.com/containers/storage"
28"github.com/containers/storage/pkg/archive"
29"github.com/containers/storage/pkg/idtools"
30"github.com/containers/storage/pkg/ioutils"
31digest "github.com/opencontainers/go-digest"
32specs "github.com/opencontainers/image-spec/specs-go"
33v1 "github.com/opencontainers/image-spec/specs-go/v1"
34"github.com/sirupsen/logrus"
35)
36
const (
	// OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,
	// suitable for specifying as a value of the PreferredManifestType
	// member of a CommitOptions structure.  It is also the default.
	OCIv1ImageManifest = define.OCIv1ImageManifest
	// Dockerv2ImageManifest is the MIME type of a Docker v2s2 image
	// manifest, suitable for specifying as a value of the
	// PreferredManifestType member of a CommitOptions structure.
	Dockerv2ImageManifest = define.Dockerv2ImageManifest
)
47
// ExtractRootfsOptions is consumed by ExtractRootfs() which allows users to
// control whether various information like setuid and setgid bits and
// xattrs are preserved when extracting file system objects.
type ExtractRootfsOptions struct {
	StripSetuidBit bool // strip the setuid bit off of items being extracted.
	StripSetgidBit bool // strip the setgid bit off of items being extracted.
	StripXattrs    bool // don't record extended attributes of items being extracted.
}
56
// containerImageRef is an ImageReference which produces an image from a
// container's read-write layer (plus whatever base-image layers and history
// it inherits), via NewImage()/NewImageSource().
type containerImageRef struct {
	fromImageName         string // name of the base image, used when building the "FROM" history comment
	fromImageID           string // ID of the base image
	store                 storage.Store
	compression           archive.Compression // compression to apply when writing layer blobs
	name                  reference.Named     // optional name, reported by DockerReference()
	names                 []string            // candidate names; the first is used by StringWithinTransport()
	containerID           string
	mountLabel            string
	layerID               string // the container's read-write layer
	oconfig               []byte // OCI-format image configuration, as JSON
	dconfig               []byte // Docker-format image configuration, as JSON
	created               *time.Time // when set, overrides creation timestamps in config, history, and layer headers
	createdBy             string
	historyComment        string
	annotations           map[string]string // annotations to set on an OCI manifest
	preferredManifestType string
	squash                bool // emit the whole rootfs as a single layer
	confidentialWorkload  ConfidentialWorkloadOptions
	omitHistory           bool // don't emit history entries at all
	emptyLayer            bool // don't include a diff for the read-write layer
	idMappingOptions      *define.IDMappingOptions
	parent                string
	blobDirectory         string // extra directory that GetBlob() searches for layer blobs
	preEmptyLayers        []v1.History // history entries to append before our own
	postEmptyLayers       []v1.History // history entries to append after our own
	overrideChanges       []string
	overrideConfig        *manifest.Schema2Config
	extraImageContent     map[string]string // extra content to slip into the image; see makeExtraImageContentDiff()
}
87
// blobLayerInfo identifies a storage layer which can regenerate the blob for
// a given digest on demand, along with the size to report for it.
type blobLayerInfo struct {
	ID   string // layer ID in the store
	Size int64  // size to report for the blob
}
92
// containerImageSource is the ImageSource built by
// containerImageRef.NewImageSource(); it serves the manifest, the
// configuration blob, and layer blobs staged under path (or regenerated
// from the store via blobLayers).
type containerImageSource struct {
	path          string // temporary directory holding staged layer blobs; removed by Close()
	ref           *containerImageRef
	store         storage.Store
	containerID   string
	mountLabel    string
	layerID       string
	names         []string
	compression   archive.Compression
	config        []byte        // selected image configuration, as JSON
	configDigest  digest.Digest // digest of config, used to recognize config requests in GetBlob()
	manifest      []byte
	manifestType  string
	blobDirectory string // extra directory checked for blobs before path
	blobLayers    map[digest.Digest]blobLayerInfo // blobs served straight from the store, by digest
}
109
110func (i *containerImageRef) NewImage(ctx context.Context, sc *types.SystemContext) (types.ImageCloser, error) {
111src, err := i.NewImageSource(ctx, sc)
112if err != nil {
113return nil, err
114}
115return image.FromSource(ctx, sc, src)
116}
117
118func expectedOCIDiffIDs(image v1.Image) int {
119expected := 0
120for _, history := range image.History {
121if !history.EmptyLayer {
122expected = expected + 1
123}
124}
125return expected
126}
127
128func expectedDockerDiffIDs(image docker.V2Image) int {
129expected := 0
130for _, history := range image.History {
131if !history.EmptyLayer {
132expected = expected + 1
133}
134}
135return expected
136}
137
138// Compute the media types which we need to attach to a layer, given the type of
139// compression that we'll be applying.
140func computeLayerMIMEType(what string, layerCompression archive.Compression) (omediaType, dmediaType string, err error) {
141omediaType = v1.MediaTypeImageLayer
142dmediaType = docker.V2S2MediaTypeUncompressedLayer
143if layerCompression != archive.Uncompressed {
144switch layerCompression {
145case archive.Gzip:
146omediaType = v1.MediaTypeImageLayerGzip
147dmediaType = manifest.DockerV2Schema2LayerMediaType
148logrus.Debugf("compressing %s with gzip", what)
149case archive.Bzip2:
150// Until the image specs define a media type for bzip2-compressed layers, even if we know
151// how to decompress them, we can't try to compress layers with bzip2.
152return "", "", errors.New("media type for bzip2-compressed layers is not defined")
153case archive.Xz:
154// Until the image specs define a media type for xz-compressed layers, even if we know
155// how to decompress them, we can't try to compress layers with xz.
156return "", "", errors.New("media type for xz-compressed layers is not defined")
157case archive.Zstd:
158// Until the image specs define a media type for zstd-compressed layers, even if we know
159// how to decompress them, we can't try to compress layers with zstd.
160return "", "", errors.New("media type for zstd-compressed layers is not defined")
161default:
162logrus.Debugf("compressing %s with unknown compressor(?)", what)
163}
164}
165return omediaType, dmediaType, nil
166}
167
// extractConfidentialWorkloadFS extracts the container's whole filesystem as
// a filesystem image, wrapped in LUKS-compatible encryption.  The returned
// ReadCloser unmounts the container when it is closed.
func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) {
	var image v1.Image
	if err := json.Unmarshal(i.oconfig, &image); err != nil {
		return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err)
	}
	if options.TempDir == "" {
		// Default to scratch space under the per-container directory.
		cdir, err := i.store.ContainerDirectory(i.containerID)
		if err != nil {
			return nil, fmt.Errorf("getting the per-container data directory for %q: %w", i.containerID, err)
		}
		tempdir, err := os.MkdirTemp(cdir, "buildah-rootfs")
		if err != nil {
			return nil, fmt.Errorf("creating a temporary data directory to hold a rootfs image for %q: %w", i.containerID, err)
		}
		defer func() {
			// NOTE(review): this removes the scratch directory when this
			// function returns, i.e. while the returned archive may still
			// be read — assumes mkcw.Archive() no longer needs TempDir
			// once it has returned; confirm.
			if err := os.RemoveAll(tempdir); err != nil {
				logrus.Warnf("removing temporary directory %q: %v", tempdir, err)
			}
		}()
		options.TempDir = tempdir
	}
	mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
	if err != nil {
		return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
	}
	archiveOptions := mkcw.ArchiveOptions{
		AttestationURL:           options.AttestationURL,
		CPUs:                     options.CPUs,
		Memory:                   options.Memory,
		TempDir:                  options.TempDir,
		TeeType:                  options.TeeType,
		IgnoreAttestationErrors:  options.IgnoreAttestationErrors,
		WorkloadID:               options.WorkloadID,
		DiskEncryptionPassphrase: options.DiskEncryptionPassphrase,
		Slop:                     options.Slop,
		FirmwareLibrary:          options.FirmwareLibrary,
		GraphOptions:             i.store.GraphOptions(),
		ExtraImageContent:        i.extraImageContent,
	}
	rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions)
	if err != nil {
		// Archiving failed: try to unmount before reporting the error.
		if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil {
			logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
		}
		return nil, fmt.Errorf("converting rootfs %q: %w", i.containerID, err)
	}
	// Wrap the archive so that closing it also unmounts the container.  A
	// close error takes precedence; an unmount error is then only logged.
	return ioutils.NewReadCloserWrapper(rc, func() error {
		if err = rc.Close(); err != nil {
			err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
		}
		if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
			if err2 != nil {
				err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
			}
			err = err2
		} else {
			logrus.Debugf("unmounting container %q: %v", i.containerID, err2)
		}
		return err
	}), nil
}
231
232// Extract the container's whole filesystem as if it were a single layer.
233// The ExtractRootfsOptions control whether or not to preserve setuid and
234// setgid bits and extended attributes on contents.
235func (i *containerImageRef) extractRootfs(opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
236var uidMap, gidMap []idtools.IDMap
237mountPoint, err := i.store.Mount(i.containerID, i.mountLabel)
238if err != nil {
239return nil, nil, fmt.Errorf("mounting container %q: %w", i.containerID, err)
240}
241pipeReader, pipeWriter := io.Pipe()
242errChan := make(chan error, 1)
243go func() {
244defer close(errChan)
245if len(i.extraImageContent) > 0 {
246// Abuse the tar format and _prepend_ the synthesized
247// data items to the archive we'll get from
248// copier.Get(), in a way that looks right to a reader
249// as long as we DON'T Close() the tar Writer.
250filename, _, _, err := i.makeExtraImageContentDiff(false)
251if err != nil {
252errChan <- err
253return
254}
255file, err := os.Open(filename)
256if err != nil {
257errChan <- err
258return
259}
260defer file.Close()
261if _, err = io.Copy(pipeWriter, file); err != nil {
262errChan <- err
263return
264}
265}
266if i.idMappingOptions != nil {
267uidMap, gidMap = convertRuntimeIDMaps(i.idMappingOptions.UIDMap, i.idMappingOptions.GIDMap)
268}
269copierOptions := copier.GetOptions{
270UIDMap: uidMap,
271GIDMap: gidMap,
272StripSetuidBit: opts.StripSetuidBit,
273StripSetgidBit: opts.StripSetgidBit,
274StripXattrs: opts.StripXattrs,
275}
276err := copier.Get(mountPoint, mountPoint, copierOptions, []string{"."}, pipeWriter)
277errChan <- err
278pipeWriter.Close()
279
280}()
281return ioutils.NewReadCloserWrapper(pipeReader, func() error {
282if err = pipeReader.Close(); err != nil {
283err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err)
284}
285if _, err2 := i.store.Unmount(i.containerID, false); err == nil {
286if err2 != nil {
287err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2)
288}
289err = err2
290}
291return err
292}), errChan, nil
293}
294
// createConfigsAndManifests builds fresh copies of the container
// configuration structures so that we can edit them without making
// unintended changes to the original Builder, along with empty OCI and
// Docker manifests whose Layers lists the caller is expected to populate.
func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, docker.V2Image, docker.V2S2Manifest, error) {
	created := time.Now().UTC()
	if i.created != nil {
		created = *i.created
	}

	// Build an empty image, and then decode over it.
	oimage := v1.Image{}
	if err := json.Unmarshal(i.oconfig, &oimage); err != nil {
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
	}
	// Always replace this value, since we're newer than our base image.
	oimage.Created = &created
	// Clear the list of diffIDs, since we always repopulate it.
	oimage.RootFS.Type = docker.TypeLayers
	oimage.RootFS.DiffIDs = []digest.Digest{}
	// Only clear the history if we're squashing, otherwise leave it be so that we can append
	// entries to it.
	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
		oimage.History = []v1.History{}
	}

	// Build an empty image, and then decode over it.
	dimage := docker.V2Image{}
	if err := json.Unmarshal(i.dconfig, &dimage); err != nil {
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, err
	}
	dimage.Parent = docker.ID(i.parent)
	dimage.Container = i.containerID
	if dimage.Config != nil {
		dimage.ContainerConfig = *dimage.Config
	}
	// Always replace this value, since we're newer than our base image.
	dimage.Created = created
	// Clear the list of diffIDs, since we always repopulate it.
	dimage.RootFS = &docker.V2S2RootFS{}
	dimage.RootFS.Type = docker.TypeLayers
	dimage.RootFS.DiffIDs = []digest.Digest{}
	// Only clear the history if we're squashing, otherwise leave it be so
	// that we can append entries to it.  Clear the parent, too, to reflect
	// that we no longer include its layers and history.
	if i.confidentialWorkload.Convert || i.squash || i.omitHistory {
		dimage.Parent = ""
		dimage.History = []docker.V2S2History{}
	}

	// If we were supplied with a configuration, copy fields from it to
	// matching fields in both formats.
	if err := config.Override(dimage.Config, &oimage.Config, i.overrideChanges, i.overrideConfig); err != nil {
		return v1.Image{}, v1.Manifest{}, docker.V2Image{}, docker.V2S2Manifest{}, fmt.Errorf("applying changes: %w", err)
	}

	// If we're producing a confidential workload, override the command and
	// assorted other settings that aren't expected to work correctly.
	if i.confidentialWorkload.Convert {
		dimage.Config.Entrypoint = []string{"/entrypoint"}
		oimage.Config.Entrypoint = []string{"/entrypoint"}
		dimage.Config.Cmd = nil
		oimage.Config.Cmd = nil
		dimage.Config.User = ""
		oimage.Config.User = ""
		dimage.Config.WorkingDir = ""
		oimage.Config.WorkingDir = ""
		dimage.Config.Healthcheck = nil
		dimage.Config.Shell = nil
		dimage.Config.Volumes = nil
		oimage.Config.Volumes = nil
		dimage.Config.ExposedPorts = nil
		oimage.Config.ExposedPorts = nil
	}

	// Build empty manifests.  The Layers lists will be populated later.
	omanifest := v1.Manifest{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
		MediaType: v1.MediaTypeImageManifest,
		Config: v1.Descriptor{
			MediaType: v1.MediaTypeImageConfig,
		},
		Layers:      []v1.Descriptor{},
		Annotations: i.annotations,
	}

	dmanifest := docker.V2S2Manifest{
		V2Versioned: docker.V2Versioned{
			SchemaVersion: 2,
			MediaType:     manifest.DockerV2Schema2MediaType,
		},
		Config: docker.V2S2Descriptor{
			MediaType: manifest.DockerV2Schema2ConfigMediaType,
		},
		Layers: []docker.V2S2Descriptor{},
	}

	return oimage, omanifest, dimage, dmanifest, nil
}
394
395func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.SystemContext) (src types.ImageSource, err error) {
396// Decide which type of manifest and configuration output we're going to provide.
397manifestType := i.preferredManifestType
398// If it's not a format we support, return an error.
399if manifestType != v1.MediaTypeImageManifest && manifestType != manifest.DockerV2Schema2MediaType {
400return nil, fmt.Errorf("no supported manifest types (attempted to use %q, only know %q and %q)",
401manifestType, v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType)
402}
403// Start building the list of layers using the read-write layer.
404layers := []string{}
405layerID := i.layerID
406layer, err := i.store.Layer(layerID)
407if err != nil {
408return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
409}
410// Walk the list of parent layers, prepending each as we go. If we're squashing
411// or making a confidential workload, we're only producing one layer, so stop at
412// the layer ID of the top layer, which we won't really be using anyway.
413for layer != nil {
414layers = append(append([]string{}, layerID), layers...)
415layerID = layer.Parent
416if layerID == "" || i.confidentialWorkload.Convert || i.squash {
417err = nil
418break
419}
420layer, err = i.store.Layer(layerID)
421if err != nil {
422return nil, fmt.Errorf("unable to read layer %q: %w", layerID, err)
423}
424}
425layer = nil
426
427// If we're slipping in a synthesized layer, we need to add a placeholder for it
428// to the list.
429const synthesizedLayerID = "(synthesized layer)"
430if len(i.extraImageContent) > 0 && !i.confidentialWorkload.Convert && !i.squash {
431layers = append(layers, synthesizedLayerID)
432}
433logrus.Debugf("layer list: %q", layers)
434
435// Make a temporary directory to hold blobs.
436path, err := os.MkdirTemp(tmpdir.GetTempDir(), define.Package)
437if err != nil {
438return nil, fmt.Errorf("creating temporary directory to hold layer blobs: %w", err)
439}
440logrus.Debugf("using %q to hold temporary data", path)
441defer func() {
442if src == nil {
443err2 := os.RemoveAll(path)
444if err2 != nil {
445logrus.Errorf("error removing layer blob directory: %v", err)
446}
447}
448}()
449
450// Build fresh copies of the configurations and manifest so that we don't mess with any
451// values in the Builder object itself.
452oimage, omanifest, dimage, dmanifest, err := i.createConfigsAndManifests()
453if err != nil {
454return nil, err
455}
456
457// Extract each layer and compute its digests, both compressed (if requested) and uncompressed.
458var extraImageContentDiff string
459var extraImageContentDiffDigest digest.Digest
460blobLayers := make(map[digest.Digest]blobLayerInfo)
461for _, layerID := range layers {
462what := fmt.Sprintf("layer %q", layerID)
463if i.confidentialWorkload.Convert || i.squash {
464what = fmt.Sprintf("container %q", i.containerID)
465}
466// The default layer media type assumes no compression.
467omediaType := v1.MediaTypeImageLayer
468dmediaType := docker.V2S2MediaTypeUncompressedLayer
469// Look up this layer.
470var layerUncompressedDigest digest.Digest
471var layerUncompressedSize int64
472if layerID != synthesizedLayerID {
473layer, err := i.store.Layer(layerID)
474if err != nil {
475return nil, fmt.Errorf("unable to locate layer %q: %w", layerID, err)
476}
477layerID = layer.ID
478layerUncompressedDigest = layer.UncompressedDigest
479layerUncompressedSize = layer.UncompressedSize
480} else {
481diffFilename, digest, size, err := i.makeExtraImageContentDiff(true)
482if err != nil {
483return nil, fmt.Errorf("unable to generate layer for additional content: %w", err)
484}
485extraImageContentDiff = diffFilename
486extraImageContentDiffDigest = digest
487layerUncompressedDigest = digest
488layerUncompressedSize = size
489}
490// If we already know the digest of the contents of parent
491// layers, reuse their blobsums, diff IDs, and sizes.
492if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layerID != synthesizedLayerID && layerUncompressedDigest != "" {
493layerBlobSum := layerUncompressedDigest
494layerBlobSize := layerUncompressedSize
495diffID := layerUncompressedDigest
496// Note this layer in the manifest, using the appropriate blobsum.
497olayerDescriptor := v1.Descriptor{
498MediaType: omediaType,
499Digest: layerBlobSum,
500Size: layerBlobSize,
501}
502omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
503dlayerDescriptor := docker.V2S2Descriptor{
504MediaType: dmediaType,
505Digest: layerBlobSum,
506Size: layerBlobSize,
507}
508dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
509// Note this layer in the list of diffIDs, again using the uncompressed digest.
510oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, diffID)
511dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, diffID)
512blobLayers[diffID] = blobLayerInfo{
513ID: layerID,
514Size: layerBlobSize,
515}
516continue
517}
518// Figure out if we need to change the media type, in case we've changed the compression.
519omediaType, dmediaType, err = computeLayerMIMEType(what, i.compression)
520if err != nil {
521return nil, err
522}
523// Start reading either the layer or the whole container rootfs.
524noCompression := archive.Uncompressed
525diffOptions := &storage.DiffOptions{
526Compression: &noCompression,
527}
528var rc io.ReadCloser
529var errChan chan error
530if i.confidentialWorkload.Convert {
531// Convert the root filesystem into an encrypted disk image.
532rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload)
533if err != nil {
534return nil, err
535}
536} else if i.squash {
537// Extract the root filesystem as a single layer.
538rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{})
539if err != nil {
540return nil, err
541}
542} else {
543if layerID != synthesizedLayerID {
544// If we're up to the final layer, but we don't want to
545// include a diff for it, we're done.
546if i.emptyLayer && layerID == i.layerID {
547continue
548}
549// Extract this layer, one of possibly many.
550rc, err = i.store.Diff("", layerID, diffOptions)
551if err != nil {
552return nil, fmt.Errorf("extracting %s: %w", what, err)
553}
554} else {
555// Slip in additional content as an additional layer.
556if rc, err = os.Open(extraImageContentDiff); err != nil {
557return nil, err
558}
559}
560}
561srcHasher := digest.Canonical.Digester()
562// Set up to write the possibly-recompressed blob.
563layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0600)
564if err != nil {
565rc.Close()
566return nil, fmt.Errorf("opening file for %s: %w", what, err)
567}
568
569counter := ioutils.NewWriteCounter(layerFile)
570var destHasher digest.Digester
571var multiWriter io.Writer
572// Avoid rehashing when we do not compress.
573if i.compression != archive.Uncompressed {
574destHasher = digest.Canonical.Digester()
575multiWriter = io.MultiWriter(counter, destHasher.Hash())
576} else {
577destHasher = srcHasher
578multiWriter = counter
579}
580// Compress the layer, if we're recompressing it.
581writeCloser, err := archive.CompressStream(multiWriter, i.compression)
582if err != nil {
583layerFile.Close()
584rc.Close()
585return nil, fmt.Errorf("compressing %s: %w", what, err)
586}
587writer := io.MultiWriter(writeCloser, srcHasher.Hash())
588// Scrub any local user names that might correspond to UIDs or GIDs of
589// files in this layer.
590{
591nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
592writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
593hdr.Uname, hdr.Gname = "", ""
594return false, false, nil
595})
596writer = io.Writer(writeCloser)
597}
598// Use specified timestamps in the layer, if we're doing that for
599// history entries.
600if i.created != nil {
601nestedWriteCloser := ioutils.NewWriteCloserWrapper(writer, writeCloser.Close)
602writeCloser = newTarFilterer(nestedWriteCloser, func(hdr *tar.Header) (bool, bool, io.Reader) {
603// Changing a zeroed field to a non-zero field
604// can affect the format that the library uses
605// for writing the header, so only change
606// fields that are already set to avoid
607// changing the format (and as a result,
608// changing the length) of the header that we
609// write.
610if !hdr.ModTime.IsZero() {
611hdr.ModTime = *i.created
612}
613if !hdr.AccessTime.IsZero() {
614hdr.AccessTime = *i.created
615}
616if !hdr.ChangeTime.IsZero() {
617hdr.ChangeTime = *i.created
618}
619return false, false, nil
620})
621writer = io.Writer(writeCloser)
622}
623size, err := io.Copy(writer, rc)
624writeCloser.Close()
625layerFile.Close()
626rc.Close()
627
628if errChan != nil {
629err = <-errChan
630if err != nil {
631return nil, err
632}
633}
634
635if err != nil {
636return nil, fmt.Errorf("storing %s to file: %w", what, err)
637}
638if i.compression == archive.Uncompressed {
639if size != counter.Count {
640return nil, fmt.Errorf("storing %s to file: inconsistent layer size (copied %d, wrote %d)", what, size, counter.Count)
641}
642} else {
643size = counter.Count
644}
645logrus.Debugf("%s size is %d bytes, uncompressed digest %s, possibly-compressed digest %s", what, size, srcHasher.Digest().String(), destHasher.Digest().String())
646// Rename the layer so that we can more easily find it by digest later.
647finalBlobName := filepath.Join(path, destHasher.Digest().String())
648if err = os.Rename(filepath.Join(path, "layer"), finalBlobName); err != nil {
649return nil, fmt.Errorf("storing %s to file while renaming %q to %q: %w", what, filepath.Join(path, "layer"), finalBlobName, err)
650}
651// Add a note in the manifest about the layer. The blobs are identified by their possibly-
652// compressed blob digests.
653olayerDescriptor := v1.Descriptor{
654MediaType: omediaType,
655Digest: destHasher.Digest(),
656Size: size,
657}
658omanifest.Layers = append(omanifest.Layers, olayerDescriptor)
659dlayerDescriptor := docker.V2S2Descriptor{
660MediaType: dmediaType,
661Digest: destHasher.Digest(),
662Size: size,
663}
664dmanifest.Layers = append(dmanifest.Layers, dlayerDescriptor)
665// Add a note about the diffID, which is always the layer's uncompressed digest.
666oimage.RootFS.DiffIDs = append(oimage.RootFS.DiffIDs, srcHasher.Digest())
667dimage.RootFS.DiffIDs = append(dimage.RootFS.DiffIDs, srcHasher.Digest())
668}
669
670// Build history notes in the image configurations.
671appendHistory := func(history []v1.History) {
672for i := range history {
673var created *time.Time
674if history[i].Created != nil {
675copiedTimestamp := *history[i].Created
676created = &copiedTimestamp
677}
678onews := v1.History{
679Created: created,
680CreatedBy: history[i].CreatedBy,
681Author: history[i].Author,
682Comment: history[i].Comment,
683EmptyLayer: true,
684}
685oimage.History = append(oimage.History, onews)
686if created == nil {
687created = &time.Time{}
688}
689dnews := docker.V2S2History{
690Created: *created,
691CreatedBy: history[i].CreatedBy,
692Author: history[i].Author,
693Comment: history[i].Comment,
694EmptyLayer: true,
695}
696dimage.History = append(dimage.History, dnews)
697}
698}
699
700// Only attempt to append history if history was not disabled explicitly.
701if !i.omitHistory {
702// Keep track of how many entries the base image's history had
703// before we started adding to it.
704baseImageHistoryLen := len(oimage.History)
705appendHistory(i.preEmptyLayers)
706created := time.Now().UTC()
707if i.created != nil {
708created = (*i.created).UTC()
709}
710comment := i.historyComment
711// Add a comment indicating which base image was used, if it wasn't
712// just an image ID.
713if strings.Contains(i.parent, i.fromImageID) && i.fromImageName != i.fromImageID {
714comment += "FROM " + i.fromImageName
715}
716onews := v1.History{
717Created: &created,
718CreatedBy: i.createdBy,
719Author: oimage.Author,
720Comment: comment,
721EmptyLayer: i.emptyLayer,
722}
723oimage.History = append(oimage.History, onews)
724dnews := docker.V2S2History{
725Created: created,
726CreatedBy: i.createdBy,
727Author: dimage.Author,
728Comment: comment,
729EmptyLayer: i.emptyLayer,
730}
731dimage.History = append(dimage.History, dnews)
732appendHistory(i.postEmptyLayers)
733
734// Add a history entry for the extra image content if we added a layer for it.
735if extraImageContentDiff != "" {
736createdBy := fmt.Sprintf(`/bin/sh -c #(nop) ADD dir:%s in /",`, extraImageContentDiffDigest.Encoded())
737onews := v1.History{
738Created: &created,
739CreatedBy: createdBy,
740}
741oimage.History = append(oimage.History, onews)
742dnews := docker.V2S2History{
743Created: created,
744CreatedBy: createdBy,
745}
746dimage.History = append(dimage.History, dnews)
747}
748
749// Confidence check that we didn't just create a mismatch between non-empty layers in the
750// history and the number of diffIDs. Only applicable if the base image (if there was
751// one) provided us at least one entry to use as a starting point.
752if baseImageHistoryLen != 0 {
753expectedDiffIDs := expectedOCIDiffIDs(oimage)
754if len(oimage.RootFS.DiffIDs) != expectedDiffIDs {
755return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(oimage.RootFS.DiffIDs))
756}
757expectedDiffIDs = expectedDockerDiffIDs(dimage)
758if len(dimage.RootFS.DiffIDs) != expectedDiffIDs {
759return nil, fmt.Errorf("internal error: history lists %d non-empty layers, but we have %d layers on disk", expectedDiffIDs, len(dimage.RootFS.DiffIDs))
760}
761}
762}
763
764// Encode the image configuration blob.
765oconfig, err := json.Marshal(&oimage)
766if err != nil {
767return nil, fmt.Errorf("encoding %#v as json: %w", oimage, err)
768}
769logrus.Debugf("OCIv1 config = %s", oconfig)
770
771// Add the configuration blob to the manifest.
772omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig)
773omanifest.Config.Size = int64(len(oconfig))
774omanifest.Config.MediaType = v1.MediaTypeImageConfig
775
776// Encode the manifest.
777omanifestbytes, err := json.Marshal(&omanifest)
778if err != nil {
779return nil, fmt.Errorf("encoding %#v as json: %w", omanifest, err)
780}
781logrus.Debugf("OCIv1 manifest = %s", omanifestbytes)
782
783// Encode the image configuration blob.
784dconfig, err := json.Marshal(&dimage)
785if err != nil {
786return nil, fmt.Errorf("encoding %#v as json: %w", dimage, err)
787}
788logrus.Debugf("Docker v2s2 config = %s", dconfig)
789
790// Add the configuration blob to the manifest.
791dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig)
792dmanifest.Config.Size = int64(len(dconfig))
793dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType
794
795// Encode the manifest.
796dmanifestbytes, err := json.Marshal(&dmanifest)
797if err != nil {
798return nil, fmt.Errorf("encoding %#v as json: %w", dmanifest, err)
799}
800logrus.Debugf("Docker v2s2 manifest = %s", dmanifestbytes)
801
802// Decide which manifest and configuration blobs we'll actually output.
803var config []byte
804var imageManifest []byte
805switch manifestType {
806case v1.MediaTypeImageManifest:
807imageManifest = omanifestbytes
808config = oconfig
809case manifest.DockerV2Schema2MediaType:
810imageManifest = dmanifestbytes
811config = dconfig
812default:
813panic("unreachable code: unsupported manifest type")
814}
815src = &containerImageSource{
816path: path,
817ref: i,
818store: i.store,
819containerID: i.containerID,
820mountLabel: i.mountLabel,
821layerID: i.layerID,
822names: i.names,
823compression: i.compression,
824config: config,
825configDigest: digest.Canonical.FromBytes(config),
826manifest: imageManifest,
827manifestType: manifestType,
828blobDirectory: i.blobDirectory,
829blobLayers: blobLayers,
830}
831return src, nil
832}
833
// NewImageDestination always fails: a container cannot be written to as if
// it were an image.
func (i *containerImageRef) NewImageDestination(ctx context.Context, sc *types.SystemContext) (types.ImageDestination, error) {
	return nil, errors.New("can't write to a container")
}
837
// DockerReference returns the reference's name, which may be nil.
func (i *containerImageRef) DockerReference() reference.Named {
	return i.name
}
841
842func (i *containerImageRef) StringWithinTransport() string {
843if len(i.names) > 0 {
844return i.names[0]
845}
846return ""
847}
848
// DeleteImage is a no-op: there is no stored image behind this reference to
// delete.
func (i *containerImageRef) DeleteImage(context.Context, *types.SystemContext) error {
	// we were never here
	return nil
}
853
// PolicyConfigurationIdentity returns an empty string: this reference has no
// policy-configuration identity.
func (i *containerImageRef) PolicyConfigurationIdentity() string {
	return ""
}
857
// PolicyConfigurationNamespaces returns nil: this reference has no
// policy-configuration namespaces.
func (i *containerImageRef) PolicyConfigurationNamespaces() []string {
	return nil
}
861
// Transport reports this reference as belonging to the containers-storage
// transport.
func (i *containerImageRef) Transport() types.ImageTransport {
	return is.Transport
}
865
866func (i *containerImageSource) Close() error {
867err := os.RemoveAll(i.path)
868if err != nil {
869return fmt.Errorf("removing layer blob directory: %w", err)
870}
871return nil
872}
873
// Reference returns the containerImageRef that this source was built from.
func (i *containerImageSource) Reference() types.ImageReference {
	return i.ref
}
877
// GetSignatures returns no signatures: a freshly-committed image has none.
func (i *containerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
	return nil, nil
}
881
// GetManifest returns the manifest and manifest MIME type that were computed
// when this source was created.
func (i *containerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
	return i.manifest, i.manifestType, nil
}
885
// LayerInfosForCopy returns nil: no substitute layer information is offered,
// so callers should use the values from the manifest.
func (i *containerImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
	return nil, nil
}
889
// HasThreadSafeGetBlob reports that GetBlob may not be called concurrently.
func (i *containerImageSource) HasThreadSafeGetBlob() bool {
	return false
}
893
// GetBlob returns a reader for the blob with the requested digest: the image
// configuration if the digest matches it, a diff regenerated from the store
// if the digest is one of the known-reusable layers in blobLayers, or a
// staged blob file found under the blob directory or the temporary path
// otherwise.  The returned size is -1 only on error.
func (i *containerImageSource) GetBlob(ctx context.Context, blob types.BlobInfo, cache types.BlobInfoCache) (reader io.ReadCloser, size int64, err error) {
	if blob.Digest == i.configDigest {
		logrus.Debugf("start reading config")
		reader := bytes.NewReader(i.config)
		closer := func() error {
			logrus.Debugf("finished reading config")
			return nil
		}
		return ioutils.NewReadCloserWrapper(reader, closer), reader.Size(), nil
	}
	var layerReadCloser io.ReadCloser
	size = -1
	if blobLayerInfo, ok := i.blobLayers[blob.Digest]; ok {
		// Serve the layer diff straight out of the store, uncompressed.
		layerReadCloser, err = i.store.Diff("", blobLayerInfo.ID, diffOptions)
		size = blobLayerInfo.Size
	} else {
		// Check the supplied blob directory first, then our staging path.
		for _, blobDir := range []string{i.blobDirectory, i.path} {
			var layerFile *os.File
			layerFile, err = os.OpenFile(filepath.Join(blobDir, blob.Digest.String()), os.O_RDONLY, 0600)
			if err == nil {
				// Note: this err shadows the outer err, so a failed
				// Stat() leaves the outer err nil and we fall through
				// to try the next directory.
				st, err := layerFile.Stat()
				if err != nil {
					logrus.Warnf("error reading size of layer file %q: %v", blob.Digest.String(), err)
				} else {
					size = st.Size()
					layerReadCloser = layerFile
					break
				}
				layerFile.Close()
			}
			if !errors.Is(err, os.ErrNotExist) {
				logrus.Debugf("error checking for layer %q in %q: %v", blob.Digest.String(), blobDir, err)
			}
		}
	}
	if err != nil || layerReadCloser == nil || size == -1 {
		logrus.Debugf("error reading layer %q: %v", blob.Digest.String(), err)
		return nil, -1, fmt.Errorf("opening layer blob: %w", err)
	}
	logrus.Debugf("reading layer %q", blob.Digest.String())
	closer := func() error {
		logrus.Debugf("finished reading layer %q", blob.Digest.String())
		if err := layerReadCloser.Close(); err != nil {
			return fmt.Errorf("closing layer %q after reading: %w", blob.Digest.String(), err)
		}
		return nil
	}
	return ioutils.NewReadCloserWrapper(layerReadCloser, closer), size, nil
}
947
948// makeExtraImageContentDiff creates an archive file containing the contents of
949// files named in i.extraImageContent. The footer that marks the end of the
950// archive may be omitted.
951func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool) (string, digest.Digest, int64, error) {
952cdir, err := i.store.ContainerDirectory(i.containerID)
953if err != nil {
954return "", "", -1, err
955}
956diff, err := os.CreateTemp(cdir, "extradiff")
957if err != nil {
958return "", "", -1, err
959}
960defer diff.Close()
961digester := digest.Canonical.Digester()
962counter := ioutils.NewWriteCounter(digester.Hash())
963tw := tar.NewWriter(io.MultiWriter(diff, counter))
964created := time.Now()
965if i.created != nil {
966created = *i.created
967}
968for path, contents := range i.extraImageContent {
969if err := func() error {
970content, err := os.Open(contents)
971if err != nil {
972return err
973}
974defer content.Close()
975st, err := content.Stat()
976if err != nil {
977return err
978}
979if err := tw.WriteHeader(&tar.Header{
980Name: path,
981Typeflag: tar.TypeReg,
982Mode: 0o644,
983ModTime: created,
984Size: st.Size(),
985}); err != nil {
986return err
987}
988if _, err := io.Copy(tw, content); err != nil {
989return err
990}
991if err := tw.Flush(); err != nil {
992return err
993}
994return nil
995}(); err != nil {
996return "", "", -1, err
997}
998}
999if !includeFooter {
1000return diff.Name(), "", -1, err
1001}
1002tw.Close()
1003return diff.Name(), digester.Digest(), counter.Count, err
1004}
1005
// makeContainerImageRef creates a containers/image/v5/types.ImageReference
// which is mainly used for representing the working container as a source
// image that can be copied, which is how we commit container to create the
// image.
func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageRef, error) {
	var name reference.Named
	// Look up the working container so we can read its names and layer ID.
	container, err := b.store.Container(b.ContainerID)
	if err != nil {
		return nil, fmt.Errorf("locating container %q: %w", b.ContainerID, err)
	}
	// If the container's first name parses as an image reference, use it
	// as the committed image's name.
	if len(container.Names) > 0 {
		if parsed, err2 := reference.ParseNamed(container.Names[0]); err2 == nil {
			name = parsed
		}
	}
	manifestType := options.PreferredManifestType
	if manifestType == "" {
		// default to producing an OCI-format image
		manifestType = define.OCIv1ImageManifest
	}

	// Unset any requested environment variables before serializing the
	// configurations below, so the removals show up in both blobs.
	for _, u := range options.UnsetEnvs {
		b.UnsetEnv(u)
	}
	// Serialize both the OCI-format and Docker-format configurations; the
	// one matching the preferred manifest type gets used later.
	oconfig, err := json.Marshal(&b.OCIv1)
	if err != nil {
		return nil, fmt.Errorf("encoding OCI-format image configuration %#v: %w", b.OCIv1, err)
	}
	dconfig, err := json.Marshal(&b.Docker)
	if err != nil {
		return nil, fmt.Errorf("encoding docker-format image configuration %#v: %w", b.Docker, err)
	}
	// A forced history timestamp, if one was provided, is normalized to UTC.
	var created *time.Time
	if options.HistoryTimestamp != nil {
		historyTimestampUTC := options.HistoryTimestamp.UTC()
		created = &historyTimestampUTC
	}
	// Fall back from the recorded created-by value to the configured shell,
	// and finally to /bin/sh.
	createdBy := b.CreatedBy()
	if createdBy == "" {
		createdBy = strings.Join(b.Shell(), " ")
		if createdBy == "" {
			createdBy = "/bin/sh"
		}
	}

	parent := ""
	forceOmitHistory := false
	if b.FromImageID != "" {
		// Record the base image's ID as the parent if it validates as a
		// canonical digest value.
		parentDigest := digest.NewDigestFromEncoded(digest.Canonical, b.FromImageID)
		if parentDigest.Validate() == nil {
			parent = parentDigest.String()
		}
		if !options.OmitHistory && len(b.OCIv1.History) == 0 && len(b.OCIv1.RootFS.DiffIDs) != 0 {
			// Parent had layers, but no history. We shouldn't confuse
			// our own confidence checks by adding history for layers
			// that we're adding, creating an image with multiple layers,
			// only some of which have history entries, which would be
			// broken in confusing ways.
			b.Logger.Debugf("parent image %q had no history but had %d layers, assuming OmitHistory", b.FromImageID, len(b.OCIv1.RootFS.DiffIDs))
			forceOmitHistory = true
		}
	}

	// Bundle everything the image-copy machinery will need into the
	// reference.  Note that emptyLayer is forced off when squashing or
	// converting to a confidential workload, since both replace the layers.
	ref := &containerImageRef{
		fromImageName:         b.FromImage,
		fromImageID:           b.FromImageID,
		store:                 b.store,
		compression:           options.Compression,
		name:                  name,
		names:                 container.Names,
		containerID:           container.ID,
		mountLabel:            b.MountLabel,
		layerID:               container.LayerID,
		oconfig:               oconfig,
		dconfig:               dconfig,
		created:               created,
		createdBy:             createdBy,
		historyComment:        b.HistoryComment(),
		annotations:           b.Annotations(),
		preferredManifestType: manifestType,
		squash:                options.Squash,
		confidentialWorkload:  options.ConfidentialWorkloadOptions,
		omitHistory:           options.OmitHistory || forceOmitHistory,
		emptyLayer:            options.EmptyLayer && !options.Squash && !options.ConfidentialWorkloadOptions.Convert,
		idMappingOptions:      &b.IDMappingOptions,
		parent:                parent,
		blobDirectory:         options.BlobDirectory,
		preEmptyLayers:        b.PrependedEmptyLayers,
		postEmptyLayers:       b.AppendedEmptyLayers,
		overrideChanges:       options.OverrideChanges,
		overrideConfig:        options.OverrideConfig,
		extraImageContent:     copyStringStringMap(options.ExtraImageContent),
	}
	return ref, nil
}
1100
1101// Extract the container's whole filesystem as if it were a single layer from current builder instance
1102func (b *Builder) ExtractRootfs(options CommitOptions, opts ExtractRootfsOptions) (io.ReadCloser, chan error, error) {
1103src, err := b.makeContainerImageRef(options)
1104if err != nil {
1105return nil, nil, fmt.Errorf("creating image reference for container %q to extract its contents: %w", b.ContainerID, err)
1106}
1107return src.extractRootfs(opts)
1108}
1109