github.com/moby/docker@v26.1.3+incompatible/builder/builder-next/adapters/containerimage/pull.go

// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.19

package containerimage

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/content"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/platforms"
	ctdreference "github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
	"github.com/containerd/log"
	distreference "github.com/distribution/reference"
	dimages "github.com/docker/docker/daemon/images"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	pkgprogress "github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/reference"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb/sourceresolver"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/source/containerimage"
	srctypes "github.com/moby/buildkit/source/types"
	"github.com/moby/buildkit/sourcepolicy"
	spb "github.com/moby/buildkit/sourcepolicy/pb"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/resolver"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/time/rate"
)
// SourceOpt holds the options for creating the image source
type SourceOpt struct {
	ContentStore    content.Store
	CacheAccessor   cache.Accessor
	ReferenceStore  reference.Store
	DownloadManager *xfer.LayerDownloadManager
	MetadataStore   metadata.V2MetadataService
	ImageStore      image.Store
	RegistryHosts   docker.RegistryHosts
	LayerStore      layer.Store
	LeaseManager    leases.Manager
	GarbageCollect  func(ctx context.Context) (gc.Stats, error)
}

// Source is the source implementation for accessing container images
type Source struct {
	SourceOpt
	g flightcontrol.Group[*resolveRemoteResult]
}

// NewSource creates a new image source
func NewSource(opt SourceOpt) (*Source, error) {
	return &Source{SourceOpt: opt}, nil
}

// Schemes returns a list of SourceOp identifier schemes that this source
// should match.
func (is *Source) Schemes() []string {
	return []string{srctypes.DockerImageScheme}
}
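
// newSourceExample is a hypothetical sketch (not part of the original file):
// the daemon hands its stores and managers to NewSource via SourceOpt, and
// the returned Source is then registered with the worker under the scheme
// reported by Schemes (the docker-image scheme).
func newSourceExample(opt SourceOpt) (*Source, []string, error) {
	src, err := NewSource(opt)
	if err != nil {
		return nil, nil, err
	}
	return src, src.Schemes(), nil
}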

// Identifier constructs an Identifier from the given scheme, ref, and attrs,
// all of which come from a SourceOp.
func (is *Source) Identifier(scheme, ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
	return is.registryIdentifier(ref, attrs, platform)
}

// Copied from github.com/moby/buildkit/source/containerimage/source.go
func (is *Source) registryIdentifier(ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
	id, err := containerimage.NewImageIdentifier(ref)
	if err != nil {
		return nil, err
	}

	if platform != nil {
		id.Platform = &ocispec.Platform{
			OS:           platform.OS,
			Architecture: platform.Architecture,
			Variant:      platform.Variant,
			OSVersion:    platform.OSVersion,
		}
		if platform.OSFeatures != nil {
			id.Platform.OSFeatures = append([]string{}, platform.OSFeatures...)
		}
	}

	for k, v := range attrs {
		switch k {
		case pb.AttrImageResolveMode:
			rm, err := resolver.ParseImageResolveMode(v)
			if err != nil {
				return nil, err
			}
			id.ResolveMode = rm
		case pb.AttrImageRecordType:
			rt, err := parseImageRecordType(v)
			if err != nil {
				return nil, err
			}
			id.RecordType = rt
		case pb.AttrImageLayerLimit:
			l, err := strconv.Atoi(v)
			if err != nil {
				return nil, errors.Wrapf(err, "invalid layer limit %s", v)
			}
			if l <= 0 {
				return nil, errors.Errorf("invalid layer limit %s", v)
			}
			id.LayerLimit = &l
		}
	}

	return id, nil
}

func parseImageRecordType(v string) (client.UsageRecordType, error) {
	switch client.UsageRecordType(v) {
	case "", client.UsageRecordTypeRegular:
		return client.UsageRecordTypeRegular, nil
	case client.UsageRecordTypeInternal:
		return client.UsageRecordTypeInternal, nil
	case client.UsageRecordTypeFrontend:
		return client.UsageRecordTypeFrontend, nil
	default:
		return "", errors.Errorf("invalid record type %s", v)
	}
}
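
// identifierAttrsExample is a hypothetical sketch (not part of the original
// file) showing how SourceOp attrs map onto the ImageIdentifier fields parsed
// in registryIdentifier: resolve mode, record type, and layer limit all
// travel as plain string attributes.
func identifierAttrsExample(is *Source, pl *pb.Platform) (source.Identifier, error) {
	attrs := map[string]string{
		pb.AttrImageResolveMode: resolver.ResolveModeForcePull.String(), // always consult the registry
		pb.AttrImageRecordType:  string(client.UsageRecordTypeRegular),  // default record type
		pb.AttrImageLayerLimit:  "1",                                    // unpack only the first layer
	}
	return is.Identifier(srctypes.DockerImageScheme, "docker.io/library/alpine:latest", attrs, pl)
}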

func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
	ref, err := distreference.ParseNormalizedNamed(refStr)
	if err != nil {
		return nil, err
	}
	dgst, err := is.ReferenceStore.Get(ref)
	if err != nil {
		return nil, err
	}
	img, err := is.ImageStore.Get(image.ID(dgst))
	if err != nil {
		return nil, err
	}
	return img, nil
}

type resolveRemoteResult struct {
	ref  string
	dgst digest.Digest
	dt   []byte
}

func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	p := platforms.DefaultSpec()
	if platform != nil {
		p = *platform
	}
	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage builds.
	key := "getconfig::" + ref + "::" + platforms.Format(p)
	res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) {
		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
		if err != nil {
			return nil, err
		}
		return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil
	})
	if err != nil {
		return "", nil, err
	}
	return res.dgst, res.dt, nil
}
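
// resolveKeyExample is a hypothetical sketch (not part of the original file)
// showing why the flightcontrol key includes the formatted platform: two
// stages resolving the same ref for different platforms must not share a
// deduplicated result.
func resolveKeyExample(ref string) (string, string) {
	amd64 := platforms.MustParse("linux/amd64")
	arm64 := platforms.MustParse("linux/arm64")
	return "getconfig::" + ref + "::" + platforms.Format(amd64),
		"getconfig::" + ref + "::" + platforms.Format(arm64)
}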

// ResolveImageConfig returns the image config for an image
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	if opt.ImageOpt == nil {
		return "", nil, fmt.Errorf("can only resolve an image: %v, opt: %v", ref, opt)
	}
	ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies)
	if err != nil {
		return "", nil, err
	}
	resolveMode, err := resolver.ParseImageResolveMode(opt.ImageOpt.ResolveMode)
	if err != nil {
		return "", nil, err
	}
	switch resolveMode {
	case resolver.ResolveModeForcePull:
		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
		// TODO: pull should fall back to local in case of failure to allow offline behavior;
		// the fallback doesn't work currently
		/*
			if err == nil {
				return dgst, dt, err
			}
			// fallback to local
			dt, err = is.resolveLocal(ref)
			return "", dt, err
		*/

	case resolver.ResolveModeDefault:
		// default == prefer local, but in the future could be smarter
		fallthrough
	case resolver.ResolveModePreferLocal:
		img, err := is.resolveLocal(ref)
		if err == nil {
			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
				log.G(ctx).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
					path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
					path.Join(img.OS, img.Architecture, img.Variant),
				)
			} else {
				return "", img.RawJSON(), err
			}
		}
		// fallback to remote
		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
	}
	// should never happen
	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ImageOpt.ResolveMode)
}

// Resolve returns a SourceInstance that provides pull access for an identifier
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
	imageIdentifier, ok := id.(*containerimage.ImageIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid image identifier %v", id)
	}

	platform := platforms.DefaultSpec()
	if imageIdentifier.Platform != nil {
		platform = *imageIdentifier.Platform
	}

	p := &puller{
		src: imageIdentifier,
		is:  is,
		// resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
		platform: platform,
		sm:       sm,
	}
	return p, nil
}

type puller struct {
	is               *Source
	resolveLocalOnce sync.Once
	g                flightcontrol.Group[struct{}]
	src              *containerimage.ImageIdentifier
	desc             ocispec.Descriptor
	ref              string
	config           []byte
	platform         ocispec.Platform
	sm               *session.Manager
}

func (p *puller) resolver(g session.Group) remotes.Resolver {
	return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g)
}

func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Digest  digest.Digest
		OS      string
		Arch    string
		Variant string `json:",omitempty"`
	}{
		Digest:  p.desc.Digest,
		OS:      platform.OS,
		Arch:    platform.Architecture,
		Variant: platform.Variant,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}
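
// mainManifestKeyExample is a hypothetical sketch (not part of the original
// file): the main manifest cache key is a digest over (manifest digest,
// platform), so the same manifest resolved for two platforms yields two
// distinct keys.
func mainManifestKeyExample(p *puller) (digest.Digest, digest.Digest, error) {
	k1, err := p.mainManifestKey(platforms.MustParse("linux/amd64"))
	if err != nil {
		return "", "", err
	}
	k2, err := p.mainManifestKey(platforms.MustParse("linux/arm64"))
	return k1, k2, err
}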

func (p *puller) resolveLocal() {
	p.resolveLocalOnce.Do(func() {
		dgst := p.src.Reference.Digest()
		if dgst != "" {
			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
			if err == nil {
				p.ref = p.src.Reference.String()
				desc := ocispec.Descriptor{
					Size:   info.Size,
					Digest: dgst,
				}
				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
				if err == nil {
					mt, err := imageutil.DetectManifestMediaType(ra)
					if err == nil {
						desc.MediaType = mt
						p.desc = desc
					}
				}
			}
		}

		if p.src.ResolveMode == resolver.ResolveModeDefault || p.src.ResolveMode == resolver.ResolveModePreferLocal {
			ref := p.src.Reference.String()
			img, err := p.is.resolveLocal(ref)
			if err == nil {
				if !platformMatches(img, &p.platform) {
					log.G(context.TODO()).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
						path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
						path.Join(img.OS, img.Architecture, img.Variant),
					)
				} else {
					p.config = img.RawJSON()
				}
			}
		}
	})
}

func (p *puller) resolve(ctx context.Context, g session.Group) error {
	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) {
		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
		defer func() {
			resolveProgressDone(err)
		}()

		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
		if err != nil {
			return struct{}{}, err
		}

		if p.desc.Digest == "" && p.config == nil {
			origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
			if err != nil {
				return struct{}{}, err
			}

			p.desc = desc
			p.ref = origRef
		}

		// Schema 1 manifests cannot be resolved to an image config
		// since the conversion must take place after all the content
		// has been read.
		// It may be possible to have a mapping between schema 1 manifests
		// and the schema 2 manifests they are converted to.
		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ref, err := distreference.WithDigest(ref, p.desc.Digest)
			if err != nil {
				return struct{}{}, err
			}
			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), sourceresolver.Opt{
				Platform: &p.platform,
				ImageOpt: &sourceresolver.ResolveImageOpt{
					ResolveMode: p.src.ResolveMode.String(),
				},
			}, p.sm, g)
			if err != nil {
				return struct{}{}, err
			}

			p.ref = ref.String()
			p.config = dt
		}
		return struct{}{}, nil
	})
	return err
}

func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
	p.resolveLocal()

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	if p.config != nil {
		k := cacheKeyFromConfig(p.config).String()
		if k == "" {
			return digest.FromBytes(p.config).String(), digest.FromBytes(p.config).String(), nil, true, nil
		}
		return k, k, nil, true, nil
	}

	if err := p.resolve(ctx, g); err != nil {
		return "", "", nil, false, err
	}

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
		return "", "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
	}

	k := cacheKeyFromConfig(p.config).String()
	if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, true, nil
	}

	return k, k, nil, true, nil
}

func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
	var parent cache.ImmutableRef
	if len(diffIDs) > 1 {
		var err error
		parent, err = p.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
		if err != nil {
			return nil, err
		}
		defer parent.Release(context.TODO())
	}
	return p.is.CacheAccessor.GetByBlob(ctx, ocispec.Descriptor{
		Annotations: map[string]string{
			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
		},
	}, parent, opts...)
}
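
// getRefChainExample is a hypothetical sketch (not part of the original
// file): getRef recurses over the diffID slice, so for diffIDs [a, b, c] it
// resolves the cache record for a, then a+b, then a+b+c, releasing each
// parent once the child record holds it.
func getRefChainExample(ctx context.Context, p *puller, diffIDs []layer.DiffID) (cache.ImmutableRef, error) {
	return p.getRef(ctx, diffIDs, cache.WithDescription("example layer chain"))
}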

func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
	p.resolveLocal()
	if len(p.config) == 0 {
		if err := p.resolve(ctx, g); err != nil {
			return nil, err
		}
	}

	if p.config != nil {
		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
		if err == nil {
			if len(img.RootFS.DiffIDs) == 0 {
				return nil, nil
			}
			l, err := p.is.LayerStore.Get(img.RootFS.ChainID())
			if err == nil {
				layer.ReleaseAndLog(p.is.LayerStore, l)
				ref, err := p.getRef(ctx, img.RootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
				if err != nil {
					return nil, err
				}
				return ref, nil
			}
		}
	}

	ongoing := newJobs(p.ref)

	ctx, done, err := leaseutil.WithLease(ctx, p.is.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
	if err != nil {
		return nil, err
	}
	defer func() {
		done(context.TODO())
		if p.is.GarbageCollect != nil {
			go p.is.GarbageCollect(context.TODO())
		}
	}()

	pctx, stopProgress := context.WithCancel(ctx)

	pw, _, ctx := progress.NewFromContext(ctx)
	defer pw.Close()

	progressDone := make(chan struct{})
	go func() {
		showProgress(pctx, ongoing, p.is.ContentStore, pw)
		close(progressDone)
	}()
	defer func() {
		<-progressDone
	}()

	fetcher, err := p.resolver(g).Fetcher(ctx, p.ref)
	if err != nil {
		stopProgress()
		return nil, err
	}

	platform := platforms.Only(p.platform)

	var nonLayers []digest.Digest

	var (
		schema1Converter *schema1.Converter
		handlers         []images.Handler
	)
	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
		handlers = append(handlers, schema1Converter)

		// TODO: Optimize to do dispatch and integrate pulling with download manager,
		// leverage existing blob mapping and layer storage
	} else {
		// TODO: we need a wrapper snapshot interface that combines content
		// and snapshots, because either 1) buildkit shouldn't depend on the
		// contentstore, or 2) the cachemanager should manage the contentstore
		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			switch desc.MediaType {
			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
				nonLayers = append(nonLayers, desc.Digest)
			default:
				return nil, images.ErrSkipDesc
			}
			ongoing.add(desc)
			return nil, nil
		}))

		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
		// Filter the children by the platform
		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
		// Limit manifests pulled to the best match in an index
		childrenHandler = images.LimitManifests(childrenHandler, platform, 1)

		handlers = append(handlers,
			remotes.FetchHandler(p.is.ContentStore, fetcher),
			childrenHandler,
		)
	}

	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
		stopProgress()
		return nil, err
	}
	defer stopProgress()

	if schema1Converter != nil {
		p.desc, err = schema1Converter.Convert(ctx)
		if err != nil {
			return nil, err
		}
	}

	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
	if err != nil {
		return nil, err
	}

	var img ocispec.Image
	if err := json.Unmarshal(dt, &img); err != nil {
		return nil, err
	}

	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
		return nil, errors.Errorf("invalid config for manifest: %d manifest layers but %d config diffIDs", len(mfst.Layers), len(img.RootFS.DiffIDs))
	}

	pchan := make(chan pkgprogress.Progress, 10)
	defer close(pchan)

	go func() {
		m := map[string]struct {
			st      time.Time
			limiter *rate.Limiter
		}{}
		for p := range pchan {
			if p.Action == "Extracting" {
				st, ok := m[p.ID]
				if !ok {
					st.st = time.Now()
					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
					m[p.ID] = st
				}
				var end *time.Time
				if p.LastUpdate || st.limiter.Allow() {
					if p.LastUpdate {
						tm := time.Now()
						end = &tm
					}
					_ = pw.Write("extracting "+p.ID, progress.Status{
						Action:    "extract",
						Started:   &st.st,
						Completed: end,
					})
				}
			}
		}
	}()

	if len(mfst.Layers) == 0 {
		return nil, nil
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

	for i, desc := range mfst.Layers {
		if err := desc.Digest.Validate(); err != nil {
			return nil, errors.Wrap(err, "layer digest could not be validated")
		}
		ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:    desc,
			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
			fetcher: fetcher,
			ref:     p.src.Reference,
			is:      p.is,
		})
	}

	defer func() {
		<-progressDone
	}()

	r := image.NewRootFS()
	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, layers, pkgprogress.ChanOutput(pchan))
	stopProgress()
	if err != nil {
		return nil, err
	}

	ref, err := p.getRef(ctx, rootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
	release()
	if err != nil {
		return nil, err
	}

	// keep manifest blobs while the ref is alive so they stay available for cache
	for _, nl := range nonLayers {
		if err := p.is.LeaseManager.AddResource(ctx, leases.Lease{ID: ref.ID()}, leases.Resource{
			ID:   nl.String(),
			Type: "content",
		}); err != nil {
			return nil, err
		}
	}

	// TODO: handle windows layers for cross platform builds

	if p.src.RecordType != "" && ref.GetRecordType() == "" {
		if err := ref.SetRecordType(p.src.RecordType); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
	}

	return ref, nil
}

// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
	is      *Source
	fetcher remotes.Fetcher
	desc    ocispec.Descriptor
	diffID  layer.DiffID
	ref     ctdreference.Spec
}

func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}

func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}

func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}

func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}
	defer rc.Close()

	refKey := remotes.MakeRefKey(ctx, ld.desc)

	ld.is.ContentStore.Abort(ctx, refKey)

	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
		ld.is.ContentStore.Abort(ctx, refKey)
		return nil, 0, err
	}

	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}

	return io.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}

func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest))
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}

func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			done = true
		}

		resolved := "resolved"
		if !ongoing.isResolved() {
			resolved = "resolving"
		}
		statuses[ongoing.name] = statusInfo{
			Ref:    ongoing.name,
			Status: resolved,
		}

		actives := make(map[string]statusInfo)

		if !done {
			active, err := cs.ListStatuses(ctx)
			if err != nil {
				// log.G(ctx).WithError(err).Error("active check failed")
				continue
			}
			// update status of active entries!
			for _, active := range active {
				actives[active.Ref] = statusInfo{
					Ref:       active.Ref,
					Status:    "downloading",
					Offset:    active.Offset,
					Total:     active.Total,
					StartedAt: active.StartedAt,
					UpdatedAt: active.UpdatedAt,
				}
			}
		}

		// now, update the items in jobs that are not in active
		for _, j := range ongoing.jobs() {
			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
			if a, ok := actives[refKey]; ok {
				started := j.started
				_ = pw.Write(j.Digest.String(), progress.Status{
					Action:  a.Status,
					Total:   int(a.Total),
					Current: int(a.Offset),
					Started: &started,
				})
				continue
			}

			if !j.done {
				info, err := cs.Info(context.TODO(), j.Digest)
				if err != nil {
					if cerrdefs.IsNotFound(err) {
						// _ = pw.Write(j.Digest.String(), progress.Status{
						// 	Action: "waiting",
						// })
						continue
					}
				} else {
					j.done = true
				}

				if done || j.done {
					started := j.started
					createdAt := info.CreatedAt
					_ = pw.Write(j.Digest.String(), progress.Status{
						Action:    "done",
						Current:   int(info.Size),
						Total:     int(info.Size),
						Completed: &createdAt,
						Started:   &started,
					})
				}
			}
		}
		if done {
			return
		}
	}
}

// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string
	added    map[digest.Digest]*job
	mu       sync.Mutex
	resolved bool
}

type job struct {
	ocispec.Descriptor
	done    bool
	started time.Time
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: make(map[digest.Digest]*job),
	}
}

func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.added[desc.Digest] = &job{
		Descriptor: desc,
		started:    time.Now(),
	}
}

func (j *jobs) jobs() []*job {
	j.mu.Lock()
	defer j.mu.Unlock()

	descs := make([]*job, 0, len(j.added))
	for _, j := range j.added {
		descs = append(descs, j)
	}
	return descs
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.NewFromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	_ = pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		_ = pw.Write(id, st)
		_ = pw.Close()
		return err
	}
}
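
// oneOffProgressExample is a hypothetical sketch (not part of the original
// file) of the intended call pattern, mirroring its use in resolve above:
// create the closure up front, do the work, then hand the closure the final
// error so the status row is completed exactly once.
func oneOffProgressExample(ctx context.Context) error {
	done := oneOffProgress(ctx, "resolve docker.io/library/alpine:latest")
	var err error
	// ... perform the work being reported on, assigning to err ...
	return done(err)
}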

// cacheKeyFromConfig returns a stable digest from an image config. If the
// config is a known OCI image, the ChainID of its layers is used.
func cacheKeyFromConfig(dt []byte) digest.Digest {
	var img ocispec.Image
	err := json.Unmarshal(dt, &img)
	if err != nil {
		log.G(context.TODO()).WithError(err).Error("failed to unmarshal image config for cache key")
		return digest.FromBytes(dt)
	}
	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
		return ""
	}
	return identity.ChainID(img.RootFS.DiffIDs)
}
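
// chainIDExample is a hypothetical sketch (not part of the original file)
// illustrating the key produced above: a well-formed "layers" rootfs keys on
// the layer ChainID, so two configs that differ only in metadata but share
// the same layers map to the same cache key.
func chainIDExample() digest.Digest {
	diffIDs := []digest.Digest{
		digest.FromString("layer-0"), // placeholder diffIDs for illustration
		digest.FromString("layer-1"),
	}
	// ChainID folds the list left to right: chain(a, b) = digest(a + " " + b).
	return identity.ChainID(diffIDs)
}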

func platformMatches(img *image.Image, p *ocispec.Platform) bool {
	return dimages.OnlyPlatformWithFallback(*p).Match(ocispec.Platform{
		Architecture: img.Architecture,
		OS:           img.OS,
		OSVersion:    img.OSVersion,
		OSFeatures:   img.OSFeatures,
		Variant:      img.Variant,
	})
}

func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (string, error) {
	ref, err := ctdreference.Parse(str)
	if err != nil {
		return "", errors.WithStack(err)
	}
	op := &pb.SourceOp{
		Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
	}

	mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
	if err != nil {
		return "", errors.Wrap(err, "could not resolve image due to policy")
	}

	if mut {
		t, newRef, ok := strings.Cut(op.GetIdentifier(), "://")
		if !ok {
			return "", errors.Errorf("could not parse ref: %s", op.GetIdentifier())
		}
		if t != srctypes.DockerImageScheme {
			return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef}
		}
		ref, err = ctdreference.Parse(newRef)
		if err != nil {
			return "", errors.WithStack(err)
		}
	}
	return ref.String(), nil
}
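
// applySourcePoliciesExample is a hypothetical sketch (not part of the
// original file): with no policies the engine reports no mutation, so the
// helper simply echoes back the parsed reference, which is the common path
// for builds without a source policy.
func applySourcePoliciesExample(ctx context.Context) (string, error) {
	return applySourcePolicies(ctx, "docker.io/library/alpine:latest", nil)
}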