github.com/rish1988/moby@v25.0.2+incompatible/builder/builder-next/adapters/containerimage/pull.go (about)

     1  // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
     2  //go:build go1.19
     3  
     4  package containerimage
     5  
     6  import (
     7  	"context"
     8  	"encoding/json"
     9  	"fmt"
    10  	"io"
    11  	"path"
    12  	"strings"
    13  	"sync"
    14  	"time"
    15  
    16  	"github.com/containerd/containerd/content"
    17  	cerrdefs "github.com/containerd/containerd/errdefs"
    18  	"github.com/containerd/containerd/gc"
    19  	"github.com/containerd/containerd/images"
    20  	"github.com/containerd/containerd/leases"
    21  	"github.com/containerd/containerd/platforms"
    22  	cdreference "github.com/containerd/containerd/reference"
    23  	ctdreference "github.com/containerd/containerd/reference"
    24  	"github.com/containerd/containerd/remotes"
    25  	"github.com/containerd/containerd/remotes/docker"
    26  	"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
    27  	"github.com/containerd/log"
    28  	distreference "github.com/distribution/reference"
    29  	dimages "github.com/docker/docker/daemon/images"
    30  	"github.com/docker/docker/distribution/metadata"
    31  	"github.com/docker/docker/distribution/xfer"
    32  	"github.com/docker/docker/image"
    33  	"github.com/docker/docker/layer"
    34  	pkgprogress "github.com/docker/docker/pkg/progress"
    35  	"github.com/docker/docker/reference"
    36  	"github.com/moby/buildkit/cache"
    37  	"github.com/moby/buildkit/client/llb"
    38  	"github.com/moby/buildkit/session"
    39  	"github.com/moby/buildkit/solver"
    40  	"github.com/moby/buildkit/solver/pb"
    41  	"github.com/moby/buildkit/source"
    42  	srctypes "github.com/moby/buildkit/source/types"
    43  	"github.com/moby/buildkit/sourcepolicy"
    44  	policy "github.com/moby/buildkit/sourcepolicy/pb"
    45  	spb "github.com/moby/buildkit/sourcepolicy/pb"
    46  	"github.com/moby/buildkit/util/flightcontrol"
    47  	"github.com/moby/buildkit/util/imageutil"
    48  	"github.com/moby/buildkit/util/leaseutil"
    49  	"github.com/moby/buildkit/util/progress"
    50  	"github.com/moby/buildkit/util/resolver"
    51  	"github.com/opencontainers/go-digest"
    52  	"github.com/opencontainers/image-spec/identity"
    53  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    54  	"github.com/pkg/errors"
    55  	"golang.org/x/time/rate"
    56  )
    57  
// SourceOpt is options for creating the image source
type SourceOpt struct {
	ContentStore    content.Store              // stores manifests/configs/blobs fetched from registries
	CacheAccessor   cache.Accessor             // buildkit cache used to create refs from downloaded layers
	ReferenceStore  reference.Store            // daemon's tag/digest -> image ID store, for local resolution
	DownloadManager *xfer.LayerDownloadManager // downloads and registers layers (see Snapshot)
	MetadataStore   metadata.V2MetadataService // caches diffID -> registry digest mappings (see Registered)
	ImageStore      image.Store                // daemon's local image config store
	RegistryHosts   docker.RegistryHosts       // registry endpoint configuration for the resolver
	LayerStore      layer.Store                // daemon's layer store
	LeaseManager    leases.Manager             // protects pulled content from GC while a pull is in flight
	GarbageCollect  func(ctx context.Context) (gc.Stats, error) // optional; invoked after a pull's lease is released
}
    71  
// Source is the source implementation for accessing container images
type Source struct {
	SourceOpt
	// g deduplicates concurrent remote config resolutions for the same
	// ref/platform pair (see resolveRemote).
	g flightcontrol.Group[*resolveRemoteResult]
}
    77  
    78  // NewSource creates a new image source
    79  func NewSource(opt SourceOpt) (*Source, error) {
    80  	return &Source{SourceOpt: opt}, nil
    81  }
    82  
// ID returns the scheme identifier for docker images
// (srctypes.DockerImageScheme) that this source handles.
func (is *Source) ID() string {
	return srctypes.DockerImageScheme
}
    87  
    88  func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
    89  	ref, err := distreference.ParseNormalizedNamed(refStr)
    90  	if err != nil {
    91  		return nil, err
    92  	}
    93  	dgst, err := is.ReferenceStore.Get(ref)
    94  	if err != nil {
    95  		return nil, err
    96  	}
    97  	img, err := is.ImageStore.Get(image.ID(dgst))
    98  	if err != nil {
    99  		return nil, err
   100  	}
   101  	return img, nil
   102  }
   103  
// resolveRemoteResult is the cached result of a remote config resolution,
// shared between concurrent callers via the flightcontrol group.
type resolveRemoteResult struct {
	ref  string        // canonicalized reference returned by the resolver
	dgst digest.Digest // digest of the resolved manifest
	dt   []byte        // raw image config JSON
}
   109  
   110  func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
   111  	p := platforms.DefaultSpec()
   112  	if platform != nil {
   113  		p = *platform
   114  	}
   115  	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage.
   116  	key := "getconfig::" + ref + "::" + platforms.Format(p)
   117  	res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) {
   118  		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
   119  		ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*policy.Policy{})
   120  		if err != nil {
   121  			return nil, err
   122  		}
   123  		return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil
   124  	})
   125  	if err != nil {
   126  		return ref, "", nil, err
   127  	}
   128  	return res.ref, res.dgst, res.dt, nil
   129  }
   130  
// ResolveImageConfig returns image config for an image. Depending on the
// resolve mode it either forces a registry lookup or prefers a locally
// stored image that matches the requested platform.
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
	// Source policies may rewrite (or reject) the reference before resolution.
	ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies)
	if err != nil {
		return "", "", nil, err
	}
	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
	if err != nil {
		return ref, "", nil, err
	}
	switch resolveMode {
	case source.ResolveModeForcePull:
		ref, dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
		// TODO: pull should fallback to local in case of failure to allow offline behavior
		// the fallback doesn't work currently
		return ref, dgst, dt, err
		/*
			if err == nil {
				return dgst, dt, err
			}
			// fallback to local
			dt, err = is.resolveLocal(ref)
			return "", dt, err
		*/

	case source.ResolveModeDefault:
		// default == prefer local, but in the future could be smarter
		fallthrough
	case source.ResolveModePreferLocal:
		img, err := is.resolveLocal(ref)
		if err == nil {
			// A locally-tagged image only counts if it matches the requested
			// platform; otherwise fall through to the registry lookup below.
			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
				log.G(ctx).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
					path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
					path.Join(img.OS, img.Architecture, img.Variant),
				)
			} else {
				return ref, "", img.RawJSON(), err
			}
		}
		// fallback to remote
		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
	}
	// should never happen
	return ref, "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}
   177  
   178  // Resolve returns access to pulling for an identifier
   179  func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
   180  	imageIdentifier, ok := id.(*source.ImageIdentifier)
   181  	if !ok {
   182  		return nil, errors.Errorf("invalid image identifier %v", id)
   183  	}
   184  
   185  	platform := platforms.DefaultSpec()
   186  	if imageIdentifier.Platform != nil {
   187  		platform = *imageIdentifier.Platform
   188  	}
   189  
   190  	p := &puller{
   191  		src: imageIdentifier,
   192  		is:  is,
   193  		// resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
   194  		platform: platform,
   195  		sm:       sm,
   196  	}
   197  	return p, nil
   198  }
   199  
// puller is the source instance for a single image pull. The desc, ref and
// config fields are populated lazily by resolveLocal/resolve.
type puller struct {
	is               *Source
	resolveLocalOnce sync.Once                     // guards the one-shot local resolution
	g                flightcontrol.Group[struct{}] // dedupes concurrent remote resolves
	src              *source.ImageIdentifier
	desc             ocispec.Descriptor // resolved manifest descriptor (zero until resolved)
	ref              string             // resolved canonical reference
	config           []byte             // raw image config JSON (nil until resolved)
	platform         ocispec.Platform
	sm               *session.Manager
}
   211  
   212  func (p *puller) resolver(g session.Group) remotes.Resolver {
   213  	return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g)
   214  }
   215  
   216  func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) {
   217  	dt, err := json.Marshal(struct {
   218  		Digest  digest.Digest
   219  		OS      string
   220  		Arch    string
   221  		Variant string `json:",omitempty"`
   222  	}{
   223  		Digest:  p.desc.Digest,
   224  		OS:      platform.OS,
   225  		Arch:    platform.Architecture,
   226  		Variant: platform.Variant,
   227  	})
   228  	if err != nil {
   229  		return "", err
   230  	}
   231  	return digest.FromBytes(dt), nil
   232  }
   233  
// resolveLocal attempts to satisfy the pull from local state without touching
// the network: a digest-pinned reference is looked up in the content store,
// and (in default/prefer-local modes) a matching tagged image is looked up in
// the daemon's image store. Runs at most once; results are stored on p.
func (p *puller) resolveLocal() {
	p.resolveLocalOnce.Do(func() {
		dgst := p.src.Reference.Digest()
		if dgst != "" {
			// Reference is pinned to a digest; if the blob already exists in
			// the content store, sniff its manifest media type and record the
			// descriptor so resolve() can skip the registry round-trip.
			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
			if err == nil {
				p.ref = p.src.Reference.String()
				desc := ocispec.Descriptor{
					Size:   info.Size,
					Digest: dgst,
				}
				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
				if err == nil {
					// NOTE(review): ra is not closed here — confirm
					// DetectManifestMediaType releases it.
					mt, err := imageutil.DetectManifestMediaType(ra)
					if err == nil {
						desc.MediaType = mt
						p.desc = desc
					}
				}
			}
		}

		if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
			ref := p.src.Reference.String()
			img, err := p.is.resolveLocal(ref)
			if err == nil {
				// Only use the local image config if it matches the requested
				// platform; otherwise resolve() will consult the registry.
				if !platformMatches(img, &p.platform) {
					log.G(context.TODO()).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
						path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
						path.Join(img.OS, img.Architecture, img.Variant),
					)
				} else {
					p.config = img.RawJSON()
				}
			}
		}
	})
}
   272  
// resolve performs the remote resolution of the reference (manifest
// descriptor and/or image config), deduplicated via the flightcontrol group
// so concurrent callers share one lookup. Results are stored on p. The named
// error result lets the deferred progress callback record the outcome.
func (p *puller) resolve(ctx context.Context, g session.Group) error {
	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) {
		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
		defer func() {
			resolveProgressDone(err)
		}()

		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
		if err != nil {
			return struct{}{}, err
		}

		// Resolve the descriptor from the registry unless resolveLocal
		// already supplied one (or a local config made it unnecessary).
		if p.desc.Digest == "" && p.config == nil {
			origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
			if err != nil {
				return struct{}{}, err
			}

			p.desc = desc
			p.ref = origRef
		}

		// Schema 1 manifests cannot be resolved to an image config
		// since the conversion must take place after all the content
		// has been read.
		// It may be possible to have a mapping between schema 1 manifests
		// and the schema 2 manifests they are converted to.
		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ref, err := distreference.WithDigest(ref, p.desc.Digest)
			if err != nil {
				return struct{}{}, err
			}
			newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: p.src.ResolveMode.String()}, p.sm, g)
			if err != nil {
				return struct{}{}, err
			}

			p.ref = newRef
			p.config = dt
		}
		return struct{}{}, nil
	})
	return err
}
   317  
// CacheKey computes the cache key for this pull. For index 0 with a known
// manifest digest the key is derived from manifest digest + platform;
// otherwise it is derived from the image config. The returned bool
// presumably marks the key as final/cacheable — TODO confirm against the
// solver's CacheKey contract.
func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
	p.resolveLocal()

	// Fast path: descriptor already known (e.g. digest-pinned ref found locally).
	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	// Fast path: config already known from the local image store.
	if p.config != nil {
		k := cacheKeyFromConfig(p.config).String()
		if k == "" {
			// Config is not a layered OCI image; fall back to a digest of
			// the raw config bytes.
			return digest.FromBytes(p.config).String(), digest.FromBytes(p.config).String(), nil, true, nil
		}
		return k, k, nil, true, nil
	}

	// Nothing known locally; resolve against the registry.
	if err := p.resolve(ctx, g); err != nil {
		return "", "", nil, false, err
	}

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
		return "", "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
	}

	k := cacheKeyFromConfig(p.config).String()
	if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		// Schema 1 manifests have no resolvable config (see resolve); key on
		// the manifest instead.
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, true, nil
	}

	return k, k, nil, true, nil
}
   364  
   365  func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
   366  	var parent cache.ImmutableRef
   367  	if len(diffIDs) > 1 {
   368  		var err error
   369  		parent, err = p.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
   370  		if err != nil {
   371  			return nil, err
   372  		}
   373  		defer parent.Release(context.TODO())
   374  	}
   375  	return p.is.CacheAccessor.GetByBlob(ctx, ocispec.Descriptor{
   376  		Annotations: map[string]string{
   377  			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
   378  		},
   379  	}, parent, opts...)
   380  }
   381  
// Snapshot materializes the image's root filesystem as an immutable cache
// ref, preferring locally present layers and otherwise pulling from the
// registry through the daemon's download manager.
func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
	p.resolveLocal()
	if len(p.config) == 0 {
		if err := p.resolve(ctx, g); err != nil {
			return nil, err
		}
	}

	// Fast path: the exact image and all of its layers already exist locally.
	if p.config != nil {
		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
		if err == nil {
			if len(img.RootFS.DiffIDs) == 0 {
				return nil, nil
			}
			l, err := p.is.LayerStore.Get(img.RootFS.ChainID())
			if err == nil {
				// Only probing for existence; drop the layer ref immediately.
				layer.ReleaseAndLog(p.is.LayerStore, l)
				ref, err := p.getRef(ctx, img.RootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
				if err != nil {
					return nil, err
				}
				return ref, nil
			}
		}
	}

	ongoing := newJobs(p.ref)

	// Temporary lease keeps fetched content alive for the duration of the
	// pull; GC is kicked off (best effort) once it is released.
	ctx, done, err := leaseutil.WithLease(ctx, p.is.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
	if err != nil {
		return nil, err
	}
	defer func() {
		done(context.TODO())
		if p.is.GarbageCollect != nil {
			go p.is.GarbageCollect(context.TODO())
		}
	}()

	pctx, stopProgress := context.WithCancel(ctx)

	pw, _, ctx := progress.NewFromContext(ctx)
	defer pw.Close()

	// Progress reporter runs until stopProgress cancels pctx; progressDone
	// lets the deferred blocks below wait for it to flush.
	progressDone := make(chan struct{})
	go func() {
		showProgress(pctx, ongoing, p.is.ContentStore, pw)
		close(progressDone)
	}()
	defer func() {
		<-progressDone
	}()

	fetcher, err := p.resolver(g).Fetcher(ctx, p.ref)
	if err != nil {
		stopProgress()
		return nil, err
	}

	platform := platforms.Only(p.platform)

	var nonLayers []digest.Digest

	var (
		schema1Converter *schema1.Converter
		handlers         []images.Handler
	)
	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
		handlers = append(handlers, schema1Converter)

		// TODO: Optimize to do dispatch and integrate pulling with download manager,
		// leverage existing blob mapping and layer storage
	} else {
		// TODO: need a wrapper snapshot interface that combines content
		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
		// or 2) cachemanager should manage the contentstore
		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			switch desc.MediaType {
			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
				// Track non-layer blobs so they can be attached to the cache
				// ref's lease after the pull (see loop near the end).
				nonLayers = append(nonLayers, desc.Digest)
			default:
				// Layers are skipped here; they go through the download
				// manager below instead.
				return nil, images.ErrSkipDesc
			}
			ongoing.add(desc)
			return nil, nil
		}))

		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
		// Filter the children by the platform
		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
		// Limit manifests pulled to the best match in an index
		childrenHandler = images.LimitManifests(childrenHandler, platform, 1)

		handlers = append(handlers,
			remotes.FetchHandler(p.is.ContentStore, fetcher),
			childrenHandler,
		)
	}

	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
		stopProgress()
		return nil, err
	}
	defer stopProgress()

	if schema1Converter != nil {
		p.desc, err = schema1Converter.Convert(ctx)
		if err != nil {
			return nil, err
		}
	}

	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
	if err != nil {
		return nil, err
	}

	var img ocispec.Image
	if err := json.Unmarshal(dt, &img); err != nil {
		return nil, err
	}

	// Each manifest layer must correspond 1:1 to a diffID in the config.
	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
		return nil, errors.Errorf("invalid config for manifest")
	}

	pchan := make(chan pkgprogress.Progress, 10)
	defer close(pchan)

	// Translate download-manager "Extracting" events into buildkit progress,
	// rate-limited to one update per 100ms per layer.
	go func() {
		m := map[string]struct {
			st      time.Time
			limiter *rate.Limiter
		}{}
		for p := range pchan {
			if p.Action == "Extracting" {
				st, ok := m[p.ID]
				if !ok {
					st.st = time.Now()
					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
					m[p.ID] = st
				}
				var end *time.Time
				if p.LastUpdate || st.limiter.Allow() {
					if p.LastUpdate {
						tm := time.Now()
						end = &tm
					}
					_ = pw.Write("extracting "+p.ID, progress.Status{
						Action:    "extract",
						Started:   &st.st,
						Completed: end,
					})
				}
			}
		}
	}()

	if len(mfst.Layers) == 0 {
		return nil, nil
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

	for i, desc := range mfst.Layers {
		if err := desc.Digest.Validate(); err != nil {
			return nil, errors.Wrap(err, "layer digest could not be validated")
		}
		ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:    desc,
			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
			fetcher: fetcher,
			ref:     p.src.Reference,
			is:      p.is,
		})
	}

	defer func() {
		<-progressDone
	}()

	r := image.NewRootFS()
	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, layers, pkgprogress.ChanOutput(pchan))
	stopProgress()
	if err != nil {
		return nil, err
	}

	ref, err := p.getRef(ctx, rootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
	release()
	if err != nil {
		return nil, err
	}

	// keep manifest blobs until ref is alive for cache
	for _, nl := range nonLayers {
		if err := p.is.LeaseManager.AddResource(ctx, leases.Lease{ID: ref.ID()}, leases.Resource{
			ID:   nl.String(),
			Type: "content",
		}); err != nil {
			return nil, err
		}
	}

	// TODO: handle windows layers for cross platform builds

	if p.src.RecordType != "" && ref.GetRecordType() == "" {
		if err := ref.SetRecordType(p.src.RecordType); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
	}

	return ref, nil
}
   612  
// layerDescriptor adapts a single remote layer blob to the daemon's
// xfer.DownloadDescriptor interface (see Snapshot), letting the existing
// download manager fetch, verify, and register it.
type layerDescriptor struct {
	is      *Source            // for access to the content and metadata stores
	fetcher remotes.Fetcher    // fetches the blob from the registry
	desc    ocispec.Descriptor // the layer blob being downloaded
	diffID  layer.DiffID       // uncompressed digest from the image config
	ref     ctdreference.Spec  // source repository, recorded in V2 metadata
}
   621  
   622  func (ld *layerDescriptor) Key() string {
   623  	return "v2:" + ld.desc.Digest.String()
   624  }
   625  
   626  func (ld *layerDescriptor) ID() string {
   627  	return ld.desc.Digest.String()
   628  }
   629  
// DiffID returns the layer's uncompressed digest. It is known up front from
// the image config, so the error is always nil.
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}
   633  
   634  func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
   635  	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
   636  	if err != nil {
   637  		return nil, 0, err
   638  	}
   639  	defer rc.Close()
   640  
   641  	refKey := remotes.MakeRefKey(ctx, ld.desc)
   642  
   643  	ld.is.ContentStore.Abort(ctx, refKey)
   644  
   645  	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
   646  		ld.is.ContentStore.Abort(ctx, refKey)
   647  		return nil, 0, err
   648  	}
   649  
   650  	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
   651  	if err != nil {
   652  		return nil, 0, err
   653  	}
   654  
   655  	return io.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
   656  }
   657  
// Close is a no-op: the downloaded blob is kept in the content store for
// reuse (the deletion is left commented out).
func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest))
}
   661  
// Registered is invoked once the layer has been registered, recording the
// DiffID -> registry blobsum mapping in the V2 metadata store.
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}
   666  
// showProgress emits progress records for all jobs tracked in ongoing,
// polling the content store every 100ms until ctx is cancelled; after
// cancellation it performs one final pass so finished jobs report "done".
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			// Run one last iteration below before returning.
			done = true
		}

		resolved := "resolved"
		if !ongoing.isResolved() {
			resolved = "resolving"
		}
		statuses[ongoing.name] = statusInfo{
			Ref:    ongoing.name,
			Status: resolved,
		}

		actives := make(map[string]statusInfo)

		if !done {
			active, err := cs.ListStatuses(ctx)
			if err != nil {
				// log.G(ctx).WithError(err).Error("active check failed")
				continue
			}
			// update status of active entries!
			for _, active := range active {
				actives[active.Ref] = statusInfo{
					Ref:       active.Ref,
					Status:    "downloading",
					Offset:    active.Offset,
					Total:     active.Total,
					StartedAt: active.StartedAt,
					UpdatedAt: active.UpdatedAt,
				}
			}
		}

		// now, update the items in jobs that are not in active
		for _, j := range ongoing.jobs() {
			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
			if a, ok := actives[refKey]; ok {
				started := j.started
				_ = pw.Write(j.Digest.String(), progress.Status{
					Action:  a.Status,
					Total:   int(a.Total),
					Current: int(a.Offset),
					Started: &started,
				})
				continue
			}

			if !j.done {
				info, err := cs.Info(context.TODO(), j.Digest)
				if err != nil {
					if cerrdefs.IsNotFound(err) {
						// _ = pw.Write(j.Digest.String(), progress.Status{
						// 	Action: "waiting",
						// })
						continue
					}
				} else {
					j.done = true
				}

				// NOTE(review): if Info failed with a non-NotFound error,
				// info is the zero value here, so the final pass (done==true)
				// writes zero Size/CreatedAt — confirm this is intended.
				if done || j.done {
					started := j.started
					createdAt := info.CreatedAt
					_ = pw.Write(j.Digest.String(), progress.Status{
						Action:    "done",
						Current:   int(info.Size),
						Total:     int(info.Size),
						Completed: &createdAt,
						Started:   &started,
					})
				}
			}
		}
		if done {
			return
		}
	}
}
   757  
// jobs provides a way of identifying the download keys for a particular task
// encountering during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string                 // display name (the image ref being pulled)
	added    map[digest.Digest]*job // tracked jobs keyed by digest; guarded by mu
	mu       sync.Mutex
	resolved bool // guarded by mu; not set anywhere in this file — TODO confirm
}
   769  
// job tracks progress state for a single descriptor being pulled.
type job struct {
	ocispec.Descriptor
	done    bool      // set once the content is observed in the store
	started time.Time // when the descriptor was first added
}
   775  
   776  func newJobs(name string) *jobs {
   777  	return &jobs{
   778  		name:  name,
   779  		added: make(map[digest.Digest]*job),
   780  	}
   781  }
   782  
   783  func (j *jobs) add(desc ocispec.Descriptor) {
   784  	j.mu.Lock()
   785  	defer j.mu.Unlock()
   786  
   787  	if _, ok := j.added[desc.Digest]; ok {
   788  		return
   789  	}
   790  	j.added[desc.Digest] = &job{
   791  		Descriptor: desc,
   792  		started:    time.Now(),
   793  	}
   794  }
   795  
   796  func (j *jobs) jobs() []*job {
   797  	j.mu.Lock()
   798  	defer j.mu.Unlock()
   799  
   800  	descs := make([]*job, 0, len(j.added))
   801  	for _, j := range j.added {
   802  		descs = append(descs, j)
   803  	}
   804  	return descs
   805  }
   806  
   807  func (j *jobs) isResolved() bool {
   808  	j.mu.Lock()
   809  	defer j.mu.Unlock()
   810  	return j.resolved
   811  }
   812  
// statusInfo is a snapshot of one transfer's state, used by showProgress
// when rendering progress output.
type statusInfo struct {
	Ref       string // content-store ref key, or the pull's display name
	Status    string // "resolving", "resolved", or "downloading"
	Offset    int64  // bytes transferred so far
	Total     int64  // total bytes expected
	StartedAt time.Time
	UpdatedAt time.Time
}
   821  
   822  func oneOffProgress(ctx context.Context, id string) func(err error) error {
   823  	pw, _, _ := progress.NewFromContext(ctx)
   824  	now := time.Now()
   825  	st := progress.Status{
   826  		Started: &now,
   827  	}
   828  	_ = pw.Write(id, st)
   829  	return func(err error) error {
   830  		// TODO: set error on status
   831  		now := time.Now()
   832  		st.Completed = &now
   833  		_ = pw.Write(id, st)
   834  		_ = pw.Close()
   835  		return err
   836  	}
   837  }
   838  
   839  // cacheKeyFromConfig returns a stable digest from image config. If image config
   840  // is a known oci image we will use chainID of layers.
   841  func cacheKeyFromConfig(dt []byte) digest.Digest {
   842  	var img ocispec.Image
   843  	err := json.Unmarshal(dt, &img)
   844  	if err != nil {
   845  		log.G(context.TODO()).WithError(err).Errorf("failed to unmarshal image config for cache key %v", err)
   846  		return digest.FromBytes(dt)
   847  	}
   848  	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
   849  		return ""
   850  	}
   851  	return identity.ChainID(img.RootFS.DiffIDs)
   852  }
   853  
   854  func platformMatches(img *image.Image, p *ocispec.Platform) bool {
   855  	return dimages.OnlyPlatformWithFallback(*p).Match(ocispec.Platform{
   856  		Architecture: img.Architecture,
   857  		OS:           img.OS,
   858  		OSVersion:    img.OSVersion,
   859  		OSFeatures:   img.OSFeatures,
   860  		Variant:      img.Variant,
   861  	})
   862  }
   863  
   864  func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (string, error) {
   865  	ref, err := cdreference.Parse(str)
   866  	if err != nil {
   867  		return "", errors.WithStack(err)
   868  	}
   869  	op := &pb.Op{
   870  		Op: &pb.Op_Source{
   871  			Source: &pb.SourceOp{
   872  				Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
   873  			},
   874  		},
   875  	}
   876  
   877  	mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
   878  	if err != nil {
   879  		return "", errors.Wrap(err, "could not resolve image due to policy")
   880  	}
   881  
   882  	if mut {
   883  		var (
   884  			t  string
   885  			ok bool
   886  		)
   887  		t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://")
   888  		if !ok {
   889  			return "", errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier())
   890  		}
   891  		if ok && t != srctypes.DockerImageScheme {
   892  			return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef}
   893  		}
   894  		ref, err = cdreference.Parse(newRef)
   895  		if err != nil {
   896  			return "", errors.WithStack(err)
   897  		}
   898  	}
   899  	return ref.String(), nil
   900  }