github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/builder/builder-next/adapters/containerimage/pull.go

package containerimage

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"path"
	"strings"
	"sync"
	"time"

	dimages "github.com/Prakhar-Agarwal-byte/moby/daemon/images"
	"github.com/Prakhar-Agarwal-byte/moby/distribution/metadata"
	"github.com/Prakhar-Agarwal-byte/moby/distribution/xfer"
	"github.com/Prakhar-Agarwal-byte/moby/image"
	"github.com/Prakhar-Agarwal-byte/moby/layer"
	pkgprogress "github.com/Prakhar-Agarwal-byte/moby/pkg/progress"
	"github.com/Prakhar-Agarwal-byte/moby/reference"
	"github.com/containerd/containerd/content"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/platforms"
	ctdreference "github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
	"github.com/containerd/log"
	distreference "github.com/distribution/reference"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/source"
	srctypes "github.com/moby/buildkit/source/types"
	"github.com/moby/buildkit/sourcepolicy"
	spb "github.com/moby/buildkit/sourcepolicy/pb"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/resolver"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/time/rate"
)

// SourceOpt holds the options for creating the image source.
type SourceOpt struct {
	ContentStore    content.Store
	CacheAccessor   cache.Accessor
	ReferenceStore  reference.Store
	DownloadManager *xfer.LayerDownloadManager
	MetadataStore   metadata.V2MetadataService
	ImageStore      image.Store
	RegistryHosts   docker.RegistryHosts
	LayerStore      layer.Store
	LeaseManager    leases.Manager
	GarbageCollect  func(ctx context.Context) (gc.Stats, error)
}

// Source is the source implementation for accessing container images.
type Source struct {
	SourceOpt
	g flightcontrol.Group[*resolveRemoteResult]
}

// NewSource creates a new image source.
func NewSource(opt SourceOpt) (*Source, error) {
	return &Source{SourceOpt: opt}, nil
}

// ID returns the image scheme identifier.
func (is *Source) ID() string {
	return srctypes.DockerImageScheme
}

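// resolveLocal looks up refStr in the daemon's reference store and returns
// the matching image from the local image store, without contacting a
// registry.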
func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
	ref, err := distreference.ParseNormalizedNamed(refStr)
	if err != nil {
		return nil, err
	}
	dgst, err := is.ReferenceStore.Get(ref)
	if err != nil {
		return nil, err
	}
	img, err := is.ImageStore.Get(image.ID(dgst))
	if err != nil {
		return nil, err
	}
	return img, nil
}

type resolveRemoteResult struct {
	ref  string
	dgst digest.Digest
	dt   []byte
}

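// resolveRemote fetches the image config for ref from a registry. Concurrent
// calls for the same reference and platform are deduplicated through the
// flightcontrol group, so parallel stages of a multi-stage build resolve each
// image only once.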
func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
	p := platforms.DefaultSpec()
	if platform != nil {
		p = *platform
	}
	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage.
	key := "getconfig::" + ref + "::" + platforms.Format(p)
	res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) {
		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
		ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*spb.Policy{})
		if err != nil {
			return nil, err
		}
		return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil
	})
	if err != nil {
		return ref, "", nil, err
	}
	return res.ref, res.dgst, res.dt, nil
}


// ResolveImageConfig returns the image config for an image.
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
	ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies)
	if err != nil {
		return "", "", nil, err
	}
	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
	if err != nil {
		return ref, "", nil, err
	}
	switch resolveMode {
	case source.ResolveModeForcePull:
		ref, dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
		// TODO: pull should fall back to local in case of failure to allow offline behavior;
		// the fallback doesn't work currently.
		return ref, dgst, dt, err
		/*
			if err == nil {
				return dgst, dt, err
			}
			// fallback to local
			dt, err = is.resolveLocal(ref)
			return "", dt, err
		*/

	case source.ResolveModeDefault:
		// default == prefer local, but in the future could be smarter
		fallthrough
	case source.ResolveModePreferLocal:
		img, err := is.resolveLocal(ref)
		if err == nil {
			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
				log.G(ctx).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
					path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
					path.Join(img.OS, img.Architecture, img.Variant),
				)
			} else {
				return ref, "", img.RawJSON(), err
			}
		}
		// fall back to remote
		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
	}
	// should never happen
	return ref, "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}

// Resolve returns access to pulling for an identifier.
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
	imageIdentifier, ok := id.(*source.ImageIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid image identifier %v", id)
	}

	platform := platforms.DefaultSpec()
	if imageIdentifier.Platform != nil {
		platform = *imageIdentifier.Platform
	}

	p := &puller{
		src:      imageIdentifier,
		is:       is,
		platform: platform,
		sm:       sm,
	}
	return p, nil
}

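// puller pulls a single image reference for one platform, preferring the
// local image store where the resolve mode allows it and falling back to the
// registry. It is the source.SourceInstance returned by Resolve.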
type puller struct {
	is               *Source
	resolveLocalOnce sync.Once
	g                flightcontrol.Group[struct{}]
	src              *source.ImageIdentifier
	desc             ocispec.Descriptor
	ref              string
	config           []byte
	platform         ocispec.Platform
	sm               *session.Manager
}

func (p *puller) resolver(g session.Group) remotes.Resolver {
	return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g)
}

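// mainManifestKey returns a stable digest over the manifest digest combined
// with the requested platform, used by CacheKey as the key for the manifest
// phase of the pull.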
func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Digest  digest.Digest
		OS      string
		Arch    string
		Variant string `json:",omitempty"`
	}{
		Digest:  p.desc.Digest,
		OS:      platform.OS,
		Arch:    platform.Architecture,
		Variant: platform.Variant,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}

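// resolveLocal populates p.desc and p.config from data already available
// locally: a digested reference found in the content store and, when the
// resolve mode allows it, the image config from the local image store. It is
// guarded by a sync.Once so the lookup happens at most once per puller.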
func (p *puller) resolveLocal() {
	p.resolveLocalOnce.Do(func() {
		dgst := p.src.Reference.Digest()
		if dgst != "" {
			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
			if err == nil {
				p.ref = p.src.Reference.String()
				desc := ocispec.Descriptor{
					Size:   info.Size,
					Digest: dgst,
				}
				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
				if err == nil {
					mt, err := imageutil.DetectManifestMediaType(ra)
					if err == nil {
						desc.MediaType = mt
						p.desc = desc
					}
				}
			}
		}

		if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
			ref := p.src.Reference.String()
			img, err := p.is.resolveLocal(ref)
			if err == nil {
				if !platformMatches(img, &p.platform) {
					log.G(context.TODO()).WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
						path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
						path.Join(img.OS, img.Architecture, img.Variant),
					)
				} else {
					p.config = img.RawJSON()
				}
			}
		}
	})
}

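// resolve fetches the manifest descriptor and image config from the registry
// for anything resolveLocal could not provide. Concurrent callers are
// collapsed into a single resolution via the flightcontrol group.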
func (p *puller) resolve(ctx context.Context, g session.Group) error {
	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) {
		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
		defer func() {
			resolveProgressDone(err)
		}()

		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
		if err != nil {
			return struct{}{}, err
		}

		if p.desc.Digest == "" && p.config == nil {
			origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
			if err != nil {
				return struct{}{}, err
			}

			p.desc = desc
			p.ref = origRef
		}

		// Schema 1 manifests cannot be resolved to an image config
		// since the conversion must take place after all the content
		// has been read.
		// It may be possible to have a mapping between schema 1 manifests
		// and the schema 2 manifests they are converted to.
		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ref, err := distreference.WithDigest(ref, p.desc.Digest)
			if err != nil {
				return struct{}{}, err
			}
			newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: p.src.ResolveMode.String()}, p.sm, g)
			if err != nil {
				return struct{}{}, err
			}

			p.ref = newRef
			p.config = dt
		}
		return struct{}{}, nil
	})
	return err
}

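// CacheKey computes the cache key for this pull. For index 0 it keys on the
// manifest digest combined with the platform (see mainManifestKey); otherwise
// it derives a key from the image config, resolving remotely only when
// neither the descriptor nor the config is known yet.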
func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
	p.resolveLocal()

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	if p.config != nil {
		k := cacheKeyFromConfig(p.config).String()
		if k == "" {
			dgst := digest.FromBytes(p.config).String()
			return dgst, dgst, nil, true, nil
		}
		return k, k, nil, true, nil
	}

	if err := p.resolve(ctx, g); err != nil {
		return "", "", nil, false, err
	}

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
		return "", "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
	}

	k := cacheKeyFromConfig(p.config).String()
	if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, true, nil
	}

	return k, k, nil, true, nil
}

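// getRef returns an immutable cache ref for the layer chain described by
// diffIDs, building parent refs recursively; diffIDs must be non-empty. Each
// blob is matched by its "containerd.io/uncompressed" annotation.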
func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
	var parent cache.ImmutableRef
	if len(diffIDs) > 1 {
		var err error
		parent, err = p.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
		if err != nil {
			return nil, err
		}
		defer parent.Release(context.TODO())
	}
	return p.is.CacheAccessor.GetByBlob(ctx, ocispec.Descriptor{
		Annotations: map[string]string{
			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
		},
	}, parent, opts...)
}

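// Snapshot materializes the image rootfs as a cache ref. If the image and all
// of its layers are already present locally, it reuses them; otherwise it
// fetches the manifest and config under a temporary lease, streams the layers
// through the download manager, and leases the non-layer blobs to the
// returned ref so they stay available for cache.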
func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
	p.resolveLocal()
	if len(p.config) == 0 {
		if err := p.resolve(ctx, g); err != nil {
			return nil, err
		}
	}

	if p.config != nil {
		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
		if err == nil {
			if len(img.RootFS.DiffIDs) == 0 {
				return nil, nil
			}
			l, err := p.is.LayerStore.Get(img.RootFS.ChainID())
			if err == nil {
				layer.ReleaseAndLog(p.is.LayerStore, l)
				ref, err := p.getRef(ctx, img.RootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
				if err != nil {
					return nil, err
				}
				return ref, nil
			}
		}
	}

	ongoing := newJobs(p.ref)

	ctx, done, err := leaseutil.WithLease(ctx, p.is.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
	if err != nil {
		return nil, err
	}
	defer func() {
		done(context.TODO())
		if p.is.GarbageCollect != nil {
			go p.is.GarbageCollect(context.TODO())
		}
	}()

	pctx, stopProgress := context.WithCancel(ctx)

	pw, _, ctx := progress.NewFromContext(ctx)
	defer pw.Close()

	progressDone := make(chan struct{})
	go func() {
		showProgress(pctx, ongoing, p.is.ContentStore, pw)
		close(progressDone)
	}()
	defer func() {
		<-progressDone
	}()

	fetcher, err := p.resolver(g).Fetcher(ctx, p.ref)
	if err != nil {
		stopProgress()
		return nil, err
	}

	platform := platforms.Only(p.platform)

	var nonLayers []digest.Digest

	var (
		schema1Converter *schema1.Converter
		handlers         []images.Handler
	)
	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
		handlers = append(handlers, schema1Converter)

		// TODO: Optimize to do dispatch and integrate pulling with download manager,
		// leverage existing blob mapping and layer storage
	} else {
		// TODO: need a wrapper snapshot interface that combines content
		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
		// or 2) cachemanager should manage the contentstore
		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			switch desc.MediaType {
			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
				nonLayers = append(nonLayers, desc.Digest)
			default:
				return nil, images.ErrSkipDesc
			}
			ongoing.add(desc)
			return nil, nil
		}))

		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
		// Filter the children by the platform
		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
		// Limit manifests pulled to the best match in an index
		childrenHandler = images.LimitManifests(childrenHandler, platform, 1)

		handlers = append(handlers,
			remotes.FetchHandler(p.is.ContentStore, fetcher),
			childrenHandler,
		)
	}

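	// Dispatch walks the descriptor graph rooted at p.desc with the handlers
	// set up above; in the non-schema1 case, manifests, indexes, and configs
	// are fetched into the content store while layer blobs are skipped
	// (images.ErrSkipDesc) for the download manager to handle below.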
	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
		stopProgress()
		return nil, err
	}
	defer stopProgress()

	if schema1Converter != nil {
		p.desc, err = schema1Converter.Convert(ctx)
		if err != nil {
			return nil, err
		}
	}

	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
	if err != nil {
		return nil, err
	}

	var img ocispec.Image
	if err := json.Unmarshal(dt, &img); err != nil {
		return nil, err
	}

	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
		return nil, errors.Errorf("invalid config for manifest")
	}

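	// Forward "Extracting" events from the download manager to the buildkit
	// progress writer, rate-limiting updates to one per 100ms per layer.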
	pchan := make(chan pkgprogress.Progress, 10)
	defer close(pchan)

	go func() {
		m := map[string]struct {
			st      time.Time
			limiter *rate.Limiter
		}{}
		for pp := range pchan {
			if pp.Action == "Extracting" {
				st, ok := m[pp.ID]
				if !ok {
					st.st = time.Now()
					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
					m[pp.ID] = st
				}
				var end *time.Time
				if pp.LastUpdate || st.limiter.Allow() {
					if pp.LastUpdate {
						tm := time.Now()
						end = &tm
					}
					_ = pw.Write("extracting "+pp.ID, progress.Status{
						Action:    "extract",
						Started:   &st.st,
						Completed: end,
					})
				}
			}
		}
	}()

	if len(mfst.Layers) == 0 {
		return nil, nil
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

	for i, desc := range mfst.Layers {
		if err := desc.Digest.Validate(); err != nil {
			return nil, errors.Wrap(err, "layer digest could not be validated")
		}
		ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:    desc,
			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
			fetcher: fetcher,
			ref:     p.src.Reference,
			is:      p.is,
		})
	}

	defer func() {
		<-progressDone
	}()

	r := image.NewRootFS()
	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, layers, pkgprogress.ChanOutput(pchan))
	stopProgress()
	if err != nil {
		return nil, err
	}

	ref, err := p.getRef(ctx, rootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
	release()
	if err != nil {
		return nil, err
	}

	// keep the manifest blobs alive for as long as ref is alive, so they can
	// be used for cache
	for _, nl := range nonLayers {
		if err := p.is.LeaseManager.AddResource(ctx, leases.Lease{ID: ref.ID()}, leases.Resource{
			ID:   nl.String(),
			Type: "content",
		}); err != nil {
			return nil, err
		}
	}

	// TODO: handle windows layers for cross platform builds

	if p.src.RecordType != "" && ref.GetRecordType() == "" {
		if err := ref.SetRecordType(p.src.RecordType); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
	}

	return ref, nil
}

// layerDescriptor describes a single layer blob to be fetched. It implements
// xfer.DownloadDescriptor so layers can be pulled through the daemon's layer
// download manager.
type layerDescriptor struct {
	is      *Source
	fetcher remotes.Fetcher
	desc    ocispec.Descriptor
	diffID  layer.DiffID
	ref     ctdreference.Spec
}

func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}

func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}

func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}

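// Download fetches the layer blob, writes it into the content store, and
// returns a reader over the stored blob so the download manager can register
// it as a layer.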
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}
	defer rc.Close()

	refKey := remotes.MakeRefKey(ctx, ld.desc)

	ld.is.ContentStore.Abort(ctx, refKey)

	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
		ld.is.ContentStore.Abort(ctx, refKey)
		return nil, 0, err
	}

	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}

	return io.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}

func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest)
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}

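// showProgress polls the content store every 100ms and translates active
// download statuses for the ongoing jobs into buildkit progress events,
// flushing a final "done" status for each job before returning.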
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			done = true
		}

		resolved := "resolved"
		if !ongoing.isResolved() {
			resolved = "resolving"
		}
		statuses[ongoing.name] = statusInfo{
			Ref:    ongoing.name,
			Status: resolved,
		}

		actives := make(map[string]statusInfo)

		if !done {
			active, err := cs.ListStatuses(ctx)
			if err != nil {
				// log.G(ctx).WithError(err).Error("active check failed")
				continue
			}
			// update status of active entries!
			for _, active := range active {
				actives[active.Ref] = statusInfo{
					Ref:       active.Ref,
					Status:    "downloading",
					Offset:    active.Offset,
					Total:     active.Total,
					StartedAt: active.StartedAt,
					UpdatedAt: active.UpdatedAt,
				}
			}
		}

		// now, update the items in jobs that are not in active
		for _, j := range ongoing.jobs() {
			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
			if a, ok := actives[refKey]; ok {
				started := j.started
				_ = pw.Write(j.Digest.String(), progress.Status{
					Action:  a.Status,
					Total:   int(a.Total),
					Current: int(a.Offset),
					Started: &started,
				})
				continue
			}

			if !j.done {
				info, err := cs.Info(context.TODO(), j.Digest)
				if err != nil {
					if cerrdefs.IsNotFound(err) {
						// _ = pw.Write(j.Digest.String(), progress.Status{
						// 	Action: "waiting",
						// })
						continue
					}
				} else {
					j.done = true
				}

				if done || j.done {
					started := j.started
					createdAt := info.CreatedAt
					_ = pw.Write(j.Digest.String(), progress.Status{
						Action:    "done",
						Current:   int(info.Size),
						Total:     int(info.Size),
						Completed: &createdAt,
						Started:   &started,
					})
				}
			}
		}
		if done {
			return
		}
	}
}

// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string
	added    map[digest.Digest]*job
	mu       sync.Mutex
	resolved bool
}

type job struct {
	ocispec.Descriptor
	done    bool
	started time.Time
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: make(map[digest.Digest]*job),
	}
}

func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.added[desc.Digest] = &job{
		Descriptor: desc,
		started:    time.Now(),
	}
}

func (j *jobs) jobs() []*job {
	j.mu.Lock()
	defer j.mu.Unlock()

	descs := make([]*job, 0, len(j.added))
	for _, jb := range j.added {
		descs = append(descs, jb)
	}
	return descs
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

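// oneOffProgress writes a "started" progress status for id and returns a
// completion func that stamps the completion time and passes through its
// error, mirroring how resolve uses it:
//
//	done := oneOffProgress(ctx, "resolve "+ref)
//	defer func() { done(err) }()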
func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.NewFromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	_ = pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		_ = pw.Write(id, st)
		_ = pw.Close()
		return err
	}
}

// cacheKeyFromConfig returns a stable digest from an image config. If the
// config is a valid OCI image config, the chain ID of its layers is used.
func cacheKeyFromConfig(dt []byte) digest.Digest {
	var img ocispec.Image
	err := json.Unmarshal(dt, &img)
	if err != nil {
		log.G(context.TODO()).WithError(err).Error("failed to unmarshal image config for cache key")
		return digest.FromBytes(dt)
	}
	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
		return ""
	}
	return identity.ChainID(img.RootFS.DiffIDs)
}

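// platformMatches reports whether the local image satisfies the requested
// platform p, delegating the comparison to the daemon's
// OnlyPlatformWithFallback matcher.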
func platformMatches(img *image.Image, p *ocispec.Platform) bool {
	return dimages.OnlyPlatformWithFallback(*p).Match(ocispec.Platform{
		Architecture: img.Architecture,
		OS:           img.OS,
		OSVersion:    img.OSVersion,
		OSFeatures:   img.OSFeatures,
		Variant:      img.Variant,
	})
}

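// applySourcePolicies evaluates the given source policies against the image
// identifier and returns the (possibly rewritten) reference. If a policy
// rewrites the source to a non-image scheme, a ResolveToNonImageError is
// returned.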
func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (string, error) {
	ref, err := ctdreference.Parse(str)
	if err != nil {
		return "", errors.WithStack(err)
	}
	op := &pb.Op{
		Op: &pb.Op_Source{
			Source: &pb.SourceOp{
				Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
			},
		},
	}

	mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
	if err != nil {
		return "", errors.Wrap(err, "could not resolve image due to policy")
	}

	if mut {
		t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://")
		if !ok {
			return "", errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier())
		}
		if t != srctypes.DockerImageScheme {
			return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef}
		}
		ref, err = ctdreference.Parse(newRef)
		if err != nil {
			return "", errors.WithStack(err)
		}
	}
	return ref.String(), nil
}