github.com/Heebron/moby@v0.0.0-20221111184709-6eab4f55faf7/builder/builder-next/adapters/containerimage/pull.go

package containerimage

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"path"
	"sync"
	"time"

	"github.com/containerd/containerd/content"
	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/platforms"
	ctdreference "github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/schema1"
	distreference "github.com/docker/distribution/reference"
	dimages "github.com/docker/docker/daemon/images"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	pkgprogress "github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/reference"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/source"
	srctypes "github.com/moby/buildkit/source/types"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/resolver"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/time/rate"
)

// SourceOpt is the set of options for creating the image source
type SourceOpt struct {
	ContentStore    content.Store
	CacheAccessor   cache.Accessor
	ReferenceStore  reference.Store
	DownloadManager *xfer.LayerDownloadManager
	MetadataStore   metadata.V2MetadataService
	ImageStore      image.Store
	RegistryHosts   docker.RegistryHosts
	LayerStore      layer.Store
	LeaseManager    leases.Manager
	GarbageCollect  func(ctx context.Context) (gc.Stats, error)
}

// Source is the source implementation for accessing container images
type Source struct {
	SourceOpt
	g flightcontrol.Group
}

// NewSource creates a new image source
func NewSource(opt SourceOpt) (*Source, error) {
	return &Source{SourceOpt: opt}, nil
}

// ID returns the image scheme identifier
func (is *Source) ID() string {
	return srctypes.DockerImageScheme
}

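// resolveLocal looks up the image reference in the daemon's local reference
// and image stores, without contacting a registry.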
func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
	ref, err := distreference.ParseNormalizedNamed(refStr)
	if err != nil {
		return nil, err
	}
	dgst, err := is.ReferenceStore.Get(ref)
	if err != nil {
		return nil, err
	}
	img, err := is.ImageStore.Get(image.ID(dgst))
	if err != nil {
		return nil, err
	}
	return img, nil
}

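// resolveRemote resolves the manifest digest and raw image config for ref from
// its registry, deduplicating concurrent resolutions of the same reference and
// platform through the flightcontrol group.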
func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	type t struct {
		dgst digest.Digest
		dt   []byte
	}
	p := platforms.DefaultSpec()
	if platform != nil {
		p = *platform
	}
	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage builds.
	key := "getconfig::" + ref + "::" + platforms.Format(p)
	res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) {
		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
		if err != nil {
			return nil, err
		}
		return &t{dgst: dgst, dt: dt}, nil
	})
	if err != nil {
		return "", nil, err
	}
	typed := res.(*t)
	return typed.dgst, typed.dt, nil
}

// ResolveImageConfig returns the image config for an image according to the
// requested resolve mode
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
	if err != nil {
		return "", nil, err
	}
	switch resolveMode {
	case source.ResolveModeForcePull:
		dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
		// TODO: pull should fall back to local in case of failure to allow offline behavior
		// the fallback doesn't work currently
		return dgst, dt, err
		/*
			if err == nil {
				return dgst, dt, err
			}
			// fallback to local
			dt, err = is.resolveLocal(ref)
			return "", dt, err
		*/

	case source.ResolveModeDefault:
		// default == prefer local, but in the future could be smarter
		fallthrough
	case source.ResolveModePreferLocal:
		img, err := is.resolveLocal(ref)
		if err == nil {
			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
				logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
					path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
					path.Join(img.OS, img.Architecture, img.Variant),
				)
			} else {
				return "", img.RawJSON(), err
			}
		}
		// fallback to remote
		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
	}
	// should never happen
	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}

// Resolve returns a source instance that can pull the image for the given
// identifier
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
	imageIdentifier, ok := id.(*source.ImageIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid image identifier %v", id)
	}

	platform := platforms.DefaultSpec()
	if imageIdentifier.Platform != nil {
		platform = *imageIdentifier.Platform
	}

	p := &puller{
		src: imageIdentifier,
		is:  is,
		// resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
		platform: platform,
		sm:       sm,
	}
	return p, nil
}

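// puller holds the state for resolving and pulling a single image reference.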
type puller struct {
	is               *Source
	resolveLocalOnce sync.Once
	g                flightcontrol.Group
	src              *source.ImageIdentifier
	desc             ocispec.Descriptor
	ref              string
	config           []byte
	platform         ocispec.Platform
	sm               *session.Manager
}

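// resolver returns a pull resolver for the image reference from the shared
// resolver pool.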
func (p *puller) resolver(g session.Group) remotes.Resolver {
	return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g)
}

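// mainManifestKey returns a digest over the resolved manifest digest and the
// target platform, used as the primary cache key for the image.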
func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Digest  digest.Digest
		OS      string
		Arch    string
		Variant string `json:",omitempty"`
	}{
		Digest:  p.desc.Digest,
		OS:      platform.OS,
		Arch:    platform.Architecture,
		Variant: platform.Variant,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}

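// resolveLocal tries, once, to satisfy the reference from local state: the
// content store when the reference carries a digest, and the image store when
// the resolve mode allows it.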
func (p *puller) resolveLocal() {
	p.resolveLocalOnce.Do(func() {
		dgst := p.src.Reference.Digest()
		if dgst != "" {
			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
			if err == nil {
				p.ref = p.src.Reference.String()
				desc := ocispec.Descriptor{
					Size:   info.Size,
					Digest: dgst,
				}
				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
				if err == nil {
					mt, err := imageutil.DetectManifestMediaType(ra)
					if err == nil {
						desc.MediaType = mt
						p.desc = desc
					}
				}
			}
		}

		if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
			ref := p.src.Reference.String()
			img, err := p.is.resolveLocal(ref)
			if err == nil {
				if !platformMatches(img, &p.platform) {
					logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
						path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
						path.Join(img.OS, img.Architecture, img.Variant),
					)
				} else {
					p.config = img.RawJSON()
				}
			}
		}
	})
}

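// resolve fetches the descriptor and image config from the remote registry,
// reporting progress and deduplicating concurrent calls through the
// flightcontrol group.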
func (p *puller) resolve(ctx context.Context, g session.Group) error {
	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) {
		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
		defer func() {
			resolveProgressDone(err)
		}()

		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
		if err != nil {
			return nil, err
		}

		if p.desc.Digest == "" && p.config == nil {
			origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
			if err != nil {
				return nil, err
			}

			p.desc = desc
			p.ref = origRef
		}

		// Schema 1 manifests cannot be resolved to an image config
		// since the conversion must take place after all the content
		// has been read.
		// It may be possible to have a mapping between schema 1 manifests
		// and the schema 2 manifests they are converted to.
		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ref, err := distreference.WithDigest(ref, p.desc.Digest)
			if err != nil {
				return nil, err
			}
			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g)
			if err != nil {
				return nil, err
			}

			p.config = dt
		}
		return nil, nil
	})
	return err
}

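// CacheKey returns the solver cache key for the image: a key derived from the
// manifest digest and platform when the descriptor is known, otherwise one
// derived from the image config.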
func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
	p.resolveLocal()

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	if p.config != nil {
		k := cacheKeyFromConfig(p.config).String()
		if k == "" {
			return digest.FromBytes(p.config).String(), digest.FromBytes(p.config).String(), nil, true, nil
		}
		return k, k, nil, true, nil
	}

	if err := p.resolve(ctx, g); err != nil {
		return "", "", nil, false, err
	}

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, false, nil
	}

	if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
		return "", "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
	}

	k := cacheKeyFromConfig(p.config).String()
	if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", "", nil, false, err
		}
		return dgst.String(), p.desc.Digest.String(), nil, true, nil
	}

	return k, k, nil, true, nil
}

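// getRef returns an immutable cache ref for the layer chain described by
// diffIDs, recursively resolving the parent chain first.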
func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
	var parent cache.ImmutableRef
	if len(diffIDs) > 1 {
		var err error
		parent, err = p.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
		if err != nil {
			return nil, err
		}
		defer parent.Release(context.TODO())
	}
	return p.is.CacheAccessor.GetByBlob(ctx, ocispec.Descriptor{
		Annotations: map[string]string{
			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
		},
	}, parent, opts...)
}

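// Snapshot materializes the image rootfs as an immutable cache ref, reusing
// locally available layers when possible and otherwise pulling them from the
// registry.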
func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
	p.resolveLocal()
	if len(p.config) == 0 {
		if err := p.resolve(ctx, g); err != nil {
			return nil, err
		}
	}

	if p.config != nil {
		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
		if err == nil {
			if len(img.RootFS.DiffIDs) == 0 {
				return nil, nil
			}
			l, err := p.is.LayerStore.Get(img.RootFS.ChainID())
			if err == nil {
				layer.ReleaseAndLog(p.is.LayerStore, l)
				ref, err := p.getRef(ctx, img.RootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
				if err != nil {
					return nil, err
				}
				return ref, nil
			}
		}
	}

	ongoing := newJobs(p.ref)

	ctx, done, err := leaseutil.WithLease(ctx, p.is.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
	if err != nil {
		return nil, err
	}
	defer func() {
		done(context.TODO())
		if p.is.GarbageCollect != nil {
			go p.is.GarbageCollect(context.TODO())
		}
	}()

	pctx, stopProgress := context.WithCancel(ctx)

	pw, _, ctx := progress.NewFromContext(ctx)
	defer pw.Close()

	progressDone := make(chan struct{})
	go func() {
		showProgress(pctx, ongoing, p.is.ContentStore, pw)
		close(progressDone)
	}()
	defer func() {
		<-progressDone
	}()

	fetcher, err := p.resolver(g).Fetcher(ctx, p.ref)
	if err != nil {
		stopProgress()
		return nil, err
	}

	platform := platforms.Only(p.platform)

	var nonLayers []digest.Digest

	var (
		schema1Converter *schema1.Converter
		handlers         []images.Handler
	)
	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
		handlers = append(handlers, schema1Converter)

		// TODO: Optimize to do dispatch and integrate pulling with download manager,
		// leverage existing blob mapping and layer storage
	} else {
		// TODO: need a wrapper snapshot interface that combines content
		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
		// or 2) cachemanager should manage the contentstore
		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			switch desc.MediaType {
			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
				nonLayers = append(nonLayers, desc.Digest)
			default:
				return nil, images.ErrSkipDesc
			}
			ongoing.add(desc)
			return nil, nil
		}))

		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
		// Filter the children by the platform
		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
		// Limit manifests pulled to the best match in an index
		childrenHandler = images.LimitManifests(childrenHandler, platform, 1)

		handlers = append(handlers,
			remotes.FetchHandler(p.is.ContentStore, fetcher),
			childrenHandler,
		)
	}

	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
		stopProgress()
		return nil, err
	}
	defer stopProgress()

	if schema1Converter != nil {
		p.desc, err = schema1Converter.Convert(ctx)
		if err != nil {
			return nil, err
		}
	}

	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
	if err != nil {
		return nil, err
	}

	var img ocispec.Image
	if err := json.Unmarshal(dt, &img); err != nil {
		return nil, err
	}

	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
		return nil, errors.Errorf("invalid config for manifest")
	}

	pchan := make(chan pkgprogress.Progress, 10)
	defer close(pchan)

	go func() {
		m := map[string]struct {
			st      time.Time
			limiter *rate.Limiter
		}{}
		for p := range pchan {
			if p.Action == "Extracting" {
				st, ok := m[p.ID]
				if !ok {
					st.st = time.Now()
					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
					m[p.ID] = st
				}
				var end *time.Time
				if p.LastUpdate || st.limiter.Allow() {
					if p.LastUpdate {
						tm := time.Now()
						end = &tm
					}
					_ = pw.Write("extracting "+p.ID, progress.Status{
						Action:    "extract",
						Started:   &st.st,
						Completed: end,
					})
				}
			}
		}
	}()

	if len(mfst.Layers) == 0 {
		return nil, nil
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

	for i, desc := range mfst.Layers {
		if err := desc.Digest.Validate(); err != nil {
			return nil, errors.Wrap(err, "layer digest could not be validated")
		}
		ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:    desc,
			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
			fetcher: fetcher,
			ref:     p.src.Reference,
			is:      p.is,
		})
	}

	defer func() {
		<-progressDone
	}()

	r := image.NewRootFS()
	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, layers, pkgprogress.ChanOutput(pchan))
	stopProgress()
	if err != nil {
		return nil, err
	}

	ref, err := p.getRef(ctx, rootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
	release()
	if err != nil {
		return nil, err
	}

	// keep the manifest blobs for as long as the ref is alive so they stay usable for cache
	for _, nl := range nonLayers {
		if err := p.is.LeaseManager.AddResource(ctx, leases.Lease{ID: ref.ID()}, leases.Resource{
			ID:   nl.String(),
			Type: "content",
		}); err != nil {
			return nil, err
		}
	}

	// TODO: handle windows layers for cross platform builds

	if p.src.RecordType != "" && ref.GetRecordType() == "" {
		if err := ref.SetRecordType(p.src.RecordType); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
	}

	return ref, nil
}

// layerDescriptor describes a single layer blob fetched through a containerd
// remotes.Fetcher; it implements the download descriptor interface expected
// by the daemon's layer download manager.
type layerDescriptor struct {
	is      *Source
	fetcher remotes.Fetcher
	desc    ocispec.Descriptor
	diffID  layer.DiffID
	ref     ctdreference.Spec
}

func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}

func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}

func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}

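// Download fetches the layer blob, writes it into the content store under a
// transfer ref key, and returns a reader over the stored content together
// with its size.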
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}
	defer rc.Close()

	refKey := remotes.MakeRefKey(ctx, ld.desc)

	ld.is.ContentStore.Abort(ctx, refKey)

	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
		ld.is.ContentStore.Abort(ctx, refKey)
		return nil, 0, err
	}

	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}

	return io.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}

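// Close is a no-op; the downloaded blob is retained in the content store.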
func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest))
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}

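// showProgress polls the content store for active transfers and emits
// progress updates for the ongoing jobs until ctx is canceled.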
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			done = true
		}

		resolved := "resolved"
		if !ongoing.isResolved() {
			resolved = "resolving"
		}
		statuses[ongoing.name] = statusInfo{
			Ref:    ongoing.name,
			Status: resolved,
		}

		actives := make(map[string]statusInfo)

		if !done {
			active, err := cs.ListStatuses(ctx)
			if err != nil {
				// log.G(ctx).WithError(err).Error("active check failed")
				continue
			}
			// update status of active entries!
			for _, active := range active {
				actives[active.Ref] = statusInfo{
					Ref:       active.Ref,
					Status:    "downloading",
					Offset:    active.Offset,
					Total:     active.Total,
					StartedAt: active.StartedAt,
					UpdatedAt: active.UpdatedAt,
				}
			}
		}

		// now, update the items in jobs that are not in active
		for _, j := range ongoing.jobs() {
			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
			if a, ok := actives[refKey]; ok {
				started := j.started
				_ = pw.Write(j.Digest.String(), progress.Status{
					Action:  a.Status,
					Total:   int(a.Total),
					Current: int(a.Offset),
					Started: &started,
				})
				continue
			}

			if !j.done {
				info, err := cs.Info(context.TODO(), j.Digest)
				if err != nil {
					if containerderrors.IsNotFound(err) {
						// _ = pw.Write(j.Digest.String(), progress.Status{
						// 	Action: "waiting",
						// })
						continue
					}
				} else {
					j.done = true
				}

				if done || j.done {
					started := j.started
					createdAt := info.CreatedAt
					_ = pw.Write(j.Digest.String(), progress.Status{
						Action:    "done",
						Current:   int(info.Size),
						Total:     int(info.Size),
						Completed: &createdAt,
						Started:   &started,
					})
				}
			}
		}
		if done {
			return
		}
	}
}

// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string
	added    map[digest.Digest]*job
	mu       sync.Mutex
	resolved bool
}

type job struct {
	ocispec.Descriptor
	done    bool
	started time.Time
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: make(map[digest.Digest]*job),
	}
}

func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.added[desc.Digest] = &job{
		Descriptor: desc,
		started:    time.Now(),
	}
}

func (j *jobs) jobs() []*job {
	j.mu.Lock()
	defer j.mu.Unlock()

	descs := make([]*job, 0, len(j.added))
	for _, j := range j.added {
		descs = append(descs, j)
	}
	return descs
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

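// oneOffProgress writes a started progress status for id and returns a
// function that marks it completed, passing through the given error.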
func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.NewFromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	_ = pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		_ = pw.Write(id, st)
		_ = pw.Close()
		return err
	}
}

// cacheKeyFromConfig returns a stable digest from the image config. If the
// config is a well-formed OCI image, the chain ID of its layers is used.
func cacheKeyFromConfig(dt []byte) digest.Digest {
	var img ocispec.Image
	err := json.Unmarshal(dt, &img)
	if err != nil {
		logrus.WithError(err).Error("failed to unmarshal image config for cache key")
		return digest.FromBytes(dt)
	}
	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
		return ""
	}
	return identity.ChainID(img.RootFS.DiffIDs)
}

// resolveModeToString is the equivalent of github.com/moby/buildkit/solver/llb.ResolveMode.String()
// FIXME: add String method on source.ResolveMode
func resolveModeToString(rm source.ResolveMode) string {
	switch rm {
	case source.ResolveModeDefault:
		return "default"
	case source.ResolveModeForcePull:
		return "pull"
	case source.ResolveModePreferLocal:
		return "local"
	}
	return ""
}

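// platformMatches reports whether the local image satisfies the requested
// platform, using the daemon's platform matcher with fallback rules.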
func platformMatches(img *image.Image, p *ocispec.Platform) bool {
	return dimages.OnlyPlatformWithFallback(*p).Match(ocispec.Platform{
		Architecture: img.Architecture,
		OS:           img.OS,
		OSVersion:    img.OSVersion,
		OSFeatures:   img.OSFeatures,
		Variant:      img.Variant,
	})
}