github.com/pwn-term/docker@v0.0.0-20210616085119-6e977cce2565/moby/builder/builder-next/adapters/containerimage/pull.go

package containerimage

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"path"
	"sync"
	"time"

	"github.com/containerd/containerd/content"
	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ctdreference "github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/schema1"
	distreference "github.com/docker/distribution/reference"
	"github.com/docker/docker/distribution"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	pkgprogress "github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/reference"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/resolver"
	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/time/rate"
)

// SourceOpt holds the options for creating the image source
type SourceOpt struct {
	ContentStore    content.Store
	CacheAccessor   cache.Accessor
	ReferenceStore  reference.Store
	DownloadManager distribution.RootFSDownloadManager
	MetadataStore   metadata.V2MetadataService
	ImageStore      image.Store
	RegistryHosts   docker.RegistryHosts
	LayerStore      layer.Store
}

// Source is the source implementation for accessing container images
type Source struct {
	SourceOpt
	g flightcontrol.Group
}

// NewSource creates a new image source
func NewSource(opt SourceOpt) (*Source, error) {
	return &Source{SourceOpt: opt}, nil
}
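
// Illustrative sketch (not part of the builder wiring in this file): a Source
// is assembled from the daemon's existing stores and services. The short names
// below (cs, ca, rs, dm, ms, imgs, rh, ls) are hypothetical placeholders for
// components the daemon constructs elsewhere.
//
//	src, err := NewSource(SourceOpt{
//		ContentStore:    cs,   // content.Store
//		CacheAccessor:   ca,   // cache.Accessor
//		ReferenceStore:  rs,   // reference.Store
//		DownloadManager: dm,   // distribution.RootFSDownloadManager
//		MetadataStore:   ms,   // metadata.V2MetadataService
//		ImageStore:      imgs, // image.Store
//		RegistryHosts:   rh,   // docker.RegistryHosts
//		LayerStore:      ls,   // layer.Store
//	})
//	if err != nil {
//		// handle error
//	}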

// ID returns the image scheme identifier
func (is *Source) ID() string {
	return source.DockerImageScheme
}

// resolveLocal looks up the image config for a reference in the daemon's
// local reference and image stores.
func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
	ref, err := distreference.ParseNormalizedNamed(refStr)
	if err != nil {
		return nil, err
	}
	dgst, err := is.ReferenceStore.Get(ref)
	if err != nil {
		return nil, err
	}
	img, err := is.ImageStore.Get(image.ID(dgst))
	if err != nil {
		return nil, err
	}
	return img, nil
}

// resolveRemote resolves the manifest digest and image config for a reference
// by querying the registry.
func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	type t struct {
		dgst digest.Digest
		dt   []byte
	}
	p := platforms.DefaultSpec()
	if platform != nil {
		p = *platform
	}
	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage builds.
	key := "getconfig::" + ref + "::" + platforms.Format(p)
	res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) {
		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, nil, platform)
		if err != nil {
			return nil, err
		}
		return &t{dgst: dgst, dt: dt}, nil
	})
	if err != nil {
		return "", nil, err
	}
	typed := res.(*t)
	return typed.dgst, typed.dt, nil
}
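
// Illustrative note: flightcontrol.Group.Do deduplicates concurrent calls that
// share a key, so parallel stages resolving the same ref+platform wait on a
// single registry round trip instead of issuing their own. A minimal sketch of
// the pattern used above (the key string is a hypothetical example):
//
//	res, err := is.g.Do(ctx, "getconfig::docker.io/library/busybox:latest::linux/amd64",
//		func(ctx context.Context) (interface{}, error) {
//			// perform the resolution once; concurrent callers share the result
//			return nil, nil
//		})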

// ResolveImageConfig returns the image config for an image reference.
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
	if err != nil {
		return "", nil, err
	}
	switch resolveMode {
	case source.ResolveModeForcePull:
		dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
		// TODO: pull should fallback to local in case of failure to allow offline behavior
		// the fallback doesn't work currently
		return dgst, dt, err
		/*
			if err == nil {
				return dgst, dt, err
			}
			// fallback to local
			dt, err = is.resolveLocal(ref)
			return "", dt, err
		*/

	case source.ResolveModeDefault:
		// default == prefer local, but in the future could be smarter
		fallthrough
	case source.ResolveModePreferLocal:
		img, err := is.resolveLocal(ref)
		if err == nil {
			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
				logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
					path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
					path.Join(img.OS, img.Architecture, img.Variant),
				)
			} else {
				return "", img.RawJSON(), err
			}
		}
		// fallback to remote
		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
	}
	// should never happen
	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}

// Resolve returns a source instance that can pull the image referenced by the
// identifier.
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
	imageIdentifier, ok := id.(*source.ImageIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid image identifier %v", id)
	}

	platform := platforms.DefaultSpec()
	if imageIdentifier.Platform != nil {
		platform = *imageIdentifier.Platform
	}

	p := &puller{
		src: imageIdentifier,
		is:  is,
		//resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
		platform: platform,
		sm:       sm,
	}
	return p, nil
}

// puller pulls a single image, resolving it either from the local image store
// or from a registry.
type puller struct {
	is               *Source
	resolveLocalOnce sync.Once
	src              *source.ImageIdentifier
	desc             ocispec.Descriptor
	ref              string
	config           []byte
	platform         ocispec.Platform
	sm               *session.Manager
}

func (p *puller) resolver(g session.Group) remotes.Resolver {
	return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g)
}

// mainManifestKey derives a stable cache key from the resolved manifest digest
// and the requested platform.
func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Digest  digest.Digest
		OS      string
		Arch    string
		Variant string `json:",omitempty"`
	}{
		Digest:  p.desc.Digest,
		OS:      platform.OS,
		Arch:    platform.Architecture,
		Variant: platform.Variant,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}
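
// Illustrative sketch: mainManifestKey digests a small JSON document combining
// the manifest digest and the platform, so the same manifest requested for two
// platforms yields two distinct cache keys. The digest value below is a
// hypothetical placeholder.
//
//	// marshals to:
//	//   {"Digest":"sha256:aaaa…","OS":"linux","Arch":"amd64"}
//	// key = digest.FromBytes(<that JSON>)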

// resolveLocal checks whether the reference can be satisfied from the local
// content store and image store without contacting a registry. It runs at
// most once per puller.
func (p *puller) resolveLocal() {
	p.resolveLocalOnce.Do(func() {
		dgst := p.src.Reference.Digest()
		if dgst != "" {
			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
			if err == nil {
				p.ref = p.src.Reference.String()
				desc := ocispec.Descriptor{
					Size:   info.Size,
					Digest: dgst,
				}
				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
				if err == nil {
					mt, err := imageutil.DetectManifestMediaType(ra)
					if err == nil {
						desc.MediaType = mt
						p.desc = desc
					}
				}
			}
		}

		if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
			ref := p.src.Reference.String()
			img, err := p.is.resolveLocal(ref)
			if err == nil {
				if !platformMatches(img, &p.platform) {
					logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
						path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
						path.Join(img.OS, img.Architecture, img.Variant),
					)
				} else {
					p.config = img.RawJSON()
				}
			}
		}
	})
}

// resolve fetches the manifest descriptor and image config from the registry
// if they have not already been resolved locally.
func (p *puller) resolve(ctx context.Context, g session.Group) error {
	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage builds.
	key := "resolve::" + p.ref + "::" + platforms.Format(p.platform)
	_, err := p.is.g.Do(ctx, key, func(ctx context.Context) (_ interface{}, err error) {
		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
		defer func() {
			resolveProgressDone(err)
		}()

		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
		if err != nil {
			return nil, err
		}

		if p.desc.Digest == "" && p.config == nil {
			origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
			if err != nil {
				return nil, err
			}

			p.desc = desc
			p.ref = origRef
		}

		// Schema 1 manifests cannot be resolved to an image config
		// since the conversion must take place after all the content
		// has been read.
		// It may be possible to have a mapping between schema 1 manifests
		// and the schema 2 manifests they are converted to.
		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ref, err := distreference.WithDigest(ref, p.desc.Digest)
			if err != nil {
				return nil, err
			}
			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g)
			if err != nil {
				return nil, err
			}

			p.config = dt
		}
		return nil, nil
	})
	return err
}

// CacheKey returns the cache key for this image. For the initial call
// (index 0) with a resolved manifest digest, the key is derived from the
// manifest digest and platform; otherwise it falls back to a key derived
// from the image config.
func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) {
	p.resolveLocal()

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", nil, false, err
		}
		return dgst.String(), nil, false, nil
	}

	if p.config != nil {
		k := cacheKeyFromConfig(p.config).String()
		if k == "" {
			return digest.FromBytes(p.config).String(), nil, true, nil
		}
		return k, nil, true, nil
	}

	if err := p.resolve(ctx, g); err != nil {
		return "", nil, false, err
	}

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", nil, false, err
		}
		return dgst.String(), nil, false, nil
	}

	k := cacheKeyFromConfig(p.config).String()
	if k == "" {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", nil, false, err
		}
		return dgst.String(), nil, true, nil
	}

	return k, nil, true, nil
}

// getRef returns an immutable cache ref for the layer chain described by
// diffIDs, recursively ensuring that the parent chain exists first.
func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
	var parent cache.ImmutableRef
	if len(diffIDs) > 1 {
		var err error
		parent, err = p.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
		if err != nil {
			return nil, err
		}
		defer parent.Release(context.TODO())
	}
	return p.is.CacheAccessor.GetByBlob(ctx, ocispec.Descriptor{
		Annotations: map[string]string{
			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
		},
	}, parent, opts...)
}
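
// Illustrative note: for diff IDs [a, b, c] the recursion above resolves a,
// then (a, b), then (a, b, c), passing each result as the parent of the next
// GetByBlob call. Only the "containerd.io/uncompressed" annotation is set,
// since the layers are already registered in the daemon's layer store. The
// names a, b and c below are synthetic placeholders.
//
//	chain, err := p.getRef(ctx, []layer.DiffID{a, b, c},
//		cache.WithDescription("from local example"))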

// Snapshot materializes the image as an immutable cache ref, either from
// layers already present in the local layer store or by pulling and
// downloading the layers from the registry.
func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
	p.resolveLocal()
	if err := p.resolve(ctx, g); err != nil {
		return nil, err
	}

	if p.config != nil {
		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
		if err == nil {
			if len(img.RootFS.DiffIDs) == 0 {
				return nil, nil
			}
			l, err := p.is.LayerStore.Get(img.RootFS.ChainID())
			if err == nil {
				layer.ReleaseAndLog(p.is.LayerStore, l)
				ref, err := p.getRef(ctx, img.RootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
				if err != nil {
					return nil, err
				}
				return ref, nil
			}
		}
	}

	ongoing := newJobs(p.ref)

	pctx, stopProgress := context.WithCancel(ctx)

	pw, _, ctx := progress.FromContext(ctx)
	defer pw.Close()

	progressDone := make(chan struct{})
	go func() {
		showProgress(pctx, ongoing, p.is.ContentStore, pw)
		close(progressDone)
	}()
	defer func() {
		<-progressDone
	}()

	fetcher, err := p.resolver(g).Fetcher(ctx, p.ref)
	if err != nil {
		stopProgress()
		return nil, err
	}

	platform := platforms.Only(p.platform)

	var (
		schema1Converter *schema1.Converter
		handlers         []images.Handler
	)
	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
		handlers = append(handlers, schema1Converter)

		// TODO: Optimize to do dispatch and integrate pulling with download manager,
		// leverage existing blob mapping and layer storage
	} else {

		// TODO: need a wrapper snapshot interface that combines content
		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
		// or 2) cachemanager should manage the contentstore
		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			switch desc.MediaType {
			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
			default:
				return nil, images.ErrSkipDesc
			}
			ongoing.add(desc)
			return nil, nil
		}))

		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
		// Set any children labels for that content
		childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
		// Filter the children by the platform
		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
		// Limit manifests pulled to the best match in an index
		childrenHandler = images.LimitManifests(childrenHandler, platform, 1)

		handlers = append(handlers,
			remotes.FetchHandler(p.is.ContentStore, fetcher),
			childrenHandler,
		)
	}

	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
		stopProgress()
		return nil, err
	}
	defer stopProgress()

	if schema1Converter != nil {
		p.desc, err = schema1Converter.Convert(ctx)
		if err != nil {
			return nil, err
		}
	}

	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
	if err != nil {
		return nil, err
	}

	var img ocispec.Image
	if err := json.Unmarshal(dt, &img); err != nil {
		return nil, err
	}

	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
		return nil, errors.Errorf("invalid config for manifest")
	}

	pchan := make(chan pkgprogress.Progress, 10)
	defer close(pchan)

	go func() {
		m := map[string]struct {
			st      time.Time
			limiter *rate.Limiter
		}{}
		for p := range pchan {
			if p.Action == "Extracting" {
				st, ok := m[p.ID]
				if !ok {
					st.st = time.Now()
					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
					m[p.ID] = st
				}
				var end *time.Time
				if p.LastUpdate || st.limiter.Allow() {
					if p.LastUpdate {
						tm := time.Now()
						end = &tm
					}
					_ = pw.Write("extracting "+p.ID, progress.Status{
						Action:    "extract",
						Started:   &st.st,
						Completed: end,
					})
				}
			}
		}
	}()

	if len(mfst.Layers) == 0 {
		return nil, nil
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

	for i, desc := range mfst.Layers {
		ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:    desc,
			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
			fetcher: fetcher,
			ref:     p.src.Reference,
			is:      p.is,
		})
	}

	defer func() {
		<-progressDone
		for _, desc := range mfst.Layers {
			p.is.ContentStore.Delete(context.TODO(), desc.Digest)
		}
	}()

	r := image.NewRootFS()
	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, "linux", layers, pkgprogress.ChanOutput(pchan))
	stopProgress()
	if err != nil {
		return nil, err
	}

	ref, err := p.getRef(ctx, rootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
	release()
	if err != nil {
		return nil, err
	}

	// TODO: handle windows layers for cross platform builds

	if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
		if err := cache.SetRecordType(ref, p.src.RecordType); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
	}

	return ref, nil
}

// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
//
// layerDescriptor adapts a manifest layer to the xfer.DownloadDescriptor
// interface used by the download manager.
type layerDescriptor struct {
	is      *Source
	fetcher remotes.Fetcher
	desc    ocispec.Descriptor
	diffID  layer.DiffID
	ref     ctdreference.Spec
}

func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}

func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}

func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}

// Download fetches the layer blob into the content store and returns a reader
// over the stored blob.
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}
	defer rc.Close()

	refKey := remotes.MakeRefKey(ctx, ld.desc)

	ld.is.ContentStore.Abort(ctx, refKey)

	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
		ld.is.ContentStore.Abort(ctx, refKey)
		return nil, 0, err
	}

	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}

	return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}

func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest))
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}

// showProgress periodically reports the status of ongoing jobs and active
// content-store writes to the progress writer until the context is done.
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			done = true
		}

		resolved := "resolved"
		if !ongoing.isResolved() {
			resolved = "resolving"
		}
		statuses[ongoing.name] = statusInfo{
			Ref:    ongoing.name,
			Status: resolved,
		}

		actives := make(map[string]statusInfo)

		if !done {
			active, err := cs.ListStatuses(ctx)
			if err != nil {
				// log.G(ctx).WithError(err).Error("active check failed")
				continue
			}
			// update status of active entries!
			for _, active := range active {
				actives[active.Ref] = statusInfo{
					Ref:       active.Ref,
					Status:    "downloading",
					Offset:    active.Offset,
					Total:     active.Total,
					StartedAt: active.StartedAt,
					UpdatedAt: active.UpdatedAt,
				}
			}
		}

		// now, update the items in jobs that are not in active
		for _, j := range ongoing.jobs() {
			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
			if a, ok := actives[refKey]; ok {
				started := j.started
				_ = pw.Write(j.Digest.String(), progress.Status{
					Action:  a.Status,
					Total:   int(a.Total),
					Current: int(a.Offset),
					Started: &started,
				})
				continue
			}

			if !j.done {
				info, err := cs.Info(context.TODO(), j.Digest)
				if err != nil {
					if containerderrors.IsNotFound(err) {
						// _ = pw.Write(j.Digest.String(), progress.Status{
						// 	Action: "waiting",
						// })
						continue
					}
				} else {
					j.done = true
				}

				if done || j.done {
					started := j.started
					createdAt := info.CreatedAt
					_ = pw.Write(j.Digest.String(), progress.Status{
						Action:    "done",
						Current:   int(info.Size),
						Total:     int(info.Size),
						Completed: &createdAt,
						Started:   &started,
					})
				}
			}
		}
		if done {
			return
		}
	}
}

// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string
	added    map[digest.Digest]*job
	mu       sync.Mutex
	resolved bool
}

type job struct {
	ocispec.Descriptor
	done    bool
	started time.Time
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: make(map[digest.Digest]*job),
	}
}

func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.added[desc.Digest] = &job{
		Descriptor: desc,
		started:    time.Now(),
	}
}

func (j *jobs) jobs() []*job {
	j.mu.Lock()
	defer j.mu.Unlock()

	descs := make([]*job, 0, len(j.added))
	for _, j := range j.added {
		descs = append(descs, j)
	}
	return descs
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

// oneOffProgress writes a started status for id and returns a function that
// marks it complete, closes the writer, and passes through any error.
func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.FromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	_ = pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		_ = pw.Write(id, st)
		_ = pw.Close()
		return err
	}
}
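
// Illustrative usage (mirrors resolve above): the returned closure is deferred
// so the same progress record is marked complete regardless of outcome. The
// "example step" id is a hypothetical placeholder.
//
//	done := oneOffProgress(ctx, "example step")
//	defer func() {
//		done(err)
//	}()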

// cacheKeyFromConfig returns a stable digest for an image config. If the
// config parses as an OCI image with a layer-based rootfs, the chain ID of its
// diff IDs is used; if it does not parse, the digest of the raw bytes is
// returned; otherwise the result is empty.
func cacheKeyFromConfig(dt []byte) digest.Digest {
	var img ocispec.Image
	err := json.Unmarshal(dt, &img)
	if err != nil {
		return digest.FromBytes(dt)
	}
	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
		return ""
	}
	return identity.ChainID(img.RootFS.DiffIDs)
}
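
// Illustrative sketch: identity.ChainID implements the OCI chain-ID scheme,
// where the chain ID of a single diff ID is that diff ID and each subsequent
// layer is folded in as digest(chainID(prev) + " " + diffID(next)). The two
// diff IDs below are synthetic placeholders, not real layers.
//
//	a := digest.FromString("layer-a")
//	b := digest.FromString("layer-b")
//	_ = identity.ChainID([]digest.Digest{a, b})
//	// equals digest.FromString(a.String() + " " + b.String()), assuming SHA-256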

// resolveModeToString is the equivalent of github.com/moby/buildkit/solver/llb.ResolveMode.String()
// FIXME: add String method on source.ResolveMode
func resolveModeToString(rm source.ResolveMode) string {
	switch rm {
	case source.ResolveModeDefault:
		return "default"
	case source.ResolveModeForcePull:
		return "pull"
	case source.ResolveModePreferLocal:
		return "local"
	}
	return ""
}

func platformMatches(img *image.Image, p *ocispec.Platform) bool {
	if img.Architecture != p.Architecture {
		return false
	}
	if img.Variant != "" && img.Variant != p.Variant {
		return false
	}
	return img.OS == p.OS
}