github.com/wozhu6104/docker@v20.10.10+incompatible/builder/builder-next/adapters/containerimage/pull.go

package containerimage

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"path"
	"runtime"
	"sync"
	"time"

	"github.com/containerd/containerd/content"
	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/platforms"
	ctdreference "github.com/containerd/containerd/reference"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/schema1"
	distreference "github.com/docker/distribution/reference"
	dimages "github.com/docker/docker/daemon/images"
	"github.com/docker/docker/distribution"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	pkgprogress "github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/reference"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/resolver"
	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/time/rate"
)

// SourceOpt contains the options for creating the image source
type SourceOpt struct {
	ContentStore    content.Store
	CacheAccessor   cache.Accessor
	ReferenceStore  reference.Store
	DownloadManager distribution.RootFSDownloadManager
	MetadataStore   metadata.V2MetadataService
	ImageStore      image.Store
	RegistryHosts   docker.RegistryHosts
	LayerStore      layer.Store
	LeaseManager    leases.Manager
	GarbageCollect  func(ctx context.Context) (gc.Stats, error)
}

// Source is the source implementation for accessing container images
type Source struct {
	SourceOpt
	g flightcontrol.Group
}

// NewSource creates a new image source
func NewSource(opt SourceOpt) (*Source, error) {
	return &Source{SourceOpt: opt}, nil
}

// ID returns the image scheme identifier
func (is *Source) ID() string {
	return source.DockerImageScheme
}

func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
	ref, err := distreference.ParseNormalizedNamed(refStr)
	if err != nil {
		return nil, err
	}
	dgst, err := is.ReferenceStore.Get(ref)
	if err != nil {
		return nil, err
	}
	img, err := is.ImageStore.Get(image.ID(dgst))
	if err != nil {
		return nil, err
	}
	return img, nil
}

func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	type t struct {
		dgst digest.Digest
		dt   []byte
	}
	p := platforms.DefaultSpec()
	if platform != nil {
		p = *platform
	}
	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage.
	key := "getconfig::" + ref + "::" + platforms.Format(p)
	res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) {
		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
		if err != nil {
			return nil, err
		}
		return &t{dgst: dgst, dt: dt}, nil
	})
	if err != nil {
		return "", nil, err
	}
	typed := res.(*t)
	return typed.dgst, typed.dt, nil
}
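
// A minimal sketch of the flightcontrol pattern used above, under the
// assumption that several callers race on the same key: concurrent Do calls
// with an identical key share a single execution and all receive its result.
// loadConfig is a hypothetical helper standing in for the imageutil.Config call.
//
//	var g flightcontrol.Group
//	res, err := g.Do(ctx, "getconfig::"+ref, func(ctx context.Context) (interface{}, error) {
//		return loadConfig(ctx, ref) // runs once; duplicate callers block and share the result
//	})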

// ResolveImageConfig returns image config for an image
func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
	if err != nil {
		return "", nil, err
	}
	switch resolveMode {
	case source.ResolveModeForcePull:
		dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
		// TODO: pull should fall back to local in case of failure to allow offline behavior
		// the fallback doesn't work currently
		return dgst, dt, err
		/*
			if err == nil {
				return dgst, dt, err
			}
			// fallback to local
			dt, err = is.resolveLocal(ref)
			return "", dt, err
		*/

	case source.ResolveModeDefault:
		// default == prefer local, but in the future could be smarter
		fallthrough
	case source.ResolveModePreferLocal:
		img, err := is.resolveLocal(ref)
		if err == nil {
			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
				logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, checking remote",
					path.Join(opt.Platform.OS, opt.Platform.Architecture, opt.Platform.Variant),
					path.Join(img.OS, img.Architecture, img.Variant),
				)
			} else {
				return "", img.RawJSON(), err
			}
		}
		// fallback to remote
		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
	}
	// should never happen
	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
}
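
// Hedged usage sketch: the resolve behavior is selected through
// llb.ResolveImageConfigOpt.ResolveMode, where "pull" forces the registry and
// "default"/"local" consult the local image store first, as above. src, sm and
// g are assumed to be an existing *Source, session manager and session group.
//
//	dgst, config, err := src.ResolveImageConfig(ctx, "docker.io/library/alpine:latest",
//		llb.ResolveImageConfigOpt{ResolveMode: "pull"}, sm, g)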

// Resolve returns access to pulling for an identifier
func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
	imageIdentifier, ok := id.(*source.ImageIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid image identifier %v", id)
	}

	platform := platforms.DefaultSpec()
	if imageIdentifier.Platform != nil {
		platform = *imageIdentifier.Platform
	}

	p := &puller{
		src: imageIdentifier,
		is:  is,
		//resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
		platform: platform,
		sm:       sm,
	}
	return p, nil
}

type puller struct {
	is               *Source
	resolveLocalOnce sync.Once
	g                flightcontrol.Group
	src              *source.ImageIdentifier
	desc             ocispec.Descriptor
	ref              string
	config           []byte
	platform         ocispec.Platform
	sm               *session.Manager
}

func (p *puller) resolver(g session.Group) remotes.Resolver {
	return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g)
}

func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Digest  digest.Digest
		OS      string
		Arch    string
		Variant string `json:",omitempty"`
	}{
		Digest:  p.desc.Digest,
		OS:      platform.OS,
		Arch:    platform.Architecture,
		Variant: platform.Variant,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}
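
// The manifest cache key above is a digest over the manifest digest plus the
// platform fields, so resolving the same image for two platforms yields two
// distinct keys. A hedged sketch of the same derivation, with desc assumed to
// be a resolved manifest descriptor:
//
//	dt, _ := json.Marshal(struct {
//		Digest digest.Digest
//		OS     string
//		Arch   string
//	}{Digest: desc.Digest, OS: "linux", Arch: "amd64"})
//	key := digest.FromBytes(dt) // stable for identical digest/platform inputs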

func (p *puller) resolveLocal() {
	p.resolveLocalOnce.Do(func() {
		dgst := p.src.Reference.Digest()
		if dgst != "" {
			info, err := p.is.ContentStore.Info(context.TODO(), dgst)
			if err == nil {
				p.ref = p.src.Reference.String()
				desc := ocispec.Descriptor{
					Size:   info.Size,
					Digest: dgst,
				}
				ra, err := p.is.ContentStore.ReaderAt(context.TODO(), desc)
				if err == nil {
					mt, err := imageutil.DetectManifestMediaType(ra)
					if err == nil {
						desc.MediaType = mt
						p.desc = desc
					}
				}
			}
		}

		if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
			ref := p.src.Reference.String()
			img, err := p.is.resolveLocal(ref)
			if err == nil {
				if !platformMatches(img, &p.platform) {
					logrus.WithField("ref", ref).Debugf("Requested build platform %s does not match local image platform %s, not resolving",
						path.Join(p.platform.OS, p.platform.Architecture, p.platform.Variant),
						path.Join(img.OS, img.Architecture, img.Variant),
					)
				} else {
					p.config = img.RawJSON()
				}
			}
		}
	})
}

func (p *puller) resolve(ctx context.Context, g session.Group) error {
	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) {
		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
		defer func() {
			resolveProgressDone(err)
		}()

		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
		if err != nil {
			return nil, err
		}

		if p.desc.Digest == "" && p.config == nil {
			origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
			if err != nil {
				return nil, err
			}

			p.desc = desc
			p.ref = origRef
		}

		// Schema 1 manifests cannot be resolved to an image config
		// since the conversion must take place after all the content
		// has been read.
		// It may be possible to have a mapping between schema 1 manifests
		// and the schema 2 manifests they are converted to.
		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
			ref, err := distreference.WithDigest(ref, p.desc.Digest)
			if err != nil {
				return nil, err
			}
			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g)
			if err != nil {
				return nil, err
			}

			p.config = dt
		}
		return nil, nil
	})
	return err
}

func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) {
	p.resolveLocal()

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", nil, false, err
		}
		return dgst.String(), nil, false, nil
	}

	if p.config != nil {
		k := cacheKeyFromConfig(p.config).String()
		if k == "" {
			return digest.FromBytes(p.config).String(), nil, true, nil
		}
		return k, nil, true, nil
	}

	if err := p.resolve(ctx, g); err != nil {
		return "", nil, false, err
	}

	if p.desc.Digest != "" && index == 0 {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", nil, false, err
		}
		return dgst.String(), nil, false, nil
	}

	if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
		return "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
	}

	k := cacheKeyFromConfig(p.config).String()
	if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		dgst, err := p.mainManifestKey(p.platform)
		if err != nil {
			return "", nil, false, err
		}
		return dgst.String(), nil, true, nil
	}

	return k, nil, true, nil
}
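
// Hedged reading of the CacheKey flow above: with a digested reference, the
// first call (index 0) can compute a manifest-based key without registry
// traffic (the false return marks the key as not yet final); once resolve()
// has filled in the image config, the ChainID-based key from
// cacheKeyFromConfig is returned as final.
//
//	key, _, final, err := p.CacheKey(ctx, g, 0) // manifest key first, config key on a later call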

func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
	var parent cache.ImmutableRef
	if len(diffIDs) > 1 {
		var err error
		parent, err = p.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
		if err != nil {
			return nil, err
		}
		defer parent.Release(context.TODO())
	}
	return p.is.CacheAccessor.GetByBlob(ctx, ocispec.Descriptor{
		Annotations: map[string]string{
			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
		},
	}, parent, opts...)
}

func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
	p.resolveLocal()
	if len(p.config) == 0 {
		if err := p.resolve(ctx, g); err != nil {
			return nil, err
		}
	}

	if p.config != nil {
		img, err := p.is.ImageStore.Get(image.ID(digest.FromBytes(p.config)))
		if err == nil {
			if len(img.RootFS.DiffIDs) == 0 {
				return nil, nil
			}
			l, err := p.is.LayerStore.Get(img.RootFS.ChainID())
			if err == nil {
				layer.ReleaseAndLog(p.is.LayerStore, l)
				ref, err := p.getRef(ctx, img.RootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("from local %s", p.ref)))
				if err != nil {
					return nil, err
				}
				return ref, nil
			}
		}
	}

	ongoing := newJobs(p.ref)

	ctx, done, err := leaseutil.WithLease(ctx, p.is.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
	if err != nil {
		return nil, err
	}
	defer func() {
		done(context.TODO())
		if p.is.GarbageCollect != nil {
			go p.is.GarbageCollect(context.TODO())
		}
	}()

	pctx, stopProgress := context.WithCancel(ctx)

	pw, _, ctx := progress.FromContext(ctx)
	defer pw.Close()

	progressDone := make(chan struct{})
	go func() {
		showProgress(pctx, ongoing, p.is.ContentStore, pw)
		close(progressDone)
	}()
	defer func() {
		<-progressDone
	}()

	fetcher, err := p.resolver(g).Fetcher(ctx, p.ref)
	if err != nil {
		stopProgress()
		return nil, err
	}

	platform := platforms.Only(p.platform)

	var nonLayers []digest.Digest

	var (
		schema1Converter *schema1.Converter
		handlers         []images.Handler
	)
	if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		schema1Converter = schema1.NewConverter(p.is.ContentStore, fetcher)
		handlers = append(handlers, schema1Converter)

		// TODO: Optimize to do dispatch and integrate pulling with download manager,
		// leverage existing blob mapping and layer storage
	} else {

		// TODO: need a wrapper snapshot interface that combines content
		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
		// or 2) cachemanager should manage the contentstore
		handlers = append(handlers, images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			switch desc.MediaType {
			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
				nonLayers = append(nonLayers, desc.Digest)
			default:
				return nil, images.ErrSkipDesc
			}
			ongoing.add(desc)
			return nil, nil
		}))

		// Get all the children for a descriptor
		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
		// Filter the children by the platform
		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
		// Limit manifests pulled to the best match in an index
		childrenHandler = images.LimitManifests(childrenHandler, platform, 1)

		handlers = append(handlers,
			remotes.FetchHandler(p.is.ContentStore, fetcher),
			childrenHandler,
		)
	}

	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
		stopProgress()
		return nil, err
	}
	defer stopProgress()

	if schema1Converter != nil {
		p.desc, err = schema1Converter.Convert(ctx)
		if err != nil {
			return nil, err
		}
	}

	mfst, err := images.Manifest(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	config, err := images.Config(ctx, p.is.ContentStore, p.desc, platform)
	if err != nil {
		return nil, err
	}

	dt, err := content.ReadBlob(ctx, p.is.ContentStore, config)
	if err != nil {
		return nil, err
	}

	var img ocispec.Image
	if err := json.Unmarshal(dt, &img); err != nil {
		return nil, err
	}

	if len(mfst.Layers) != len(img.RootFS.DiffIDs) {
		return nil, errors.Errorf("invalid config for manifest")
	}

	pchan := make(chan pkgprogress.Progress, 10)
	defer close(pchan)

	go func() {
		m := map[string]struct {
			st      time.Time
			limiter *rate.Limiter
		}{}
		for p := range pchan {
			if p.Action == "Extracting" {
				st, ok := m[p.ID]
				if !ok {
					st.st = time.Now()
					st.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
					m[p.ID] = st
				}
				var end *time.Time
				if p.LastUpdate || st.limiter.Allow() {
					if p.LastUpdate {
						tm := time.Now()
						end = &tm
					}
					_ = pw.Write("extracting "+p.ID, progress.Status{
						Action:    "extract",
						Started:   &st.st,
						Completed: end,
					})
				}
			}
		}
	}()

	if len(mfst.Layers) == 0 {
		return nil, nil
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(mfst.Layers))

	for i, desc := range mfst.Layers {
		if err := desc.Digest.Validate(); err != nil {
			return nil, errors.Wrap(err, "layer digest could not be validated")
		}
		ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:    desc,
			diffID:  layer.DiffID(img.RootFS.DiffIDs[i]),
			fetcher: fetcher,
			ref:     p.src.Reference,
			is:      p.is,
		})
	}

	defer func() {
		<-progressDone
	}()

	r := image.NewRootFS()
	rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
	stopProgress()
	if err != nil {
		return nil, err
	}

	ref, err := p.getRef(ctx, rootFS.DiffIDs, cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
	release()
	if err != nil {
		return nil, err
	}

	// keep manifest blobs alive for as long as ref is alive so they stay usable for cache
	for _, nl := range nonLayers {
		if err := p.is.LeaseManager.AddResource(ctx, leases.Lease{ID: ref.ID()}, leases.Resource{
			ID:   nl.String(),
			Type: "content",
		}); err != nil {
			return nil, err
		}
	}

	// TODO: handle windows layers for cross platform builds

	if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
		if err := cache.SetRecordType(ref, p.src.RecordType); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
	}

	return ref, nil
}
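
// A condensed, hedged sketch of the containerd handler chain that Dispatch
// drives above: the children handler enumerates referenced descriptors, the
// platform filters keep a single best-matching manifest from an index, and the
// fetch handler writes each visited blob into the content store. contentStore,
// fetcher and root stand in for the values used in Snapshot.
//
//	m := platforms.Only(platforms.DefaultSpec())
//	h := images.Handlers(
//		remotes.FetchHandler(contentStore, fetcher), // fetch every visited descriptor
//		images.LimitManifests(images.FilterPlatforms(images.ChildrenHandler(contentStore), m), m, 1),
//	)
//	err := images.Dispatch(ctx, h, nil, root) // walk from the root descriptor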

// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
	is      *Source
	fetcher remotes.Fetcher
	desc    ocispec.Descriptor
	diffID  layer.DiffID
	ref     ctdreference.Spec
}

func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}

func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}

func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}

func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
	rc, err := ld.fetcher.Fetch(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}
	defer rc.Close()

	refKey := remotes.MakeRefKey(ctx, ld.desc)

	ld.is.ContentStore.Abort(ctx, refKey)

	if err := content.WriteBlob(ctx, ld.is.ContentStore, refKey, rc, ld.desc); err != nil {
		ld.is.ContentStore.Abort(ctx, refKey)
		return nil, 0, err
	}

	ra, err := ld.is.ContentStore.ReaderAt(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}

	return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
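
// Download above follows the containerd ingest pattern, sketched here under
// the same names: Abort drops any stale partial ingest under the ref key,
// WriteBlob streams the fetched layer into the content store while verifying
// the descriptor's size and digest, and ReaderAt re-opens the committed blob.
//
//	cs.Abort(ctx, refKey) // error ignored: there may be nothing to abort
//	if err := content.WriteBlob(ctx, cs, refKey, rc, desc); err != nil {
//		return err
//	}
//	ra, err := cs.ReaderAt(ctx, desc) // read back the verified blob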

func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore.Delete(context.TODO(), ld.desc.Digest)
}

func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.is.MetadataStore.Add(diffID, metadata.V2Metadata{Digest: ld.desc.Digest, SourceRepository: ld.ref.Locator})
}

func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, pw progress.Writer) {
	var (
		ticker   = time.NewTicker(100 * time.Millisecond)
		statuses = map[string]statusInfo{}
		done     bool
	)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			done = true
		}

		resolved := "resolved"
		if !ongoing.isResolved() {
			resolved = "resolving"
		}
		statuses[ongoing.name] = statusInfo{
			Ref:    ongoing.name,
			Status: resolved,
		}

		actives := make(map[string]statusInfo)

		if !done {
			active, err := cs.ListStatuses(ctx)
			if err != nil {
				// log.G(ctx).WithError(err).Error("active check failed")
				continue
			}
			// update status of active entries!
			for _, active := range active {
				actives[active.Ref] = statusInfo{
					Ref:       active.Ref,
					Status:    "downloading",
					Offset:    active.Offset,
					Total:     active.Total,
					StartedAt: active.StartedAt,
					UpdatedAt: active.UpdatedAt,
				}
			}
		}

		// now, update the items in jobs that are not in active
		for _, j := range ongoing.jobs() {
			refKey := remotes.MakeRefKey(ctx, j.Descriptor)
			if a, ok := actives[refKey]; ok {
				started := j.started
				_ = pw.Write(j.Digest.String(), progress.Status{
					Action:  a.Status,
					Total:   int(a.Total),
					Current: int(a.Offset),
					Started: &started,
				})
				continue
			}

			if !j.done {
				info, err := cs.Info(context.TODO(), j.Digest)
				if err != nil {
					if containerderrors.IsNotFound(err) {
						// _ = pw.Write(j.Digest.String(), progress.Status{
						// 	Action: "waiting",
						// })
						continue
					}
				} else {
					j.done = true
				}

				if done || j.done {
					started := j.started
					createdAt := info.CreatedAt
					_ = pw.Write(j.Digest.String(), progress.Status{
						Action:    "done",
						Current:   int(info.Size),
						Total:     int(info.Size),
						Completed: &createdAt,
						Started:   &started,
					})
				}
			}
		}
		if done {
			return
		}
	}
}

// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
type jobs struct {
	name     string
	added    map[digest.Digest]*job
	mu       sync.Mutex
	resolved bool
}

type job struct {
	ocispec.Descriptor
	done    bool
	started time.Time
}

func newJobs(name string) *jobs {
	return &jobs{
		name:  name,
		added: make(map[digest.Digest]*job),
	}
}

func (j *jobs) add(desc ocispec.Descriptor) {
	j.mu.Lock()
	defer j.mu.Unlock()

	if _, ok := j.added[desc.Digest]; ok {
		return
	}
	j.added[desc.Digest] = &job{
		Descriptor: desc,
		started:    time.Now(),
	}
}

func (j *jobs) jobs() []*job {
	j.mu.Lock()
	defer j.mu.Unlock()

	descs := make([]*job, 0, len(j.added))
	for _, j := range j.added {
		descs = append(descs, j)
	}
	return descs
}

func (j *jobs) isResolved() bool {
	j.mu.Lock()
	defer j.mu.Unlock()
	return j.resolved
}

type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}

func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.FromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	_ = pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		_ = pw.Write(id, st)
		_ = pw.Close()
		return err
	}
}

// cacheKeyFromConfig returns a stable digest from image config. If the image
// config is a known OCI image, the ChainID of its layers is used.
func cacheKeyFromConfig(dt []byte) digest.Digest {
	var img ocispec.Image
	err := json.Unmarshal(dt, &img)
	if err != nil {
		logrus.WithError(err).Error("failed to unmarshal image config for cache key")
		return digest.FromBytes(dt)
	}
	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
		return ""
	}
	return identity.ChainID(img.RootFS.DiffIDs)
}
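
// identity.ChainID implements the OCI chain ID: ChainID(L0) = DiffID(L0) and
// ChainID(L0..Ln) = Digest(ChainID(L0..Ln-1) + " " + DiffID(Ln)), so the key
// changes whenever any lower layer changes. A hedged sketch with made-up
// diff IDs:
//
//	diffIDs := []digest.Digest{
//		"sha256:aaaa...", // hypothetical DiffID of the base layer
//		"sha256:bbbb...", // hypothetical DiffID of the next layer
//	}
//	key := identity.ChainID(diffIDs) // stable digest over the ordered layer chain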

// resolveModeToString is the equivalent of github.com/moby/buildkit/solver/llb.ResolveMode.String()
// FIXME: add String method on source.ResolveMode
func resolveModeToString(rm source.ResolveMode) string {
	switch rm {
	case source.ResolveModeDefault:
		return "default"
	case source.ResolveModeForcePull:
		return "pull"
	case source.ResolveModePreferLocal:
		return "local"
	}
	return ""
}

func platformMatches(img *image.Image, p *ocispec.Platform) bool {
	return dimages.OnlyPlatformWithFallback(*p).Match(ocispec.Platform{
		Architecture: img.Architecture,
		OS:           img.OS,
		OSVersion:    img.OSVersion,
		OSFeatures:   img.OSFeatures,
		Variant:      img.Variant,
	})
}