github.com/jfrazelle/docker@v1.1.2-0.20210712172922-bf78e25fe508/builder/builder-next/worker/worker.go (about)

     1  package worker
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io"
     7  	"io/ioutil"
     8  	nethttp "net/http"
     9  	"runtime"
    10  	"strings"
    11  	"time"
    12  
    13  	"github.com/containerd/containerd/content"
    14  	"github.com/containerd/containerd/images"
    15  	"github.com/containerd/containerd/platforms"
    16  	"github.com/containerd/containerd/rootfs"
    17  	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
    18  	"github.com/docker/docker/distribution"
    19  	distmetadata "github.com/docker/docker/distribution/metadata"
    20  	"github.com/docker/docker/distribution/xfer"
    21  	"github.com/docker/docker/image"
    22  	"github.com/docker/docker/layer"
    23  	pkgprogress "github.com/docker/docker/pkg/progress"
    24  	"github.com/moby/buildkit/cache"
    25  	"github.com/moby/buildkit/cache/metadata"
    26  	"github.com/moby/buildkit/client"
    27  	"github.com/moby/buildkit/client/llb"
    28  	"github.com/moby/buildkit/executor"
    29  	"github.com/moby/buildkit/exporter"
    30  	localexporter "github.com/moby/buildkit/exporter/local"
    31  	tarexporter "github.com/moby/buildkit/exporter/tar"
    32  	"github.com/moby/buildkit/frontend"
    33  	"github.com/moby/buildkit/session"
    34  	"github.com/moby/buildkit/snapshot"
    35  	"github.com/moby/buildkit/solver"
    36  	"github.com/moby/buildkit/solver/llbsolver/mounts"
    37  	"github.com/moby/buildkit/solver/llbsolver/ops"
    38  	"github.com/moby/buildkit/solver/pb"
    39  	"github.com/moby/buildkit/source"
    40  	"github.com/moby/buildkit/source/git"
    41  	"github.com/moby/buildkit/source/http"
    42  	"github.com/moby/buildkit/source/local"
    43  	"github.com/moby/buildkit/util/archutil"
    44  	"github.com/moby/buildkit/util/compression"
    45  	"github.com/moby/buildkit/util/contentutil"
    46  	"github.com/moby/buildkit/util/progress"
    47  	digest "github.com/opencontainers/go-digest"
    48  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    49  	"github.com/pkg/errors"
    50  	"github.com/sirupsen/logrus"
    51  	bolt "go.etcd.io/bbolt"
    52  	"golang.org/x/sync/semaphore"
    53  )
    54  
// labelCreatedAt is the descriptor-annotation key under which a layer's
// creation time is recorded; FromRemote reads it back via time.UnmarshalText.
const labelCreatedAt = "buildkit/createdat"
    56  
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
	// GetDiffIDs returns the layer diffIDs for the snapshot identified by
	// key without creating any layers (see GetRemote with createIfNeeded=false).
	GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
	// EnsureLayer creates the layer for key if necessary and returns its
	// diffIDs (see GetRemote with createIfNeeded=true).
	EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
    62  
// Opt defines a structure for creating a worker.
type Opt struct {
	ID                string            // unique worker identifier
	Labels            map[string]string // worker labels reported to clients
	GCPolicy          []client.PruneInfo
	MetadataStore     *metadata.Store
	Executor          executor.Executor
	Snapshotter       snapshot.Snapshotter
	ContentStore      content.Store
	CacheManager      cache.Manager
	ImageSource       *containerimage.Source
	DownloadManager   distribution.RootFSDownloadManager
	V2MetadataService distmetadata.V2MetadataService
	Transport         nethttp.RoundTripper // transport used by the http source
	Exporter          exporter.Exporter    // the "moby" exporter (see Worker.Exporter)
	Layers            LayerAccess
	Platforms         []ocispec.Platform // platforms this worker supports
}
    81  
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
	Opt
	// SourceManager resolves LLB source ops (image, git, http, local).
	SourceManager *source.Manager
}
    88  
    89  // NewWorker instantiates a local worker
    90  func NewWorker(opt Opt) (*Worker, error) {
    91  	sm, err := source.NewManager()
    92  	if err != nil {
    93  		return nil, err
    94  	}
    95  
    96  	cm := opt.CacheManager
    97  	sm.Register(opt.ImageSource)
    98  
    99  	gs, err := git.NewSource(git.Opt{
   100  		CacheAccessor: cm,
   101  		MetadataStore: opt.MetadataStore,
   102  	})
   103  	if err == nil {
   104  		sm.Register(gs)
   105  	} else {
   106  		logrus.Warnf("Could not register builder git source: %s", err)
   107  	}
   108  
   109  	hs, err := http.NewSource(http.Opt{
   110  		CacheAccessor: cm,
   111  		MetadataStore: opt.MetadataStore,
   112  		Transport:     opt.Transport,
   113  	})
   114  	if err == nil {
   115  		sm.Register(hs)
   116  	} else {
   117  		logrus.Warnf("Could not register builder http source: %s", err)
   118  	}
   119  
   120  	ss, err := local.NewSource(local.Opt{
   121  		CacheAccessor: cm,
   122  		MetadataStore: opt.MetadataStore,
   123  	})
   124  	if err == nil {
   125  		sm.Register(ss)
   126  	} else {
   127  		logrus.Warnf("Could not register builder local source: %s", err)
   128  	}
   129  
   130  	return &Worker{
   131  		Opt:           opt,
   132  		SourceManager: sm,
   133  	}, nil
   134  }
   135  
// ID returns worker ID
func (w *Worker) ID() string {
	return w.Opt.ID
}
   140  
// Labels returns map of all worker labels
func (w *Worker) Labels() map[string]string {
	return w.Opt.Labels
}
   145  
   146  // Platforms returns one or more platforms supported by the image.
   147  func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
   148  	if noCache {
   149  		pm := make(map[string]struct{}, len(w.Opt.Platforms))
   150  		for _, p := range w.Opt.Platforms {
   151  			pm[platforms.Format(p)] = struct{}{}
   152  		}
   153  		for _, p := range archutil.SupportedPlatforms(noCache) {
   154  			if _, ok := pm[p]; !ok {
   155  				pp, _ := platforms.Parse(p)
   156  				w.Opt.Platforms = append(w.Opt.Platforms, pp)
   157  			}
   158  		}
   159  	}
   160  	if len(w.Opt.Platforms) == 0 {
   161  		return []ocispec.Platform{platforms.DefaultSpec()}
   162  	}
   163  	return w.Opt.Platforms
   164  }
   165  
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
	return w.Opt.GCPolicy
}
   170  
// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
	return w.Opt.ContentStore
}
   175  
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
	return w.Opt.MetadataStore
}
   180  
   181  // LoadRef loads a reference by ID
   182  func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
   183  	var opts []cache.RefOption
   184  	if hidden {
   185  		opts = append(opts, cache.NoUpdateLastUsed)
   186  	}
   187  	return w.CacheManager().Get(ctx, id, opts...)
   188  }
   189  
// ResolveOp converts a LLB vertex into a LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
	if baseOp, ok := v.Sys().(*pb.Op); ok {
		// TODO do we need to pass a value here? Where should it come from? https://github.com/moby/buildkit/commit/b3cf7c43cfefdfd7a945002c0e76b54e346ab6cf
		// parallelism stays nil, so the resolved ops run without a shared
		// concurrency limit (see TODO above).
		var parallelism *semaphore.Weighted
		switch op := baseOp.Op.(type) {
		case *pb.Op_Source:
			return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
		case *pb.Op_Exec:
			return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
		case *pb.Op_File:
			return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
		case *pb.Op_Build:
			return ops.NewBuildOp(v, op, s, w)
		}
	}
	// Either the vertex does not carry a pb.Op or the op type is unknown.
	return nil, errors.Errorf("could not resolve %v", v)
}
   208  
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	// Delegates to the containerimage source adapter.
	return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
   213  
// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
	return w.CacheManager().DiskUsage(ctx, opt)
}
   218  
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
	return w.CacheManager().Prune(ctx, ch, info...)
}
   223  
   224  // Exporter returns exporter by name
   225  func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
   226  	switch name {
   227  	case "moby":
   228  		return w.Opt.Exporter, nil
   229  	case client.ExporterLocal:
   230  		return localexporter.New(localexporter.Opt{
   231  			SessionManager: sm,
   232  		})
   233  	case client.ExporterTar:
   234  		return tarexporter.New(tarexporter.Opt{
   235  			SessionManager: sm,
   236  		})
   237  	default:
   238  		return nil, errors.Errorf("exporter %q could not be found", name)
   239  	}
   240  }
   241  
   242  // GetRemote returns a remote snapshot reference for a local one
   243  func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
   244  	var diffIDs []layer.DiffID
   245  	var err error
   246  	if !createIfNeeded {
   247  		diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
   248  		if err != nil {
   249  			return nil, err
   250  		}
   251  	} else {
   252  		if err := ref.Finalize(ctx, true); err != nil {
   253  			return nil, err
   254  		}
   255  		diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
   256  		if err != nil {
   257  			return nil, err
   258  		}
   259  	}
   260  
   261  	descriptors := make([]ocispec.Descriptor, len(diffIDs))
   262  	for i, dgst := range diffIDs {
   263  		descriptors[i] = ocispec.Descriptor{
   264  			MediaType: images.MediaTypeDockerSchema2Layer,
   265  			Digest:    digest.Digest(dgst),
   266  			Size:      -1,
   267  		}
   268  	}
   269  
   270  	return &solver.Remote{
   271  		Descriptors: descriptors,
   272  		Provider:    &emptyProvider{},
   273  	}, nil
   274  }
   275  
// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
	// Hold the global cache-mounts lock so no build can acquire one of
	// these mounts while it is being invalidated.
	mu := mounts.CacheMountsLocker()
	mu.Lock()
	defer mu.Unlock()

	for _, id := range ids {
		// Cache mount metadata is indexed under a "cache-dir:" prefix.
		id = "cache-dir:" + id
		sis, err := w.Opt.MetadataStore.Search(id)
		if err != nil {
			return err
		}
		for _, si := range sis {
			for _, k := range si.Indexes() {
				// Match the exact index or a namespaced sub-index
				// ("cache-dir:<id>:<suffix>").
				if k == id || strings.HasPrefix(k, id+":") {
					// Prefer the cache manager's live metadata item so the
					// change is visible to in-memory state, not just bolt.
					if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
						si = siCached
					}
					// Reset the retention policy to default so the
					// snapshot becomes reclaimable.
					if err := cache.CachePolicyDefault(si); err != nil {
						return err
					}
					// Clear the index value so future searches no longer
					// resolve this snapshot as a cache mount.
					si.Queue(func(b *bolt.Bucket) error {
						return si.SetValue(b, k, nil)
					})
					if err := si.Commit(); err != nil {
						return err
					}
					// if ref is unused try to clean it up right away by releasing it
					if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
						go mref.Release(context.TODO())
					}
					break
				}
			}
		}
	}

	// Drop the in-memory map of active cache mounts as well.
	mounts.ClearActiveCacheMounts()
	return nil
}
   316  
// getRef returns a cache ref for the layer chain identified by diffIDs,
// recursively resolving the parent chain (all but the last diffID) first and
// then looking up/creating the final layer by its uncompressed-digest
// annotation. Assumes diffIDs is non-empty.
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
	var parent cache.ImmutableRef
	if len(diffIDs) > 1 {
		var err error
		parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
		if err != nil {
			return nil, err
		}
		// Release the local handle on the parent once GetByBlob has
		// returned; the child keeps the chain alive.
		defer parent.Release(context.TODO())
	}
	// NOTE(review): ctx is ignored here in favor of context.TODO() — looks
	// deliberate (matching the deferred release) but worth confirming.
	return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
		Annotations: map[string]string{
			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
		},
	}, parent, opts...)
}
   333  
// FromRemote converts a remote snapshot reference to a local one
//
// The remote's layer blobs are pulled through docker's distribution download
// manager into the layer store, then wrapped as per-prefix cache refs; the
// ref covering the full chain is returned.
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
	rootfs, err := getLayers(ctx, remote.Descriptors)
	if err != nil {
		return nil, err
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))

	for _, l := range rootfs {
		// ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:     l.Blob,
			diffID:   layer.DiffID(l.Diff.Digest),
			provider: remote.Provider,
			w:        w,
			pctx:     ctx,
		})
	}

	// layerDescriptor.Download copies each blob into the content store;
	// delete those copies once the layer store has its own.
	defer func() {
		for _, l := range rootfs {
			w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
		}
	}()

	r := image.NewRootFS()
	rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
	if err != nil {
		return nil, err
	}
	defer release()

	if len(rootFS.DiffIDs) != len(layers) {
		return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
	}

	// Build one cache ref per chain prefix; creation time and description
	// come from the descriptor annotations when present.
	for i := range rootFS.DiffIDs {
		tm := time.Now()
		if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
			if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
				return nil, err
			}
		}
		descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
		if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
			descr = v
		}
		ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
		if err != nil {
			return nil, err
		}
		// The final ref (full chain) is handed to the caller; intermediate
		// refs are released when this function returns.
		if i == len(remote.Descriptors)-1 {
			return ref, nil
		}
		defer ref.Release(context.TODO())
	}

	return nil, errors.Errorf("unreachable")
}
   394  
// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
	return w.Opt.Executor
}
   399  
// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
	return w.Opt.CacheManager
}
   404  
// discardProgress is a progress sink that drops every update; FromRemote
// passes it to the download manager to silence progress reporting.
type discardProgress struct{}

// WriteProgress discards the given progress update and never fails.
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
	return nil
}
   410  
// layerDescriptor adapts a single remote layer to docker's xfer download
// machinery (see FromRemote): Download copies the blob from provider into
// the worker's content store.
type layerDescriptor struct {
	provider content.Provider
	desc     ocispec.Descriptor // descriptor of the compressed blob to fetch
	diffID   layer.DiffID       // uncompressed digest of the layer
	// ref      ctdreference.Spec
	w    *Worker
	pctx context.Context // pull context used for progress reporting
}
   420  
// Key returns the v2-namespaced cache key for this layer blob.
func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}
   424  
// ID returns the blob digest as the transfer identifier.
func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}
   428  
// DiffID returns the known uncompressed digest of the layer; it never fails.
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}
   432  
// Download copies the blob for ld.desc from ld.provider into the worker's
// content store and returns a reader over the stored blob plus its size.
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
	// Progress is reported on the pull context the descriptor was created
	// with (ld.pctx), not the per-download ctx.
	done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))

	// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
	discardLogs := func(_ []byte) {}
	if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
		return nil, 0, done(err)
	}
	_ = done(nil)

	ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
	if err != nil {
		return nil, 0, err
	}

	// NOTE(review): NopCloser discards Close, so ra is never closed when the
	// caller closes this stream — confirm the content store tolerates
	// unclosed ReaderAts here.
	return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
   450  
// Close is a no-op; blob cleanup is performed by FromRemote's deferred
// content-store deletes rather than per descriptor.
func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
   454  
// Registered records the diffID→blob-digest mapping in the V2 metadata
// service once the layer has been registered in the layer store.
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
   459  
   460  func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
   461  	layers := make([]rootfs.Layer, len(descs))
   462  	for i, desc := range descs {
   463  		diffIDStr := desc.Annotations["containerd.io/uncompressed"]
   464  		if diffIDStr == "" {
   465  			return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
   466  		}
   467  		diffID, err := digest.Parse(diffIDStr)
   468  		if err != nil {
   469  			return nil, err
   470  		}
   471  		layers[i].Diff = ocispec.Descriptor{
   472  			MediaType: ocispec.MediaTypeImageLayer,
   473  			Digest:    diffID,
   474  		}
   475  		layers[i].Blob = ocispec.Descriptor{
   476  			MediaType: desc.MediaType,
   477  			Digest:    desc.Digest,
   478  			Size:      desc.Size,
   479  		}
   480  	}
   481  	return layers, nil
   482  }
   483  
   484  func oneOffProgress(ctx context.Context, id string) func(err error) error {
   485  	pw, _, _ := progress.FromContext(ctx)
   486  	now := time.Now()
   487  	st := progress.Status{
   488  		Started: &now,
   489  	}
   490  	_ = pw.Write(id, st)
   491  	return func(err error) error {
   492  		// TODO: set error on status
   493  		now := time.Now()
   494  		st.Completed = &now
   495  		_ = pw.Write(id, st)
   496  		_ = pw.Close()
   497  		return err
   498  	}
   499  }
   500  
   501  type emptyProvider struct {
   502  }
   503  
   504  func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
   505  	return nil, errors.Errorf("ReaderAt not implemented for empty provider")
   506  }