github.com/docker/engine@v22.0.0-20211208180946-d456264580cf+incompatible/builder/builder-next/worker/worker.go (about)

     1  package worker
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io"
     7  	nethttp "net/http"
     8  	"runtime"
     9  	"strings"
    10  	"time"
    11  
    12  	"github.com/containerd/containerd/content"
    13  	"github.com/containerd/containerd/images"
    14  	"github.com/containerd/containerd/platforms"
    15  	"github.com/containerd/containerd/rootfs"
    16  	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
    17  	"github.com/docker/docker/distribution"
    18  	distmetadata "github.com/docker/docker/distribution/metadata"
    19  	"github.com/docker/docker/distribution/xfer"
    20  	"github.com/docker/docker/image"
    21  	"github.com/docker/docker/layer"
    22  	pkgprogress "github.com/docker/docker/pkg/progress"
    23  	"github.com/moby/buildkit/cache"
    24  	"github.com/moby/buildkit/cache/metadata"
    25  	"github.com/moby/buildkit/client"
    26  	"github.com/moby/buildkit/client/llb"
    27  	"github.com/moby/buildkit/executor"
    28  	"github.com/moby/buildkit/exporter"
    29  	localexporter "github.com/moby/buildkit/exporter/local"
    30  	tarexporter "github.com/moby/buildkit/exporter/tar"
    31  	"github.com/moby/buildkit/frontend"
    32  	"github.com/moby/buildkit/session"
    33  	"github.com/moby/buildkit/snapshot"
    34  	"github.com/moby/buildkit/solver"
    35  	"github.com/moby/buildkit/solver/llbsolver/mounts"
    36  	"github.com/moby/buildkit/solver/llbsolver/ops"
    37  	"github.com/moby/buildkit/solver/pb"
    38  	"github.com/moby/buildkit/source"
    39  	"github.com/moby/buildkit/source/git"
    40  	"github.com/moby/buildkit/source/http"
    41  	"github.com/moby/buildkit/source/local"
    42  	"github.com/moby/buildkit/util/archutil"
    43  	"github.com/moby/buildkit/util/compression"
    44  	"github.com/moby/buildkit/util/contentutil"
    45  	"github.com/moby/buildkit/util/progress"
    46  	digest "github.com/opencontainers/go-digest"
    47  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    48  	"github.com/pkg/errors"
    49  	"github.com/sirupsen/logrus"
    50  	bolt "go.etcd.io/bbolt"
    51  	"golang.org/x/sync/semaphore"
    52  )
    53  
// labelCreatedAt is the descriptor-annotation key holding a layer's creation
// time; FromRemote parses it back via time.UnmarshalText.
const labelCreatedAt = "buildkit/createdat"
    55  
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
	// GetDiffIDs returns the diff IDs already recorded for the snapshot key
	// (used by GetRemote when createIfNeeded is false).
	GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
	// EnsureLayer materializes layer data for the snapshot key if necessary
	// and returns the resulting diff IDs.
	EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
    61  
// Opt defines a structure for creating a worker.
// All dependencies the worker needs are injected here; the Worker embeds Opt
// and exposes most fields through accessor methods.
type Opt struct {
	ID                string
	Labels            map[string]string
	GCPolicy          []client.PruneInfo
	MetadataStore     *metadata.Store
	Executor          executor.Executor
	Snapshotter       snapshot.Snapshotter
	ContentStore      content.Store
	CacheManager      cache.Manager
	ImageSource       *containerimage.Source
	DownloadManager   distribution.RootFSDownloadManager
	V2MetadataService distmetadata.V2MetadataService
	Transport         nethttp.RoundTripper
	Exporter          exporter.Exporter
	Layers            LayerAccess
	Platforms         []ocispec.Platform
}
    80  
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
	Opt
	// SourceManager resolves LLB source ops (image, git, http, local);
	// populated by NewWorker.
	SourceManager *source.Manager
}
    87  
    88  // NewWorker instantiates a local worker
    89  func NewWorker(opt Opt) (*Worker, error) {
    90  	sm, err := source.NewManager()
    91  	if err != nil {
    92  		return nil, err
    93  	}
    94  
    95  	cm := opt.CacheManager
    96  	sm.Register(opt.ImageSource)
    97  
    98  	gs, err := git.NewSource(git.Opt{
    99  		CacheAccessor: cm,
   100  		MetadataStore: opt.MetadataStore,
   101  	})
   102  	if err == nil {
   103  		sm.Register(gs)
   104  	} else {
   105  		logrus.Warnf("Could not register builder git source: %s", err)
   106  	}
   107  
   108  	hs, err := http.NewSource(http.Opt{
   109  		CacheAccessor: cm,
   110  		MetadataStore: opt.MetadataStore,
   111  		Transport:     opt.Transport,
   112  	})
   113  	if err == nil {
   114  		sm.Register(hs)
   115  	} else {
   116  		logrus.Warnf("Could not register builder http source: %s", err)
   117  	}
   118  
   119  	ss, err := local.NewSource(local.Opt{
   120  		CacheAccessor: cm,
   121  		MetadataStore: opt.MetadataStore,
   122  	})
   123  	if err == nil {
   124  		sm.Register(ss)
   125  	} else {
   126  		logrus.Warnf("Could not register builder local source: %s", err)
   127  	}
   128  
   129  	return &Worker{
   130  		Opt:           opt,
   131  		SourceManager: sm,
   132  	}, nil
   133  }
   134  
// ID returns worker ID as configured in Opt.
func (w *Worker) ID() string {
	return w.Opt.ID
}
   139  
// Labels returns map of all worker labels as configured in Opt.
// The map is returned directly, not copied.
func (w *Worker) Labels() map[string]string {
	return w.Opt.Labels
}
   144  
   145  // Platforms returns one or more platforms supported by the image.
   146  func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
   147  	if noCache {
   148  		pm := make(map[string]struct{}, len(w.Opt.Platforms))
   149  		for _, p := range w.Opt.Platforms {
   150  			pm[platforms.Format(p)] = struct{}{}
   151  		}
   152  		for _, p := range archutil.SupportedPlatforms(noCache) {
   153  			if _, ok := pm[p]; !ok {
   154  				pp, _ := platforms.Parse(p)
   155  				w.Opt.Platforms = append(w.Opt.Platforms, pp)
   156  			}
   157  		}
   158  	}
   159  	if len(w.Opt.Platforms) == 0 {
   160  		return []ocispec.Platform{platforms.DefaultSpec()}
   161  	}
   162  	return w.Opt.Platforms
   163  }
   164  
// GCPolicy returns automatic GC Policy as configured in Opt.
func (w *Worker) GCPolicy() []client.PruneInfo {
	return w.Opt.GCPolicy
}
   169  
// ContentStore returns the worker's containerd content store.
func (w *Worker) ContentStore() content.Store {
	return w.Opt.ContentStore
}
   174  
// MetadataStore returns the metadata store shared with the cache sources.
func (w *Worker) MetadataStore() *metadata.Store {
	return w.Opt.MetadataStore
}
   179  
   180  // LoadRef loads a reference by ID
   181  func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
   182  	var opts []cache.RefOption
   183  	if hidden {
   184  		opts = append(opts, cache.NoUpdateLastUsed)
   185  	}
   186  	return w.CacheManager().Get(ctx, id, opts...)
   187  }
   188  
// ResolveOp converts a LLB vertex into a LLB operation by dispatching on the
// vertex's pb.Op payload type (source, exec, file, or build). Vertices whose
// Sys() value is not a *pb.Op cannot be resolved and yield an error.
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
	if baseOp, ok := v.Sys().(*pb.Op); ok {
		// TODO do we need to pass a value here? Where should it come from? https://github.com/moby/buildkit/commit/b3cf7c43cfefdfd7a945002c0e76b54e346ab6cf
		// parallelism stays nil for now — see the TODO above.
		var parallelism *semaphore.Weighted
		switch op := baseOp.Op.(type) {
		case *pb.Op_Source:
			return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
		case *pb.Op_Exec:
			return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
		case *pb.Op_File:
			return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
		case *pb.Op_Build:
			return ops.NewBuildOp(v, op, s, w)
		}
	}
	return nil, errors.Errorf("could not resolve %v", v)
}
   207  
// ResolveImageConfig returns image config for an image.
// It delegates directly to the worker's containerimage source.
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
	return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
   212  
// DiskUsage returns disk usage report from the cache manager.
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
	return w.CacheManager().DiskUsage(ctx, opt)
}
   217  
// Prune deletes reclaimable build cache, streaming per-record usage info to ch.
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
	return w.CacheManager().Prune(ctx, ch, info...)
}
   222  
   223  // Exporter returns exporter by name
   224  func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
   225  	switch name {
   226  	case "moby":
   227  		return w.Opt.Exporter, nil
   228  	case client.ExporterLocal:
   229  		return localexporter.New(localexporter.Opt{
   230  			SessionManager: sm,
   231  		})
   232  	case client.ExporterTar:
   233  		return tarexporter.New(tarexporter.Opt{
   234  			SessionManager: sm,
   235  		})
   236  	default:
   237  		return nil, errors.Errorf("exporter %q could not be found", name)
   238  	}
   239  }
   240  
   241  // GetRemote returns a remote snapshot reference for a local one
   242  func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
   243  	var diffIDs []layer.DiffID
   244  	var err error
   245  	if !createIfNeeded {
   246  		diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
   247  		if err != nil {
   248  			return nil, err
   249  		}
   250  	} else {
   251  		if err := ref.Finalize(ctx, true); err != nil {
   252  			return nil, err
   253  		}
   254  		diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
   255  		if err != nil {
   256  			return nil, err
   257  		}
   258  	}
   259  
   260  	descriptors := make([]ocispec.Descriptor, len(diffIDs))
   261  	for i, dgst := range diffIDs {
   262  		descriptors[i] = ocispec.Descriptor{
   263  			MediaType: images.MediaTypeDockerSchema2Layer,
   264  			Digest:    digest.Digest(dgst),
   265  			Size:      -1,
   266  		}
   267  	}
   268  
   269  	return &solver.Remote{
   270  		Descriptors: descriptors,
   271  		Provider:    &emptyProvider{},
   272  	}, nil
   273  }
   274  
// PruneCacheMounts removes the current cache snapshots for specified IDs.
//
// It holds the global cache-mounts lock for the duration, so no new cache
// mounts can be taken while pruning. For each id it searches the metadata
// store under the "cache-dir:" prefix, resets the matching records to the
// default cache policy, clears the index entry, and — if the underlying ref
// is currently unused — releases it asynchronously so it can be cleaned up.
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
	mu := mounts.CacheMountsLocker()
	mu.Lock()
	defer mu.Unlock()

	for _, id := range ids {
		id = "cache-dir:" + id
		sis, err := w.Opt.MetadataStore.Search(id)
		if err != nil {
			return err
		}
		for _, si := range sis {
			for _, k := range si.Indexes() {
				// Match the exact index or any sharing-scoped variant ("id:...").
				if k == id || strings.HasPrefix(k, id+":") {
					// Prefer the cache manager's live copy of the metadata
					// record if it has one.
					if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
						si = siCached
					}
					if err := cache.CachePolicyDefault(si); err != nil {
						return err
					}
					// Drop the index value so the record no longer matches
					// future cache-mount lookups.
					si.Queue(func(b *bolt.Bucket) error {
						return si.SetValue(b, k, nil)
					})
					if err := si.Commit(); err != nil {
						return err
					}
					// if ref is unused try to clean it up right away by releasing it
					if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
						go mref.Release(context.TODO())
					}
					break
				}
			}
		}
	}

	// Invalidate in-memory cache-mount state now that records changed.
	mounts.ClearActiveCacheMounts()
	return nil
}
   315  
// getRef returns an immutable cache ref for the layer chain described by
// diffIDs, recursively building (and holding until return) the parent ref for
// all but the last diff ID. The blob is identified solely by its
// "containerd.io/uncompressed" annotation.
//
// NOTE(review): diffIDs must be non-empty — an empty slice would index at -1.
// Callers in this file always pass at least one element.
// NOTE(review): GetByBlob is called with context.TODO() rather than ctx —
// presumably so the lookup is not tied to the caller's cancellation; confirm
// before changing.
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
	var parent cache.ImmutableRef
	if len(diffIDs) > 1 {
		var err error
		parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
		if err != nil {
			return nil, err
		}
		// The parent only needs to stay alive until GetByBlob records it.
		defer parent.Release(context.TODO())
	}
	return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
		Annotations: map[string]string{
			"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
		},
	}, parent, opts...)
}
   332  
// FromRemote converts a remote snapshot reference to a local one.
//
// It downloads all layer blobs through the moby download manager, then builds
// a chain of cache refs (one per diff ID) and returns the ref for the full
// chain. Blobs copied into the content store are deleted again on exit — the
// layer store, not the content store, owns the data afterwards. Creation time
// and description for each ref are taken from the remote descriptors'
// annotations when present.
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
	rootfs, err := getLayers(ctx, remote.Descriptors)
	if err != nil {
		return nil, err
	}

	layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))

	for _, l := range rootfs {
		// ongoing.add(desc)
		layers = append(layers, &layerDescriptor{
			desc:     l.Blob,
			diffID:   layer.DiffID(l.Diff.Digest),
			provider: remote.Provider,
			w:        w,
			pctx:     ctx,
		})
	}

	// Blobs are only needed in the content store transiently during download.
	defer func() {
		for _, l := range rootfs {
			w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
		}
	}()

	r := image.NewRootFS()
	rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
	if err != nil {
		return nil, err
	}
	defer release()

	if len(rootFS.DiffIDs) != len(layers) {
		return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
	}

	for i := range rootFS.DiffIDs {
		// Default creation time is "now" unless the descriptor carries a
		// buildkit/createdat annotation.
		tm := time.Now()
		if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
			if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
				return nil, err
			}
		}
		descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
		if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
			descr = v
		}
		ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
		if err != nil {
			return nil, err
		}
		// The last ref in the chain is returned still held; all intermediate
		// refs are released when this function returns.
		if i == len(remote.Descriptors)-1 {
			return ref, nil
		}
		defer ref.Release(context.TODO())
	}

	// Unreachable: the loop always returns on its last iteration
	// (len(rootFS.DiffIDs) == len(remote.Descriptors) was checked above).
	return nil, errors.Errorf("unreachable")
}
   393  
// Executor returns executor.Executor for running processes, as configured in Opt.
func (w *Worker) Executor() executor.Executor {
	return w.Opt.Executor
}
   398  
// CacheManager returns cache.Manager for accessing local storage, as configured in Opt.
func (w *Worker) CacheManager() cache.Manager {
	return w.Opt.CacheManager
}
   403  
// discardProgress is a pkgprogress.Output that drops all progress updates;
// used by FromRemote since buildkit progress is reported separately.
type discardProgress struct{}

// WriteProgress discards the given progress update and always succeeds.
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
	return nil
}
   409  
// layerDescriptor adapts a remote layer blob to moby's xfer.DownloadDescriptor
// interface so the download manager can fetch it through a content provider.
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
	provider content.Provider   // source of the blob bytes
	desc     ocispec.Descriptor // the compressed blob descriptor
	diffID   layer.DiffID       // uncompressed digest of the layer
	// ref      ctdreference.Spec
	w    *Worker
	pctx context.Context // parent context used for progress reporting
}
   419  
// Key returns the v2-prefixed blob digest used as the download cache key.
func (ld *layerDescriptor) Key() string {
	return "v2:" + ld.desc.Digest.String()
}
   423  
// ID returns the blob digest string identifying this layer download.
func (ld *layerDescriptor) ID() string {
	return ld.desc.Digest.String()
}
   427  
// DiffID returns the known uncompressed digest of the layer; never errors.
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
	return ld.diffID, nil
}
   431  
   432  func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
   433  	done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
   434  
   435  	// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
   436  	discardLogs := func(_ []byte) {}
   437  	if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
   438  		return nil, 0, done(err)
   439  	}
   440  	_ = done(nil)
   441  
   442  	ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
   443  	if err != nil {
   444  		return nil, 0, err
   445  	}
   446  
   447  	return io.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
   448  }
   449  
// Close is a no-op; the blob is deliberately left in the content store
// (FromRemote deletes it once the whole download finishes).
func (ld *layerDescriptor) Close() {
	// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
   453  
// Registered is called once the layer is in the layer store.
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
   458  
   459  func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
   460  	layers := make([]rootfs.Layer, len(descs))
   461  	for i, desc := range descs {
   462  		diffIDStr := desc.Annotations["containerd.io/uncompressed"]
   463  		if diffIDStr == "" {
   464  			return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
   465  		}
   466  		diffID, err := digest.Parse(diffIDStr)
   467  		if err != nil {
   468  			return nil, err
   469  		}
   470  		layers[i].Diff = ocispec.Descriptor{
   471  			MediaType: ocispec.MediaTypeImageLayer,
   472  			Digest:    diffID,
   473  		}
   474  		layers[i].Blob = ocispec.Descriptor{
   475  			MediaType: desc.MediaType,
   476  			Digest:    desc.Digest,
   477  			Size:      desc.Size,
   478  		}
   479  	}
   480  	return layers, nil
   481  }
   482  
   483  func oneOffProgress(ctx context.Context, id string) func(err error) error {
   484  	pw, _, _ := progress.FromContext(ctx)
   485  	now := time.Now()
   486  	st := progress.Status{
   487  		Started: &now,
   488  	}
   489  	_ = pw.Write(id, st)
   490  	return func(err error) error {
   491  		// TODO: set error on status
   492  		now := time.Now()
   493  		st.Completed = &now
   494  		_ = pw.Write(id, st)
   495  		_ = pw.Close()
   496  		return err
   497  	}
   498  }
   499  
// emptyProvider is a content.Provider whose blobs can never be read; used by
// GetRemote, which only reports descriptors with unknown sizes.
type emptyProvider struct {
}

// ReaderAt always fails, since the empty provider holds no content.
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
	return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}