github.com/tilt-dev/tilt@v0.36.0/internal/build/docker_builder.go (about)

     1  package build
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"fmt"
     7  	"io"
     8  	"os"
     9  	"path/filepath"
    10  	"regexp"
    11  	"strings"
    12  	"time"
    13  
    14  	"github.com/distribution/reference"
    15  	"github.com/docker/docker/api/types"
    16  	"github.com/docker/docker/client"
    17  	"github.com/docker/docker/pkg/jsonmessage"
    18  	controlapi "github.com/moby/buildkit/api/services/control"
    19  	"github.com/moby/buildkit/session/filesync"
    20  	"github.com/opencontainers/go-digest"
    21  	"github.com/pkg/errors"
    22  	"github.com/tonistiigi/fsutil"
    23  	fsutiltypes "github.com/tonistiigi/fsutil/types"
    24  	"golang.org/x/sync/errgroup"
    25  	ktypes "k8s.io/apimachinery/pkg/types"
    26  
    27  	"github.com/tilt-dev/tilt/internal/container"
    28  	"github.com/tilt-dev/tilt/internal/docker"
    29  	"github.com/tilt-dev/tilt/internal/dockerfile"
    30  	"github.com/tilt-dev/tilt/internal/k8s"
    31  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    32  	"github.com/tilt-dev/tilt/pkg/logger"
    33  	"github.com/tilt-dev/tilt/pkg/model"
    34  )
    35  
// DockerBuilder builds and tags Docker images through a docker.Client,
// using either Buildkit or the legacy builder (see buildToDigest).
type DockerBuilder struct {
	dCli docker.Client

	// A set of extra labels to attach to all builds
	// created by this image builder.
	//
	// By default, all builds are labeled with a build mode.
	extraLabels dockerfile.Labels
}
    45  
// DockerKubeConnection describes how a docker instance connects to kubernetes instances.
type DockerKubeConnection interface {
	// WillBuildToKubeContext returns whether this docker builder is going to
	// build images directly into the given kubernetes context.
	WillBuildToKubeContext(kctx k8s.KubeContext) bool
}
    51  
    52  func NewDockerBuilder(dCli docker.Client, extraLabels dockerfile.Labels) *DockerBuilder {
    53  	return &DockerBuilder{
    54  		dCli:        dCli,
    55  		extraLabels: extraLabels,
    56  	}
    57  }
    58  
// WillBuildToKubeContext returns whether this docker builder is going to
// build to the given kubernetes context, delegating to the Docker
// client's environment.
func (d *DockerBuilder) WillBuildToKubeContext(kctx k8s.KubeContext) bool {
	return d.dCli.Env().WillBuildToKubeContext(kctx)
}
    62  
    63  func (d *DockerBuilder) DumpImageDeployRef(ctx context.Context, ref string) (reference.NamedTagged, error) {
    64  	refParsed, err := container.ParseNamed(ref)
    65  	if err != nil {
    66  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    67  	}
    68  
    69  	data, _, err := d.dCli.ImageInspectWithRaw(ctx, ref)
    70  	if err != nil {
    71  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    72  	}
    73  	dig := digest.Digest(data.ID)
    74  
    75  	tag, err := digestAsTag(dig)
    76  	if err != nil {
    77  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    78  	}
    79  
    80  	tagged, err := reference.WithTag(refParsed, tag)
    81  	if err != nil {
    82  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    83  	}
    84  
    85  	return tagged, nil
    86  }
    87  
    88  // Tag the digest with the given name and wm-tilt tag.
    89  func (d *DockerBuilder) TagRefs(ctx context.Context, refs container.RefSet, dig digest.Digest) (container.TaggedRefs, error) {
    90  	tag, err := digestAsTag(dig)
    91  	if err != nil {
    92  		return container.TaggedRefs{}, errors.Wrap(err, "TagImage")
    93  	}
    94  
    95  	tagged, err := refs.AddTagSuffix(tag)
    96  	if err != nil {
    97  		return container.TaggedRefs{}, errors.Wrap(err, "TagImage")
    98  	}
    99  
   100  	// Docker client only needs to care about the localImage
   101  	err = d.dCli.ImageTag(ctx, dig.String(), tagged.LocalRef.String())
   102  	if err != nil {
   103  		return container.TaggedRefs{}, errors.Wrap(err, "TagImage#ImageTag")
   104  	}
   105  
   106  	return tagged, nil
   107  }
   108  
   109  // Push the specified ref up to the docker registry specified in the name.
   110  //
   111  // TODO(nick) In the future, I would like us to be smarter about checking if the kubernetes cluster
   112  // we're running in has access to the given registry. And if it doesn't, we should either emit an
   113  // error, or push to a registry that kubernetes does have access to (e.g., a local registry).
   114  func (d *DockerBuilder) PushImage(ctx context.Context, ref reference.NamedTagged) error {
   115  	l := logger.Get(ctx)
   116  
   117  	imagePushResponse, err := d.dCli.ImagePush(ctx, ref)
   118  	if err != nil {
   119  		return errors.Wrap(err, "PushImage#ImagePush")
   120  	}
   121  
   122  	defer func() {
   123  		err := imagePushResponse.Close()
   124  		if err != nil {
   125  			l.Infof("unable to close imagePushResponse: %s", err)
   126  		}
   127  	}()
   128  
   129  	_, _, err = readDockerOutput(ctx, imagePushResponse)
   130  	if err != nil {
   131  		return errors.Wrapf(err, "pushing image %q", ref.Name())
   132  	}
   133  
   134  	return nil
   135  }
   136  
   137  func (d *DockerBuilder) ImageExists(ctx context.Context, ref reference.NamedTagged) (bool, error) {
   138  	_, _, err := d.dCli.ImageInspectWithRaw(ctx, ref.String())
   139  	if err != nil {
   140  		if client.IsErrNotFound(err) {
   141  			return false, nil
   142  		}
   143  		return false, errors.Wrapf(err, "error checking if %s exists", ref.String())
   144  	}
   145  	return true, nil
   146  }
   147  
// BuildImage builds the Dockerfile described by spec and tags the result
// with each ref in refs.
//
// Returns the tagged refs plus the per-stage statuses reported by the
// builder. If the build hits a known Buildkit corruption failure, it is
// retried once with the legacy builder.
func (d *DockerBuilder) BuildImage(ctx context.Context, ps *PipelineState, refs container.RefSet,
	spec v1alpha1.DockerImageSpec,
	cluster *v1alpha1.Cluster,
	imageMaps map[ktypes.NamespacedName]*v1alpha1.ImageMap,
	filter model.PathMatcher) (container.TaggedRefs, []v1alpha1.DockerImageStageStatus, error) {
	// Resolve the target platform from the cluster, then inject the refs
	// of already-built dependency images into the spec.
	spec = InjectClusterPlatform(spec, cluster)
	spec, err := InjectImageDependencies(spec, imageMaps)
	if err != nil {
		return container.TaggedRefs{}, nil, err
	}

	platformSuffix := ""
	if spec.Platform != "" {
		platformSuffix = fmt.Sprintf(" for platform %s", spec.Platform)
	}
	logger.Get(ctx).Infof("Building Dockerfile%s:\n%s\n", platformSuffix, indent(spec.DockerfileContents, "  "))

	ps.StartBuildStep(ctx, "Building image")
	allowBuildkit := true
	ctx = ps.AttachLogger(ctx)
	digest, stages, err := d.buildToDigest(ctx, spec, filter, allowBuildkit)
	if err != nil {
		isMysteriousCorruption := strings.Contains(err.Error(), "failed precondition") &&
			strings.Contains(err.Error(), "failed commit on ref")
		if isMysteriousCorruption {
			// We've seen weird corruption issues on buildkit
			// that look like
			//
			// Build Failed: ImageBuild: failed to create LLB definition:
			// failed commit on ref "unknown-sha256:b72fa303a3a5fbf52c723bfcfb93948bb53b3d7e8d22418e9d171a27ad7dcd84":
			// "unknown-sha256:b72fa303a3a5fbf52c723bfcfb93948bb53b3d7e8d22418e9d171a27ad7dcd84"
			// failed size validation: 80941 != 80929: failed precondition
			//
			// Build Failed: ImageBuild: failed to load cache key: failed commit on
			// ref
			// "unknown-sha256:d8ad5905555e3af3fa9122515f2b3d4762d4e8734b7ed12f1271bcdee3541267":
			// unexpected commit size 69764, expected 76810: failed precondition
			//
			// If this happens, just try again without buildkit.
			allowBuildkit = false
			logger.Get(ctx).Infof("Detected Buildkit corruption. Rebuilding without Buildkit")
			digest, stages, err = d.buildToDigest(ctx, spec, filter, allowBuildkit)
		}

		// Either the original error (non-corruption case) or the retry's error.
		if err != nil {
			return container.TaggedRefs{}, stages, err
		}
	}

	tagged, err := d.TagRefs(ctx, refs, digest)
	if err != nil {
		return container.TaggedRefs{}, stages, errors.Wrap(err, "docker tag")
	}

	return tagged, stages, nil
}
   204  
// A helper function that builds the paths to the given docker image,
// then returns the output digest.
//
// Two transport modes: with Buildkit, the context is served lazily over
// the fssync protocol; otherwise the filtered context is tarred and
// streamed to the daemon through a pipe.
func (d *DockerBuilder) buildToDigest(ctx context.Context, spec v1alpha1.DockerImageSpec, filter model.PathMatcher, allowBuildkit bool) (digest.Digest, []v1alpha1.DockerImageStageStatus, error) {
	// Cancelling this context tears down the build session and any
	// goroutines attached to it.
	ctx, cancelBuildSession := context.WithCancel(ctx)
	defer cancelBuildSession()

	g, ctx := errgroup.WithContext(ctx)
	var contextReader io.Reader

	buildContext := spec.Context

	// Treat context: "-" as an empty context.
	if buildContext == "-" {
		emptyContextDir, err := os.MkdirTemp("", "tilt-dockercontext-")
		if err != nil {
			return "", nil, fmt.Errorf("creating context directory: %v", err)
		}

		defer func() {
			_ = os.RemoveAll(emptyContextDir)
		}()

		buildContext = emptyContextDir
	}

	// Fail fast if the context directory isn't accessible.
	_, err := os.Stat(buildContext)
	if err != nil {
		return "", nil, fmt.Errorf("reading build context: %v", err)
	}

	builderVersion, err := d.dCli.BuilderVersion(ctx)
	if err != nil {
		return "", nil, err
	}

	// Buildkit allows us to use a fs sync server instead of uploading up-front.
	useFSSync := allowBuildkit && builderVersion == types.BuilderBuildKit
	if !useFSSync {
		// Legacy path: tar the filtered context concurrently and stream
		// it to the daemon while the build runs.
		pipeReader, pipeWriter := io.Pipe()
		w := NewProgressWriter(ctx, pipeWriter)
		w.Init()

		// TODO(nick): Express tarring as a build stage.
		g.Go(func() error {
			paths := []PathMapping{
				{
					LocalPath:     buildContext,
					ContainerPath: "/",
				},
			}
			err := tarContextAndUpdateDf(ctx, w, dockerfile.Dockerfile(spec.DockerfileContents), paths, filter)
			if err != nil {
				// Propagate the tar error to the reading side of the pipe.
				_ = pipeWriter.CloseWithError(err)
			} else {
				_ = pipeWriter.Close()
			}
			w.Close() // Print the final progress message
			return nil
		})

		contextReader = pipeReader
		defer func() {
			_ = pipeReader.Close()
		}()
	}

	options := Options(contextReader, spec)
	if useFSSync {
		// Buildkit path: serve the context dir and a temp Dockerfile dir
		// over fssync instead of uploading a tarball (see toDirSource).
		dockerfileDir, err := writeTempDockerfileSyncdir(spec.DockerfileContents)
		if err != nil {
			return "", nil, err
		}
		options.DirSource, err = toDirSource(buildContext, dockerfileDir, filter)
		if err != nil {
			return "", nil, err
		}
		options.Dockerfile = DockerfileName

		defer func() {
			_ = os.RemoveAll(dockerfileDir)
		}()
	}
	if !allowBuildkit {
		options.ForceLegacyBuilder = true
	}

	var digest digest.Digest
	var status []v1alpha1.DockerImageStageStatus
	g.Go(func() error {
		// Once the build response is fully consumed, cancel the session so
		// the other goroutines in the group wind down.
		defer cancelBuildSession()
		imageBuildResponse, err := d.dCli.ImageBuild(
			ctx,
			g,
			contextReader,
			options,
		)
		if err != nil {
			return err
		}

		defer func() {
			err := imageBuildResponse.Body.Close()
			if err != nil {
				logger.Get(ctx).Infof("unable to close imageBuildResponse: %s", err)
			}
		}()

		digest, status, err = d.getDigestFromBuildOutput(ctx, imageBuildResponse.Body)
		return err
	})

	err = g.Wait()
	return digest, status, err
}
   319  
   320  func (d *DockerBuilder) getDigestFromBuildOutput(ctx context.Context, reader io.Reader) (digest.Digest, []v1alpha1.DockerImageStageStatus, error) {
   321  	result, stageStatuses, err := readDockerOutput(ctx, reader)
   322  	if err != nil {
   323  		return "", stageStatuses, errors.Wrap(err, "ImageBuild")
   324  	}
   325  
   326  	digest, err := d.getDigestFromDockerOutput(ctx, result)
   327  	if err != nil {
   328  		return "", stageStatuses, errors.Wrap(err, "getDigestFromBuildOutput")
   329  	}
   330  
   331  	return digest, stageStatuses, nil
   332  }
   333  
   334  var dockerBuildCleanupRexes = []*regexp.Regexp{
   335  	// the "runc did not determinate sucessfully" just seems redundant on top of "executor failed running"
   336  	// nolint
   337  	regexp.MustCompile("(executor failed running.*): runc did not terminate sucessfully"), // sucessfully (sic)
   338  	// when a file is missing, it generates an error like "failed to compute cache key: foo.txt not found: not found"
   339  	// most of that seems redundant and/or confusing
   340  	regexp.MustCompile("failed to compute cache key: (.* not found): not found"),
   341  	regexp.MustCompile("failed to compute cache key: (?:failed to walk [^ ]+): lstat (?:/.*buildkit-[^/]*/)?(.*: no such file or directory)"),
   342  }
   343  
   344  // buildkit emits errors that might be useful for people who are into buildkit internals, but aren't really
   345  // at the optimal level for people who just wanna build something
   346  // ideally we'll get buildkit to emit errors with more structure so that we don't have to rely on string manipulation,
   347  // but to have impact via that route, we've got to get the change in and users have to upgrade to a version of docker
   348  // that has that change. So let's clean errors up here until that's in a good place.
   349  func cleanupDockerBuildError(err string) string {
   350  	// this is pretty much always the same, and meaningless noise to most users
   351  	ret := strings.TrimPrefix(err, "failed to solve with frontend dockerfile.v0: ")
   352  	ret = strings.TrimPrefix(ret, "failed to solve with frontend gateway.v0: ")
   353  	ret = strings.TrimPrefix(ret, "rpc error: code = Unknown desc = ")
   354  	ret = strings.TrimPrefix(ret, "failed to build LLB: ")
   355  	for _, re := range dockerBuildCleanupRexes {
   356  		ret = re.ReplaceAllString(ret, "$1")
   357  	}
   358  	return ret
   359  }
   360  
// dockerMessageID identifies one progress stream within the docker JSON
// message protocol (the message's ID field); used to rate-limit
// progress printing per stream.
type dockerMessageID string
   362  
// Docker API commands stream back a sequence of JSON messages.
//
// The result of the command is in a JSON object with field "aux".
//
// Errors are reported in a JSON object with field "errorDetail"
//
// NOTE(nick): I haven't found a good document describing this protocol
// but you can find it implemented in Docker here:
// https://github.com/moby/moby/blob/1da7d2eebf0a7a60ce585f89a05cebf7f631019c/pkg/jsonmessage/jsonmessage.go#L139
func readDockerOutput(ctx context.Context, reader io.Reader) (dockerOutput, []v1alpha1.DockerImageStageStatus, error) {
	// When each progress stream (keyed by message ID) was last echoed,
	// used for the 2-second rate limit below.
	progressLastPrinted := make(map[dockerMessageID]time.Time)

	result := dockerOutput{}
	decoder := json.NewDecoder(reader)
	b := newBuildkitPrinter(logger.Get(ctx))

	for decoder.More() {
		message := jsonmessage.JSONMessage{}
		err := decoder.Decode(&message)
		if err != nil {
			return dockerOutput{}, b.toStageStatuses(), errors.Wrap(err, "decoding docker output")
		}

		// Plain log output; also the place old daemons report the built image ID.
		if len(message.Stream) > 0 {
			msg := message.Stream

			builtDigestMatch := oldDigestRegexp.FindStringSubmatch(msg)
			if len(builtDigestMatch) >= 2 {
				// Old versions of docker (pre 1.30) didn't send down an aux message.
				result.shortDigest = builtDigestMatch[1]
			}

			logger.Get(ctx).Write(logger.InfoLvl, []byte(msg))
		}

		// Errors arrive in two shapes; clean buildkit noise off both.
		if message.ErrorMessage != "" {
			return dockerOutput{}, b.toStageStatuses(), errors.New(cleanupDockerBuildError(message.ErrorMessage))
		}

		if message.Error != nil {
			return dockerOutput{}, b.toStageStatuses(), errors.New(cleanupDockerBuildError(message.Error.Message))
		}

		id := dockerMessageID(message.ID)
		if id != "" && message.Progress != nil {
			// Add a small 2-second backoff so that we don't overwhelm the logstore.
			lastPrinted, hasBeenPrinted := progressLastPrinted[id]
			shouldPrint := !hasBeenPrinted ||
				message.Progress.Current == message.Progress.Total ||
				time.Since(lastPrinted) > 2*time.Second
			// Zero-progress "Waiting"/"Preparing" lines carry no information.
			shouldSkip := message.Progress.Current == 0 &&
				(message.Status == "Waiting" || message.Status == "Preparing")
			if shouldPrint && !shouldSkip {
				fields := logger.Fields{logger.FieldNameProgressID: message.ID}
				if message.Progress.Current == message.Progress.Total {
					// Completion lines must never be throttled away downstream.
					fields[logger.FieldNameProgressMustPrint] = "1"
				}
				logger.Get(ctx).WithFields(fields).
					Infof("%s: %s %s", id, message.Status, message.Progress.String())
				progressLastPrinted[id] = time.Now()
			}
		}

		// Buildkit multiplexes its status protocol into aux payloads.
		if messageIsFromBuildkit(message) {
			err := toBuildkitStatus(message.Aux, b)
			if err != nil {
				return dockerOutput{}, b.toStageStatuses(), err
			}
		}

		// A non-buildkit aux message carries the command's result (e.g. image ID).
		if message.Aux != nil && !messageIsFromBuildkit(message) {
			result.aux = message.Aux
		}
	}

	// If the context was cancelled the stream may have ended early;
	// report cancellation rather than a partial result.
	if ctx.Err() != nil {
		return dockerOutput{}, b.toStageStatuses(), ctx.Err()
	}
	return result, b.toStageStatuses(), nil
}
   443  
   444  func toBuildkitStatus(aux *json.RawMessage, b *buildkitPrinter) error {
   445  	var resp controlapi.StatusResponse
   446  	var dt []byte
   447  	// ignoring all messages that are not understood
   448  	if err := json.Unmarshal(*aux, &dt); err != nil {
   449  		return err
   450  	}
   451  	if err := (&resp).UnmarshalVT(dt); err != nil {
   452  		return err
   453  	}
   454  	return b.parseAndPrint(toVertexes(&resp))
   455  }
   456  
   457  func toVertexes(resp *controlapi.StatusResponse) ([]*vertex, []*vertexLog, []*vertexStatus) {
   458  	vertexes := []*vertex{}
   459  	logs := []*vertexLog{}
   460  	statuses := []*vertexStatus{}
   461  
   462  	for _, v := range resp.Vertexes {
   463  		duration := time.Duration(0)
   464  		started := v.Started != nil
   465  		completed := v.Completed != nil
   466  		if started && completed {
   467  			duration = v.Completed.AsTime().Sub(v.Started.AsTime())
   468  		}
   469  		vertexes = append(vertexes, &vertex{
   470  			digest:        v.Digest,
   471  			name:          v.Name,
   472  			error:         v.Error,
   473  			started:       started,
   474  			completed:     completed,
   475  			cached:        v.Cached,
   476  			duration:      duration,
   477  			startedTime:   v.Started,
   478  			completedTime: v.Completed,
   479  		})
   480  
   481  	}
   482  	for _, v := range resp.Logs {
   483  		logs = append(logs, &vertexLog{
   484  			vertex: v.Vertex,
   485  			msg:    v.Msg,
   486  		})
   487  	}
   488  	for _, s := range resp.Statuses {
   489  		statuses = append(statuses, &vertexStatus{
   490  			vertex:    s.Vertex,
   491  			id:        s.ID,
   492  			total:     s.Total,
   493  			current:   s.Current,
   494  			timestamp: s.Timestamp.AsTime(),
   495  		})
   496  	}
   497  	return vertexes, logs, statuses
   498  }
   499  
// messageIsFromBuildkit reports whether msg is a buildkit trace message;
// buildkit multiplexes its status stream under this well-known ID.
func messageIsFromBuildkit(msg jsonmessage.JSONMessage) bool {
	return msg.ID == "moby.buildkit.trace"
}
   503  
   504  func (d *DockerBuilder) getDigestFromDockerOutput(ctx context.Context, output dockerOutput) (digest.Digest, error) {
   505  	if output.aux != nil {
   506  		return getDigestFromAux(*output.aux)
   507  	}
   508  
   509  	if output.shortDigest != "" {
   510  		data, _, err := d.dCli.ImageInspectWithRaw(ctx, output.shortDigest)
   511  		if err != nil {
   512  			return "", err
   513  		}
   514  		return digest.Digest(data.ID), nil
   515  	}
   516  
   517  	return "", fmt.Errorf("Docker is not responding. Maybe Docker is out of disk space? Try running `docker system prune`")
   518  }
   519  
   520  func getDigestFromAux(aux json.RawMessage) (digest.Digest, error) {
   521  	digestMap := make(map[string]string)
   522  	err := json.Unmarshal(aux, &digestMap)
   523  	if err != nil {
   524  		return "", errors.Wrap(err, "getDigestFromAux")
   525  	}
   526  
   527  	id, ok := digestMap["ID"]
   528  	if !ok {
   529  		return "", fmt.Errorf("getDigestFromAux: ID not found")
   530  	}
   531  	return digest.Digest(id), nil
   532  }
   533  
   534  func digestAsTag(d digest.Digest) (string, error) {
   535  	str := d.Encoded()
   536  	if len(str) < 16 {
   537  		return "", fmt.Errorf("digest too short: %s", str)
   538  	}
   539  	return fmt.Sprintf("%s%s", ImageTagPrefix, str[:16]), nil
   540  }
   541  
   542  func digestMatchesRef(ref reference.NamedTagged, digest digest.Digest) bool {
   543  	digestHash := digest.Encoded()
   544  	tag := ref.Tag()
   545  	if len(tag) <= len(ImageTagPrefix) {
   546  		return false
   547  	}
   548  
   549  	tagHash := tag[len(ImageTagPrefix):]
   550  	return strings.HasPrefix(digestHash, tagHash)
   551  }
   552  
// oldDigestRegexp matches the "Successfully built <id>" stream line that
// old docker daemons (pre 1.30) emit instead of an aux result message.
var oldDigestRegexp = regexp.MustCompile(`^Successfully built ([0-9a-f]+)\s*$`)
   554  
// dockerOutput holds the terminal result of a docker build/push stream.
type dockerOutput struct {
	// aux is the result payload ("aux" field) from modern daemons.
	aux *json.RawMessage
	// shortDigest is the short image ID scraped from the
	// "Successfully built ..." line emitted by pre-1.30 daemons.
	shortDigest string
}
   559  
   560  func indent(text, indent string) string {
   561  	if text == "" {
   562  		return indent + text
   563  	}
   564  	if text[len(text)-1:] == "\n" {
   565  		result := ""
   566  		for _, j := range strings.Split(text[:len(text)-1], "\n") {
   567  			result += indent + j + "\n"
   568  		}
   569  		return result
   570  	}
   571  	result := ""
   572  	for _, j := range strings.Split(strings.TrimRight(text, "\n"), "\n") {
   573  		result += indent + j + "\n"
   574  	}
   575  	return result[:len(result)-1]
   576  }
   577  
// DockerfileName is the file name used inside the temporary "dockerfile"
// sync dir handed to buildkit (see writeTempDockerfileSyncdir).
const DockerfileName = "Dockerfile"
   579  
   580  // Creates a specification for the buildkit filesyncer
   581  //
   582  // Welcome to the magnificent complexity of the fssync protocol!
   583  //
   584  // Originally, the Docker CLI was responsible for creating a context (basically a tarball)
   585  // and sending it to the build server. The Docker CLI used .dockerignore to exclude things
   586  // from that tarball.
   587  //
   588  // Soon, people realized that tarballing the docker context was a huge bottleneck
   589  // for monorepos.
   590  //
   591  // Buildkit solves this problem with the fssync server. You create a fssync.SyncedDir
   592  // for two directories:
   593  // - the "context" dir (with the main build contents)
   594  // - the "dockerfile" dir (with build instructions, i.e., my.Dockerfile and my.Dockerfile.dockerignore)
   595  // and Buildkit requests the files it needs lazily.
   596  //
   597  // As part of this, they decided to do all .dockerignore interpretation
   598  // server-side.  There's a little dance Buildkit does to determine whether to
   599  // grab my.Dockerfile.dockerignore from the dockerfile dir, or whether to grab
   600  // .dockerignore from the context dir.
   601  //
   602  // Tilt has its own context filtering rules (ignore= and only= in particular).
   603  // So Tilt can't rely on Buildkit's logic. Instead, Tilt
   604  // - creates a "context" dir (with the main build contents filtered client-side)
   605  // - the "dockerfile" dir with a Dockerfile and a fake Dockerfile.dockerignore
   606  //
   607  // The fake Dockerfile.dockerignore tells buildkit not do to its server-side
   608  // filtering dance.
   609  func toDirSource(context string, dockerfileSyncDir string, filter model.PathMatcher) (filesync.DirSource, error) {
   610  	fileMap := func(path string, s *fsutiltypes.Stat) fsutil.MapResult {
   611  		if !filepath.IsAbs(path) {
   612  			path = filepath.Join(context, path)
   613  		}
   614  
   615  		isDir := s != nil && s.IsDir()
   616  		if isDir {
   617  			entireDir, _ := filter.MatchesEntireDir(path)
   618  			if entireDir {
   619  				return fsutil.MapResultSkipDir
   620  			}
   621  		} else {
   622  			matches, _ := filter.Matches(path)
   623  			if matches {
   624  				return fsutil.MapResultExclude
   625  			}
   626  		}
   627  		s.Uid = 0
   628  		s.Gid = 0
   629  		return fsutil.MapResultKeep
   630  	}
   631  
   632  	contextFS, err := fsutil.NewFS(context)
   633  	if err != nil {
   634  		return nil, err
   635  	}
   636  
   637  	contextFS, err = fsutil.NewFilterFS(contextFS, &fsutil.FilterOpt{
   638  		Map: fileMap,
   639  	})
   640  	if err != nil {
   641  		return nil, err
   642  	}
   643  
   644  	dockerfileFS, err := fsutil.NewFS(dockerfileSyncDir)
   645  	if err != nil {
   646  		return nil, err
   647  	}
   648  
   649  	return filesync.StaticDirSource{
   650  		"context":    contextFS,
   651  		"dockerfile": dockerfileFS,
   652  	}, nil
   653  }
   654  
   655  // Writes Dockerfile and Dockerfile.dockerignore to a temporary directory.
   656  func writeTempDockerfileSyncdir(contents string) (string, error) {
   657  	// err is a named return value, due to the defer call below.
   658  	dockerfileDir, err := os.MkdirTemp("", "tilt-tempdockerfile-")
   659  	if err != nil {
   660  		return "", fmt.Errorf("creating temp dockerfile directory: %v", err)
   661  	}
   662  
   663  	err = os.WriteFile(filepath.Join(dockerfileDir, "Dockerfile"), []byte(contents), 0777)
   664  	if err != nil {
   665  		_ = os.RemoveAll(dockerfileDir)
   666  		return "", fmt.Errorf("creating temp dockerfile: %v", err)
   667  	}
   668  
   669  	dockerignoreContents := `# Tilt's fake dockerignore file`
   670  	err = os.WriteFile(filepath.Join(dockerfileDir, "Dockerfile.dockerignore"), []byte(dockerignoreContents), 0777)
   671  	if err != nil {
   672  		_ = os.RemoveAll(dockerfileDir)
   673  		return "", fmt.Errorf("creating temp dockerignore file: %v", err)
   674  	}
   675  	return dockerfileDir, nil
   676  }