github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/build/docker_builder.go

     1  package build
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"fmt"
     7  	"io"
     8  	"os"
     9  	"path/filepath"
    10  	"regexp"
    11  	"strings"
    12  	"time"
    13  
    14  	"github.com/distribution/reference"
    15  	"github.com/docker/docker/api/types"
    16  	"github.com/docker/docker/client"
    17  	"github.com/docker/docker/pkg/jsonmessage"
    18  	controlapi "github.com/moby/buildkit/api/services/control"
    19  	"github.com/moby/buildkit/session/filesync"
    20  	"github.com/opencontainers/go-digest"
    21  	"github.com/pkg/errors"
    22  	"github.com/tonistiigi/fsutil"
    23  	fsutiltypes "github.com/tonistiigi/fsutil/types"
    24  	"golang.org/x/sync/errgroup"
    25  	ktypes "k8s.io/apimachinery/pkg/types"
    26  
    27  	"github.com/tilt-dev/tilt/internal/container"
    28  	"github.com/tilt-dev/tilt/internal/docker"
    29  	"github.com/tilt-dev/tilt/internal/dockerfile"
    30  	"github.com/tilt-dev/tilt/internal/k8s"
    31  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    32  	"github.com/tilt-dev/tilt/pkg/logger"
    33  	"github.com/tilt-dev/tilt/pkg/model"
    34  )
    35  
    36  type DockerBuilder struct {
    37  	dCli docker.Client
    38  
    39  	// A set of extra labels to attach to all builds
    40  	// created by this image builder.
    41  	//
    42  	// By default, all builds are labeled with a build mode.
    43  	extraLabels dockerfile.Labels
    44  }
    45  
    46  // Describes how a docker instance connects to kubernetes instances.
    47  type DockerKubeConnection interface {
    48  	// Returns whether this docker builder is going to build to the given kubernetes context.
    49  	WillBuildToKubeContext(kctx k8s.KubeContext) bool
    50  }
    51  
    52  func NewDockerBuilder(dCli docker.Client, extraLabels dockerfile.Labels) *DockerBuilder {
    53  	return &DockerBuilder{
    54  		dCli:        dCli,
    55  		extraLabels: extraLabels,
    56  	}
    57  }
    58  
    59  func (d *DockerBuilder) WillBuildToKubeContext(kctx k8s.KubeContext) bool {
    60  	return d.dCli.Env().WillBuildToKubeContext(kctx)
    61  }
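
// sketchNeedsPush is a minimal sketch (not part of the original file) of how a
// caller might use the DockerKubeConnection interface: if the docker daemon
// builds directly into the given kube context (e.g. a local cluster that shares
// the daemon), the built image is already visible to the cluster and a push can
// be skipped. The kctx value is assumed to come from the active kubeconfig.
func sketchNeedsPush(conn DockerKubeConnection, kctx k8s.KubeContext) bool {
	return !conn.WillBuildToKubeContext(kctx)
}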
    62  
    63  func (d *DockerBuilder) DumpImageDeployRef(ctx context.Context, ref string) (reference.NamedTagged, error) {
    64  	refParsed, err := container.ParseNamed(ref)
    65  	if err != nil {
    66  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    67  	}
    68  
    69  	data, _, err := d.dCli.ImageInspectWithRaw(ctx, ref)
    70  	if err != nil {
    71  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    72  	}
    73  	dig := digest.Digest(data.ID)
    74  
    75  	tag, err := digestAsTag(dig)
    76  	if err != nil {
    77  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    78  	}
    79  
    80  	tagged, err := reference.WithTag(refParsed, tag)
    81  	if err != nil {
    82  		return nil, errors.Wrap(err, "DumpImageDeployRef")
    83  	}
    84  
    85  	return tagged, nil
    86  }
    87  
     88  // Tag the digest with the given name and the Tilt build tag.
    89  func (d *DockerBuilder) TagRefs(ctx context.Context, refs container.RefSet, dig digest.Digest) (container.TaggedRefs, error) {
    90  	tag, err := digestAsTag(dig)
    91  	if err != nil {
    92  		return container.TaggedRefs{}, errors.Wrap(err, "TagImage")
    93  	}
    94  
    95  	tagged, err := refs.AddTagSuffix(tag)
    96  	if err != nil {
    97  		return container.TaggedRefs{}, errors.Wrap(err, "TagImage")
    98  	}
    99  
   100  	// Docker client only needs to care about the localImage
   101  	err = d.dCli.ImageTag(ctx, dig.String(), tagged.LocalRef.String())
   102  	if err != nil {
   103  		return container.TaggedRefs{}, errors.Wrap(err, "TagImage#ImageTag")
   104  	}
   105  
   106  	return tagged, nil
   107  }
   108  
   109  // Push the specified ref up to the docker registry specified in the name.
   110  //
   111  // TODO(nick) In the future, I would like us to be smarter about checking if the kubernetes cluster
   112  // we're running in has access to the given registry. And if it doesn't, we should either emit an
   113  // error, or push to a registry that kubernetes does have access to (e.g., a local registry).
   114  func (d *DockerBuilder) PushImage(ctx context.Context, ref reference.NamedTagged) error {
   115  	l := logger.Get(ctx)
   116  
   117  	imagePushResponse, err := d.dCli.ImagePush(ctx, ref)
   118  	if err != nil {
   119  		return errors.Wrap(err, "PushImage#ImagePush")
   120  	}
   121  
   122  	defer func() {
   123  		err := imagePushResponse.Close()
   124  		if err != nil {
   125  			l.Infof("unable to close imagePushResponse: %s", err)
   126  		}
   127  	}()
   128  
   129  	_, _, err = readDockerOutput(ctx, imagePushResponse)
   130  	if err != nil {
   131  		return errors.Wrapf(err, "pushing image %q", ref.Name())
   132  	}
   133  
   134  	return nil
   135  }
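
// sketchTagAndPush is a minimal sketch (not part of the original file) showing how
// TagRefs and PushImage fit together: derive a content-based tag from a build's
// digest, apply it to the ref set, then push the local ref to its registry. The
// refs and dig values are assumed to come from an earlier build.
func sketchTagAndPush(ctx context.Context, d *DockerBuilder, refs container.RefSet, dig digest.Digest) (container.TaggedRefs, error) {
	tagged, err := d.TagRefs(ctx, refs, dig)
	if err != nil {
		return container.TaggedRefs{}, errors.Wrap(err, "sketchTagAndPush")
	}
	// Only the local ref is pushed; that's the name the local docker daemon knows.
	if err := d.PushImage(ctx, tagged.LocalRef); err != nil {
		return container.TaggedRefs{}, errors.Wrap(err, "sketchTagAndPush")
	}
	return tagged, nil
}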
   136  
   137  func (d *DockerBuilder) ImageExists(ctx context.Context, ref reference.NamedTagged) (bool, error) {
   138  	_, _, err := d.dCli.ImageInspectWithRaw(ctx, ref.String())
   139  	if err != nil {
   140  		if client.IsErrNotFound(err) {
   141  			return false, nil
   142  		}
   143  		return false, errors.Wrapf(err, "error checking if %s exists", ref.String())
   144  	}
   145  	return true, nil
   146  }
   147  
   148  func (d *DockerBuilder) BuildImage(ctx context.Context, ps *PipelineState, refs container.RefSet,
   149  	spec v1alpha1.DockerImageSpec,
   150  	cluster *v1alpha1.Cluster,
   151  	imageMaps map[ktypes.NamespacedName]*v1alpha1.ImageMap,
   152  	filter model.PathMatcher) (container.TaggedRefs, []v1alpha1.DockerImageStageStatus, error) {
   153  	spec = InjectClusterPlatform(spec, cluster)
   154  	spec, err := InjectImageDependencies(spec, imageMaps)
   155  	if err != nil {
   156  		return container.TaggedRefs{}, nil, err
   157  	}
   158  
   159  	platformSuffix := ""
   160  	if spec.Platform != "" {
   161  		platformSuffix = fmt.Sprintf(" for platform %s", spec.Platform)
   162  	}
   163  	logger.Get(ctx).Infof("Building Dockerfile%s:\n%s\n", platformSuffix, indent(spec.DockerfileContents, "  "))
   164  
   165  	ps.StartBuildStep(ctx, "Building image")
   166  	allowBuildkit := true
   167  	ctx = ps.AttachLogger(ctx)
   168  	digest, stages, err := d.buildToDigest(ctx, spec, filter, allowBuildkit)
   169  	if err != nil {
   170  		isMysteriousCorruption := strings.Contains(err.Error(), "failed precondition") &&
   171  			strings.Contains(err.Error(), "failed commit on ref")
   172  		if isMysteriousCorruption {
   173  			// We've seen weird corruption issues on buildkit
   174  			// that look like
   175  			//
   176  			// Build Failed: ImageBuild: failed to create LLB definition:
   177  			// failed commit on ref "unknown-sha256:b72fa303a3a5fbf52c723bfcfb93948bb53b3d7e8d22418e9d171a27ad7dcd84":
   178  			// "unknown-sha256:b72fa303a3a5fbf52c723bfcfb93948bb53b3d7e8d22418e9d171a27ad7dcd84"
   179  			// failed size validation: 80941 != 80929: failed precondition
   180  			//
   181  			// Build Failed: ImageBuild: failed to load cache key: failed commit on
   182  			// ref
   183  			// "unknown-sha256:d8ad5905555e3af3fa9122515f2b3d4762d4e8734b7ed12f1271bcdee3541267":
   184  			// unexpected commit size 69764, expected 76810: failed precondition
   185  			//
   186  			// If this happens, just try again without buildkit.
   187  			allowBuildkit = false
   188  			logger.Get(ctx).Infof("Detected Buildkit corruption. Rebuilding without Buildkit")
   189  			digest, stages, err = d.buildToDigest(ctx, spec, filter, allowBuildkit)
   190  		}
   191  
   192  		if err != nil {
   193  			return container.TaggedRefs{}, stages, err
   194  		}
   195  	}
   196  
   197  	tagged, err := d.TagRefs(ctx, refs, digest)
   198  	if err != nil {
   199  		return container.TaggedRefs{}, stages, errors.Wrap(err, "docker tag")
   200  	}
   201  
   202  	return tagged, stages, nil
   203  }
   204  
    205  // A helper function that builds the given build context into a docker image,
    206  // then returns the output digest.
   207  func (d *DockerBuilder) buildToDigest(ctx context.Context, spec v1alpha1.DockerImageSpec, filter model.PathMatcher, allowBuildkit bool) (digest.Digest, []v1alpha1.DockerImageStageStatus, error) {
   208  	ctx, cancelBuildSession := context.WithCancel(ctx)
   209  	defer cancelBuildSession()
   210  
   211  	g, ctx := errgroup.WithContext(ctx)
   212  	var contextReader io.Reader
   213  
   214  	buildContext := spec.Context
   215  
   216  	// Treat context: "-" as an empty context.
   217  	if buildContext == "-" {
   218  		emptyContextDir, err := os.MkdirTemp("", "tilt-dockercontext-")
   219  		if err != nil {
   220  			return "", nil, fmt.Errorf("creating context directory: %v", err)
   221  		}
   222  
   223  		defer func() {
   224  			_ = os.RemoveAll(emptyContextDir)
   225  		}()
   226  
   227  		buildContext = emptyContextDir
   228  	}
   229  
   230  	_, err := os.Stat(buildContext)
   231  	if err != nil {
   232  		return "", nil, fmt.Errorf("reading build context: %v", err)
   233  	}
   234  
   235  	builderVersion, err := d.dCli.BuilderVersion(ctx)
   236  	if err != nil {
   237  		return "", nil, err
   238  	}
   239  
    240  	// Buildkit allows us to use an fs sync server instead of uploading up-front.
   241  	useFSSync := allowBuildkit && builderVersion == types.BuilderBuildKit
   242  	if !useFSSync {
   243  		pipeReader, pipeWriter := io.Pipe()
   244  		w := NewProgressWriter(ctx, pipeWriter)
   245  		w.Init()
   246  
   247  		// TODO(nick): Express tarring as a build stage.
   248  		g.Go(func() error {
   249  			paths := []PathMapping{
   250  				{
   251  					LocalPath:     buildContext,
   252  					ContainerPath: "/",
   253  				},
   254  			}
   255  			err := tarContextAndUpdateDf(ctx, w, dockerfile.Dockerfile(spec.DockerfileContents), paths, filter)
   256  			if err != nil {
   257  				_ = pipeWriter.CloseWithError(err)
   258  			} else {
   259  				_ = pipeWriter.Close()
   260  			}
   261  			w.Close() // Print the final progress message
   262  			return nil
   263  		})
   264  
   265  		contextReader = pipeReader
   266  		defer func() {
   267  			_ = pipeReader.Close()
   268  		}()
   269  	}
   270  
   271  	options := Options(contextReader, spec)
   272  	if useFSSync {
   273  		dockerfileDir, err := writeTempDockerfileSyncdir(spec.DockerfileContents)
   274  		if err != nil {
   275  			return "", nil, err
   276  		}
   277  		options.DirSource = toDirSource(buildContext, dockerfileDir, filter)
   278  		options.Dockerfile = DockerfileName
   279  
   280  		defer func() {
   281  			_ = os.RemoveAll(dockerfileDir)
   282  		}()
   283  	}
   284  	if !allowBuildkit {
   285  		options.ForceLegacyBuilder = true
   286  	}
   287  
   288  	var digest digest.Digest
   289  	var status []v1alpha1.DockerImageStageStatus
   290  	g.Go(func() error {
   291  		defer cancelBuildSession()
   292  		imageBuildResponse, err := d.dCli.ImageBuild(
   293  			ctx,
   294  			g,
   295  			contextReader,
   296  			options,
   297  		)
   298  		if err != nil {
   299  			return err
   300  		}
   301  
   302  		defer func() {
   303  			err := imageBuildResponse.Body.Close()
   304  			if err != nil {
   305  				logger.Get(ctx).Infof("unable to close imageBuildResponse: %s", err)
   306  			}
   307  		}()
   308  
   309  		digest, status, err = d.getDigestFromBuildOutput(ctx, imageBuildResponse.Body)
   310  		return err
   311  	})
   312  
   313  	err = g.Wait()
   314  	return digest, status, err
   315  }
   316  
   317  func (d *DockerBuilder) getDigestFromBuildOutput(ctx context.Context, reader io.Reader) (digest.Digest, []v1alpha1.DockerImageStageStatus, error) {
   318  	result, stageStatuses, err := readDockerOutput(ctx, reader)
   319  	if err != nil {
   320  		return "", stageStatuses, errors.Wrap(err, "ImageBuild")
   321  	}
   322  
   323  	digest, err := d.getDigestFromDockerOutput(ctx, result)
   324  	if err != nil {
   325  		return "", stageStatuses, errors.Wrap(err, "getDigestFromBuildOutput")
   326  	}
   327  
   328  	return digest, stageStatuses, nil
   329  }
   330  
   331  var dockerBuildCleanupRexes = []*regexp.Regexp{
    332  	// the "runc did not terminate sucessfully" just seems redundant on top of "executor failed running"
   333  	// nolint
   334  	regexp.MustCompile("(executor failed running.*): runc did not terminate sucessfully"), // sucessfully (sic)
   335  	// when a file is missing, it generates an error like "failed to compute cache key: foo.txt not found: not found"
   336  	// most of that seems redundant and/or confusing
   337  	regexp.MustCompile("failed to compute cache key: (.* not found): not found"),
   338  	regexp.MustCompile("failed to compute cache key: (?:failed to walk [^ ]+): lstat (?:/.*buildkit-[^/]*/)?(.*: no such file or directory)"),
   339  }
   340  
    341  // Buildkit emits errors that might be useful for people who are into buildkit internals,
    342  // but aren't really at the right level for people who just want to build something.
    343  // Ideally we'll get buildkit to emit errors with more structure so that we don't have to rely on
    344  // string manipulation, but to have impact via that route, we've got to get the change in and users
    345  // have to upgrade to a version of docker that has that change. So let's clean errors up here until that's in a good place.
   346  func cleanupDockerBuildError(err string) string {
   347  	// this is pretty much always the same, and meaningless noise to most users
   348  	ret := strings.TrimPrefix(err, "failed to solve with frontend dockerfile.v0: ")
   349  	ret = strings.TrimPrefix(ret, "failed to solve with frontend gateway.v0: ")
   350  	ret = strings.TrimPrefix(ret, "rpc error: code = Unknown desc = ")
   351  	ret = strings.TrimPrefix(ret, "failed to build LLB: ")
   352  	for _, re := range dockerBuildCleanupRexes {
   353  		ret = re.ReplaceAllString(ret, "$1")
   354  	}
   355  	return ret
   356  }
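
// sketchCleanupDockerBuildError is a minimal sketch (not part of the original file)
// of the kind of rewrite cleanupDockerBuildError performs; the input string is a
// representative missing-file error, not captured from a real build.
func sketchCleanupDockerBuildError() {
	in := "failed to solve with frontend dockerfile.v0: " +
		"failed to compute cache key: foo.txt not found: not found"
	// The frontend prefix is trimmed and the redundant cache-key wrapper collapsed,
	// leaving just "foo.txt not found".
	fmt.Println(cleanupDockerBuildError(in))
}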
   357  
   358  type dockerMessageID string
   359  
   360  // Docker API commands stream back a sequence of JSON messages.
   361  //
   362  // The result of the command is in a JSON object with field "aux".
   363  //
   364  // Errors are reported in a JSON object with field "errorDetail"
   365  //
   366  // NOTE(nick): I haven't found a good document describing this protocol
   367  // but you can find it implemented in Docker here:
   368  // https://github.com/moby/moby/blob/1da7d2eebf0a7a60ce585f89a05cebf7f631019c/pkg/jsonmessage/jsonmessage.go#L139
   369  func readDockerOutput(ctx context.Context, reader io.Reader) (dockerOutput, []v1alpha1.DockerImageStageStatus, error) {
   370  	progressLastPrinted := make(map[dockerMessageID]time.Time)
   371  
   372  	result := dockerOutput{}
   373  	decoder := json.NewDecoder(reader)
   374  	b := newBuildkitPrinter(logger.Get(ctx))
   375  
   376  	for decoder.More() {
   377  		message := jsonmessage.JSONMessage{}
   378  		err := decoder.Decode(&message)
   379  		if err != nil {
   380  			return dockerOutput{}, b.toStageStatuses(), errors.Wrap(err, "decoding docker output")
   381  		}
   382  
   383  		if len(message.Stream) > 0 {
   384  			msg := message.Stream
   385  
   386  			builtDigestMatch := oldDigestRegexp.FindStringSubmatch(msg)
   387  			if len(builtDigestMatch) >= 2 {
   388  				// Old versions of docker (pre 1.30) didn't send down an aux message.
   389  				result.shortDigest = builtDigestMatch[1]
   390  			}
   391  
   392  			logger.Get(ctx).Write(logger.InfoLvl, []byte(msg))
   393  		}
   394  
   395  		if message.ErrorMessage != "" {
   396  			return dockerOutput{}, b.toStageStatuses(), errors.New(cleanupDockerBuildError(message.ErrorMessage))
   397  		}
   398  
   399  		if message.Error != nil {
   400  			return dockerOutput{}, b.toStageStatuses(), errors.New(cleanupDockerBuildError(message.Error.Message))
   401  		}
   402  
   403  		id := dockerMessageID(message.ID)
   404  		if id != "" && message.Progress != nil {
   405  			// Add a small 2-second backoff so that we don't overwhelm the logstore.
   406  			lastPrinted, hasBeenPrinted := progressLastPrinted[id]
   407  			shouldPrint := !hasBeenPrinted ||
   408  				message.Progress.Current == message.Progress.Total ||
   409  				time.Since(lastPrinted) > 2*time.Second
   410  			shouldSkip := message.Progress.Current == 0 &&
   411  				(message.Status == "Waiting" || message.Status == "Preparing")
   412  			if shouldPrint && !shouldSkip {
   413  				fields := logger.Fields{logger.FieldNameProgressID: message.ID}
   414  				if message.Progress.Current == message.Progress.Total {
   415  					fields[logger.FieldNameProgressMustPrint] = "1"
   416  				}
   417  				logger.Get(ctx).WithFields(fields).
   418  					Infof("%s: %s %s", id, message.Status, message.Progress.String())
   419  				progressLastPrinted[id] = time.Now()
   420  			}
   421  		}
   422  
   423  		if messageIsFromBuildkit(message) {
   424  			err := toBuildkitStatus(message.Aux, b)
   425  			if err != nil {
   426  				return dockerOutput{}, b.toStageStatuses(), err
   427  			}
   428  		}
   429  
   430  		if message.Aux != nil && !messageIsFromBuildkit(message) {
   431  			result.aux = message.Aux
   432  		}
   433  	}
   434  
   435  	if ctx.Err() != nil {
   436  		return dockerOutput{}, b.toStageStatuses(), ctx.Err()
   437  	}
   438  	return result, b.toStageStatuses(), nil
   439  }
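
// sketchDecodeDockerMessages is a minimal sketch (not part of the original file) of
// the JSON stream shape the loop above consumes: "stream" lines carry build log
// text, and an "aux" object carries the result. The payloads are representative,
// not captured from a real build.
func sketchDecodeDockerMessages() {
	input := `{"stream":"Step 1/2 : FROM alpine\n"}` + "\n" +
		`{"aux":{"ID":"sha256:d8ad5905555e3af3fa9122515f2b3d4762d4e8734b7ed12f1271bcdee3541267"}}` + "\n"
	decoder := json.NewDecoder(strings.NewReader(input))
	for decoder.More() {
		message := jsonmessage.JSONMessage{}
		if err := decoder.Decode(&message); err != nil {
			panic(err)
		}
		fmt.Printf("stream=%q hasAux=%v\n", message.Stream, message.Aux != nil)
	}
}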
   440  
   441  func toBuildkitStatus(aux *json.RawMessage, b *buildkitPrinter) error {
   442  	var resp controlapi.StatusResponse
   443  	var dt []byte
    444  	// The aux payload is a base64-encoded StatusResponse protobuf.
   445  	if err := json.Unmarshal(*aux, &dt); err != nil {
   446  		return err
   447  	}
   448  	if err := (&resp).Unmarshal(dt); err != nil {
   449  		return err
   450  	}
   451  	return b.parseAndPrint(toVertexes(resp))
   452  }
   453  
   454  func toVertexes(resp controlapi.StatusResponse) ([]*vertex, []*vertexLog, []*vertexStatus) {
   455  	vertexes := []*vertex{}
   456  	logs := []*vertexLog{}
   457  	statuses := []*vertexStatus{}
   458  
   459  	for _, v := range resp.Vertexes {
   460  		duration := time.Duration(0)
   461  		started := v.Started != nil
   462  		completed := v.Completed != nil
   463  		if started && completed {
   464  			duration = (*v.Completed).Sub((*v.Started))
   465  		}
   466  		vertexes = append(vertexes, &vertex{
   467  			digest:        v.Digest,
   468  			name:          v.Name,
   469  			error:         v.Error,
   470  			started:       started,
   471  			completed:     completed,
   472  			cached:        v.Cached,
   473  			duration:      duration,
   474  			startedTime:   v.Started,
   475  			completedTime: v.Completed,
   476  		})
   477  
   478  	}
   479  	for _, v := range resp.Logs {
   480  		logs = append(logs, &vertexLog{
   481  			vertex: v.Vertex,
   482  			msg:    v.Msg,
   483  		})
   484  	}
   485  	for _, s := range resp.Statuses {
   486  		statuses = append(statuses, &vertexStatus{
   487  			vertex:    s.Vertex,
   488  			id:        s.ID,
   489  			total:     s.Total,
   490  			current:   s.Current,
   491  			timestamp: s.Timestamp,
   492  		})
   493  	}
   494  	return vertexes, logs, statuses
   495  }
   496  
   497  func messageIsFromBuildkit(msg jsonmessage.JSONMessage) bool {
   498  	return msg.ID == "moby.buildkit.trace"
   499  }
   500  
   501  func (d *DockerBuilder) getDigestFromDockerOutput(ctx context.Context, output dockerOutput) (digest.Digest, error) {
   502  	if output.aux != nil {
   503  		return getDigestFromAux(*output.aux)
   504  	}
   505  
   506  	if output.shortDigest != "" {
   507  		data, _, err := d.dCli.ImageInspectWithRaw(ctx, output.shortDigest)
   508  		if err != nil {
   509  			return "", err
   510  		}
   511  		return digest.Digest(data.ID), nil
   512  	}
   513  
   514  	return "", fmt.Errorf("Docker is not responding. Maybe Docker is out of disk space? Try running `docker system prune`")
   515  }
   516  
   517  func getDigestFromAux(aux json.RawMessage) (digest.Digest, error) {
   518  	digestMap := make(map[string]string)
   519  	err := json.Unmarshal(aux, &digestMap)
   520  	if err != nil {
   521  		return "", errors.Wrap(err, "getDigestFromAux")
   522  	}
   523  
   524  	id, ok := digestMap["ID"]
   525  	if !ok {
   526  		return "", fmt.Errorf("getDigestFromAux: ID not found")
   527  	}
   528  	return digest.Digest(id), nil
   529  }
   530  
   531  func digestAsTag(d digest.Digest) (string, error) {
   532  	str := d.Encoded()
   533  	if len(str) < 16 {
   534  		return "", fmt.Errorf("digest too short: %s", str)
   535  	}
   536  	return fmt.Sprintf("%s%s", ImageTagPrefix, str[:16]), nil
   537  }
   538  
   539  func digestMatchesRef(ref reference.NamedTagged, digest digest.Digest) bool {
   540  	digestHash := digest.Encoded()
   541  	tag := ref.Tag()
   542  	if len(tag) <= len(ImageTagPrefix) {
   543  		return false
   544  	}
   545  
   546  	tagHash := tag[len(ImageTagPrefix):]
   547  	return strings.HasPrefix(digestHash, tagHash)
   548  }
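
// sketchDigestTagRoundTrip is a minimal sketch (not part of the original file) of
// the relationship between digestAsTag and digestMatchesRef: a ref tagged with the
// digest-derived tag matches that digest. The image name and sha256 value are made
// up for illustration; ImageTagPrefix is defined elsewhere in this package.
func sketchDigestTagRoundTrip() (bool, error) {
	dig := digest.Digest("sha256:b72fa303a3a5fbf52c723bfcfb93948bb53b3d7e8d22418e9d171a27ad7dcd84")
	tag, err := digestAsTag(dig) // ImageTagPrefix + the first 16 hex chars of the digest
	if err != nil {
		return false, err
	}
	named, err := container.ParseNamed("gcr.io/some-project/some-image")
	if err != nil {
		return false, err
	}
	tagged, err := reference.WithTag(named, tag)
	if err != nil {
		return false, err
	}
	// true, because the tag suffix is a prefix of the digest's hex hash.
	return digestMatchesRef(tagged, dig), nil
}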
   549  
   550  var oldDigestRegexp = regexp.MustCompile(`^Successfully built ([0-9a-f]+)\s*$`)
   551  
   552  type dockerOutput struct {
   553  	aux         *json.RawMessage
   554  	shortDigest string
   555  }
   556  
   557  func indent(text, indent string) string {
   558  	if text == "" {
   559  		return indent + text
   560  	}
   561  	if text[len(text)-1:] == "\n" {
   562  		result := ""
   563  		for _, j := range strings.Split(text[:len(text)-1], "\n") {
   564  			result += indent + j + "\n"
   565  		}
   566  		return result
   567  	}
   568  	result := ""
   569  	for _, j := range strings.Split(strings.TrimRight(text, "\n"), "\n") {
   570  		result += indent + j + "\n"
   571  	}
   572  	return result[:len(result)-1]
   573  }
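
// sketchIndent is a minimal sketch (not part of the original file) of the helper
// above: every line is prefixed, and a trailing newline on the input is preserved.
func sketchIndent() {
	fmt.Print(indent("FROM alpine\nRUN true\n", "  "))
	// Prints:
	//   FROM alpine
	//   RUN true
}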
   574  
   575  const DockerfileName = "Dockerfile"
   576  
   577  // Creates a specification for the buildkit filesyncer
   578  //
   579  // Welcome to the magnificent complexity of the fssync protocol!
   580  //
   581  // Originally, the Docker CLI was responsible for creating a context (basically a tarball)
   582  // and sending it to the build server. The Docker CLI used .dockerignore to exclude things
   583  // from that tarball.
   584  //
   585  // Soon, people realized that tarballing the docker context was a huge bottleneck
   586  // for monorepos.
   587  //
   588  // Buildkit solves this problem with the fssync server. You create a fssync.SyncedDir
   589  // for two directories:
   590  // - the "context" dir (with the main build contents)
   591  // - the "dockerfile" dir (with build instructions, i.e., my.Dockerfile and my.Dockerfile.dockerignore)
   592  // and Buildkit requests the files it needs lazily.
   593  //
   594  // As part of this, they decided to do all .dockerignore interpretation
   595  // server-side.  There's a little dance Buildkit does to determine whether to
   596  // grab my.Dockerfile.dockerignore from the dockerfile dir, or whether to grab
   597  // .dockerignore from the context dir.
   598  //
   599  // Tilt has its own context filtering rules (ignore= and only= in particular).
   600  // So Tilt can't rely on Buildkit's logic. Instead, Tilt
   601  // - creates a "context" dir (with the main build contents filtered client-side)
    602  // - creates a "dockerfile" dir with a Dockerfile and a fake Dockerfile.dockerignore
   603  //
    604  // The fake Dockerfile.dockerignore tells buildkit not to do its server-side
   605  // filtering dance.
   606  func toDirSource(context string, dockerfileSyncDir string, filter model.PathMatcher) filesync.DirSource {
   607  	fileMap := func(path string, s *fsutiltypes.Stat) fsutil.MapResult {
   608  		if !filepath.IsAbs(path) {
   609  			path = filepath.Join(context, path)
   610  		}
   611  
   612  		isDir := s != nil && s.IsDir()
   613  		if isDir {
   614  			entireDir, _ := filter.MatchesEntireDir(path)
   615  			if entireDir {
   616  				return fsutil.MapResultSkipDir
   617  			}
   618  		} else {
   619  			matches, _ := filter.Matches(path)
   620  			if matches {
   621  				return fsutil.MapResultExclude
   622  			}
   623  		}
   624  		s.Uid = 0
   625  		s.Gid = 0
   626  		return fsutil.MapResultKeep
   627  	}
   628  
   629  	return filesync.StaticDirSource{
   630  		"context": filesync.SyncedDir{
   631  			Dir: context,
   632  			Map: fileMap,
   633  		},
   634  		"dockerfile": filesync.SyncedDir{
   635  			Dir: dockerfileSyncDir,
   636  		},
   637  	}
   638  }
   639  
   640  // Writes Dockerfile and Dockerfile.dockerignore to a temporary directory.
   641  func writeTempDockerfileSyncdir(contents string) (string, error) {
    642  	// On any failure after this point, clean up the temp directory before returning.
   643  	dockerfileDir, err := os.MkdirTemp("", "tilt-tempdockerfile-")
   644  	if err != nil {
   645  		return "", fmt.Errorf("creating temp dockerfile directory: %v", err)
   646  	}
   647  
   648  	err = os.WriteFile(filepath.Join(dockerfileDir, "Dockerfile"), []byte(contents), 0777)
   649  	if err != nil {
   650  		_ = os.RemoveAll(dockerfileDir)
   651  		return "", fmt.Errorf("creating temp dockerfile: %v", err)
   652  	}
   653  
   654  	dockerignoreContents := `# Tilt's fake dockerignore file`
   655  	err = os.WriteFile(filepath.Join(dockerfileDir, "Dockerfile.dockerignore"), []byte(dockerignoreContents), 0777)
   656  	if err != nil {
   657  		_ = os.RemoveAll(dockerfileDir)
   658  		return "", fmt.Errorf("creating temp dockerignore file: %v", err)
   659  	}
   660  	return dockerfileDir, nil
   661  }