github.com/tilt-dev/tilt@v0.36.0/internal/docker/client.go (about)

     1  package docker
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io"
     7  	"net"
     8  	"net/http"
     9  	"os"
    10  	"path/filepath"
    11  	"strconv"
    12  	"sync"
    13  	"time"
    14  
    15  	"github.com/blang/semver"
    16  	"github.com/distribution/reference"
    17  	"github.com/docker/cli/cli/command"
    18  	"github.com/docker/cli/cli/config"
    19  	"github.com/docker/docker/api/types"
    20  	typescontainer "github.com/docker/docker/api/types/container"
    21  	"github.com/docker/docker/api/types/filters"
    22  	typesimage "github.com/docker/docker/api/types/image"
    23  	typesregistry "github.com/docker/docker/api/types/registry"
    24  	"github.com/docker/docker/client"
    25  	"github.com/docker/docker/pkg/stdcopy"
    26  	"github.com/docker/docker/registry"
    27  	"github.com/docker/go-connections/tlsconfig"
    28  	"github.com/moby/buildkit/identity"
    29  	"github.com/moby/buildkit/session"
    30  	"github.com/moby/buildkit/session/auth/authprovider"
    31  	"github.com/moby/buildkit/session/filesync"
    32  	"github.com/pkg/errors"
    33  	"golang.org/x/sync/errgroup"
    34  
    35  	"github.com/tilt-dev/tilt/internal/container"
    36  	"github.com/tilt-dev/tilt/internal/docker/buildkit"
    37  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    38  	"github.com/tilt-dev/tilt/pkg/logger"
    39  	"github.com/tilt-dev/tilt/pkg/model"
    40  )
    41  
const (
	// Indicates that an image was built by tilt's docker client.
	BuiltLabel = "dev.tilt.built"

	// Indicates that an image is eligible for garbage collection
	// by Tilt's pruner.
	GCEnabledLabel = "dev.tilt.gc"
)

var (
	// BuiltLabelSet is the label set stamped onto every image and container
	// this client creates, marking them as Tilt-built and GC-eligible.
	BuiltLabelSet = map[string]string{
		BuiltLabel:     "true",
		GCEnabledLabel: "true",
	}
)
    57  
// Sentinel RemoteContext value telling the daemon that the build context is
// streamed over the buildkit client session instead of uploaded as a tarball.
const clientSessionRemote = "client-session"

// Minimum docker version we've tested on.
// A good way to test old versions is to connect to an old version of Minikube,
// so that we connect to the docker server in minikube instead of futzing with
// the docker version on your machine.
// https://github.com/kubernetes/minikube/releases/tag/v0.13.1
var minDockerVersion = semver.MustParse("1.23.0")

// Buildkit is stable as of API 1.39, and available behind the daemon's
// experimental flag as of API 1.38 (see SupportsBuildkit).
var minDockerVersionStableBuildkit = semver.MustParse("1.39.0")
var minDockerVersionExperimentalBuildkit = semver.MustParse("1.38.0")

// Upper bound on the server-version handshake in initVersion, so a hung
// daemon doesn't block startup indefinitely.
var versionTimeout = 5 * time.Second
    71  
// Create an interface so this can be mocked out.
type Client interface {
	// Reports whether this client can talk to its Docker daemon at all.
	CheckConnected() error

	// If you'd like to call this Docker instance in a separate process, these
	// are the environment variables you'll need to do so.
	Env() Env

	// If you'd like to call this Docker instance in a separate process, this
	// is the default builder version you want (buildkit or legacy)
	BuilderVersion(ctx context.Context) (types.BuilderVersion, error)

	// Version info reported by the Docker server.
	ServerVersion(ctx context.Context) (types.Version, error)

	// Set the orchestrator we're talking to. This is only relevant to switchClient,
	// which can talk to either the Local or in-cluster docker daemon.
	SetOrchestrator(orc model.Orchestrator)
	// Return a client suitable for use with the given orchestrator. Only
	// relevant for the switchClient which has clients for both types.
	ForOrchestrator(orc model.Orchestrator) Client

	// Container operations, mirroring the upstream Docker client API.
	ContainerLogs(ctx context.Context, container string, options typescontainer.LogsOptions) (io.ReadCloser, error)
	ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
	ContainerList(ctx context.Context, options typescontainer.ListOptions) ([]types.Container, error)
	ContainerRestartNoWait(ctx context.Context, containerID string) error

	// Create and start a container, returning a handle to its status and logs.
	Run(ctx context.Context, opts RunConfig) (RunResult, error)

	// Execute a command in a container, streaming the command output to `out`.
	// Returns an ExitError if the command exits with a non-zero exit code.
	ExecInContainer(ctx context.Context, cID container.ID, cmd model.Cmd, in io.Reader, out io.Writer) error

	// Image operations, mirroring the upstream Docker client API.
	// ImagePull additionally resolves credentials client-side and returns a
	// digest-pinned reference for what was actually pulled.
	ImagePull(ctx context.Context, ref reference.Named) (reference.Canonical, error)
	ImagePush(ctx context.Context, image reference.NamedTagged) (io.ReadCloser, error)
	ImageBuild(ctx context.Context, g *errgroup.Group, buildContext io.Reader, options BuildOptions) (types.ImageBuildResponse, error)
	ImageTag(ctx context.Context, source, target string) error
	ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error)
	ImageList(ctx context.Context, options typesimage.ListOptions) ([]typesimage.Summary, error)
	ImageRemove(ctx context.Context, imageID string, options typesimage.RemoveOptions) ([]typesimage.DeleteResponse, error)

	// Version gating and pruning, used by Tilt's docker pruner.
	NewVersionError(ctx context.Context, APIrequired, feature string) error
	BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (typescontainer.PruneReport, error)
}
   116  
// Add-on interface for a client that manages multiple clients transparently.
type CompositeClient interface {
	Client
	// The client for the local Docker daemon.
	DefaultLocalClient() Client
	// The client for the in-cluster Docker daemon.
	DefaultClusterClient() Client
	// ClientFor picks the appropriate client for the given cluster.
	ClientFor(cluster v1alpha1.Cluster) Client
	// Reports whether the local and cluster clients are actually distinct.
	HasMultipleClients() bool
}
   125  
   126  type ExitError struct {
   127  	ExitCode int
   128  }
   129  
   130  func (e ExitError) Error() string {
   131  	return fmt.Sprintf("Exec command exited with status code: %d", e.ExitCode)
   132  }
   133  
   134  func IsExitError(err error) bool {
   135  	_, ok := err.(ExitError)
   136  	return ok
   137  }
   138  
   139  var _ error = ExitError{}
   140  
var _ Client = &Cli{}

// Cli is the production Client implementation. It embeds the upstream Docker
// client and layers on lazy server/builder version detection and registry
// credential handling.
type Cli struct {
	*client.Client

	// Lazily computes registry auth configs, used by legacy (non-buildkit) builds.
	authConfigsOnce func() map[string]typesregistry.AuthConfig
	env             Env

	// versionsOnce guards the one-time population of the three fields below
	// (see initVersion).
	versionsOnce   sync.Once
	builderVersion types.BuilderVersion
	serverVersion  types.Version
	versionError   error
}
   154  
   155  func NewDockerClient(ctx context.Context, env Env) Client {
   156  	if env.Error != nil {
   157  		return newExplodingClient(env.Error)
   158  	}
   159  
   160  	d := env.Client.(*client.Client)
   161  
   162  	return &Cli{
   163  		Client:          d,
   164  		env:             env,
   165  		authConfigsOnce: sync.OnceValue(authConfigs),
   166  	}
   167  }
   168  
   169  func SupportedVersion(v types.Version) bool {
   170  	version, err := semver.ParseTolerant(v.APIVersion)
   171  	if err != nil {
   172  		// If the server version doesn't parse, we shouldn't even start
   173  		return false
   174  	}
   175  
   176  	return version.GTE(minDockerVersion)
   177  }
   178  
   179  func getDockerBuilderVersion(v types.Version, env Env) (types.BuilderVersion, error) {
   180  	// If the user has explicitly chosen to enable/disable buildkit, respect that.
   181  	buildkitEnv := os.Getenv("DOCKER_BUILDKIT")
   182  	if buildkitEnv != "" {
   183  		buildkitEnabled, err := strconv.ParseBool(buildkitEnv)
   184  		if err != nil {
   185  			// This error message is copied from Docker, for consistency.
   186  			return "", errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
   187  		}
   188  		if buildkitEnabled && SupportsBuildkit(v, env) {
   189  			return types.BuilderBuildKit, nil
   190  
   191  		}
   192  		return types.BuilderV1, nil
   193  	}
   194  
   195  	if SupportsBuildkit(v, env) {
   196  		return types.BuilderBuildKit, nil
   197  	}
   198  	return types.BuilderV1, nil
   199  }
   200  
   201  // Sadly, certain versions of docker return an error if the client requests
   202  // buildkit. We have to infer whether it supports buildkit from version numbers.
   203  //
   204  // Inferred from release notes
   205  // https://docs.docker.com/engine/release-notes/
   206  func SupportsBuildkit(v types.Version, env Env) bool {
   207  	if env.IsOldMinikube {
   208  		// Buildkit for Minikube is busted on some versions. See
   209  		// https://github.com/kubernetes/minikube/issues/4143
   210  		return false
   211  	}
   212  
   213  	version, err := semver.ParseTolerant(v.APIVersion)
   214  	if err != nil {
   215  		// If the server version doesn't parse, disable buildkit
   216  		return false
   217  	}
   218  
   219  	if minDockerVersionStableBuildkit.LTE(version) {
   220  		return true
   221  	}
   222  
   223  	if minDockerVersionExperimentalBuildkit.LTE(version) && v.Experimental {
   224  		return true
   225  	}
   226  
   227  	return false
   228  }
   229  
   230  // Adapted from client.FromEnv
   231  //
   232  // Supported environment variables:
   233  // DOCKER_HOST to set the url to the docker server.
   234  // DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
   235  // DOCKER_CERT_PATH to load the TLS certificates from.
   236  // DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
   237  func CreateClientOpts(envMap map[string]string) ([]client.Opt, error) {
   238  	result := make([]client.Opt, 0)
   239  
   240  	certPath := envMap["DOCKER_CERT_PATH"]
   241  	tlsVerify := envMap["DOCKER_TLS_VERIFY"]
   242  	if certPath != "" {
   243  		options := tlsconfig.Options{
   244  			CAFile:             filepath.Join(certPath, "ca.pem"),
   245  			CertFile:           filepath.Join(certPath, "cert.pem"),
   246  			KeyFile:            filepath.Join(certPath, "key.pem"),
   247  			InsecureSkipVerify: tlsVerify == "",
   248  		}
   249  		tlsc, err := tlsconfig.Client(options)
   250  		if err != nil {
   251  			return nil, err
   252  		}
   253  
   254  		result = append(result, client.WithHTTPClient(&http.Client{
   255  			Transport:     &http.Transport{TLSClientConfig: tlsc},
   256  			CheckRedirect: client.CheckRedirect,
   257  		}))
   258  	}
   259  
   260  	host := envMap["DOCKER_HOST"]
   261  	if host != "" {
   262  		result = append(result, client.WithHost(host))
   263  	}
   264  
   265  	apiVersion := envMap["DOCKER_API_VERSION"]
   266  	if apiVersion != "" {
   267  		result = append(result, client.WithVersion(apiVersion))
   268  	} else {
   269  		// WithAPIVersionNegotiation makes the Docker client negotiate down to a lower
   270  		// version if Docker's current API version is newer than the server version.
   271  		result = append(result, client.WithAPIVersionNegotiation())
   272  	}
   273  
   274  	return result, nil
   275  }
   276  
// initVersion fetches and caches the Docker server version, the preferred
// builder version, and any error encountered — exactly once per Cli.
// Results are read via BuilderVersion/ServerVersion.
func (c *Cli) initVersion(ctx context.Context) {
	c.versionsOnce.Do(func() {
		// Bound the handshake so a hung daemon doesn't block forever.
		ctx, cancel := context.WithTimeout(ctx, versionTimeout)
		defer cancel()

		serverVersion, err := c.Client.ServerVersion(ctx)
		if err != nil {
			c.versionError = err
			return
		}

		if !SupportedVersion(serverVersion) {
			c.versionError = fmt.Errorf("Tilt requires a Docker server newer than %s. Current Docker server: %s",
				minDockerVersion, serverVersion.APIVersion)
			return
		}

		builderVersion, err := getDockerBuilderVersion(serverVersion, c.env)
		if err != nil {
			c.versionError = err
			return
		}

		c.builderVersion = builderVersion
		c.serverVersion = serverVersion
	})
}
   304  
   305  func (c *Cli) startBuildkitSession(ctx context.Context, g *errgroup.Group, key string, dirSource filesync.DirSource, sshSpecs []string, secretSpecs []string) (*session.Session, error) {
   306  	session, err := session.NewSession(ctx, key)
   307  	if err != nil {
   308  		return nil, err
   309  	}
   310  
   311  	if dirSource != nil {
   312  		session.Allow(filesync.NewFSSyncProvider(dirSource))
   313  	}
   314  
   315  	dockerConfig := config.LoadDefaultConfigFile(
   316  		logger.Get(ctx).Writer(logger.InfoLvl))
   317  	provider := authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
   318  		ConfigFile: dockerConfig,
   319  	})
   320  	session.Allow(provider)
   321  
   322  	if len(secretSpecs) > 0 {
   323  		ss, err := buildkit.ParseSecretSpecs(secretSpecs)
   324  		if err != nil {
   325  			return nil, errors.Wrapf(err, "could not parse secret: %v", secretSpecs)
   326  		}
   327  		session.Allow(ss)
   328  	}
   329  
   330  	if len(sshSpecs) > 0 {
   331  		sshp, err := buildkit.ParseSSHSpecs(sshSpecs)
   332  		if err != nil {
   333  			return nil, errors.Wrapf(err, "could not parse ssh: %v", sshSpecs)
   334  		}
   335  		session.Allow(sshp)
   336  	}
   337  
   338  	g.Go(func() error {
   339  		defer func() {
   340  			_ = session.Close()
   341  		}()
   342  
   343  		// Start the server
   344  		dialSession := func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
   345  			return c.Client.DialHijack(ctx, "/session", proto, meta)
   346  		}
   347  		return session.Run(ctx, dialSession)
   348  	})
   349  	return session, nil
   350  }
   351  
   352  // When we pull from a private docker registry, we have to get credentials
   353  // from somewhere. These credentials are not stored on the server. The client
   354  // is responsible for managing them.
   355  //
   356  // Docker uses two different protocols:
   357  //  1. In the legacy build engine, you have to get all the creds ahead of time
   358  //     and pass them in the ImageBuild call.
   359  //  2. In BuildKit, you have to create a persistent session. The client
   360  //     side of the session manages a miniature server that just responds
   361  //     to credential requests as the server asks for them.
   362  //
   363  // Protocol (1) is very slow. If you're using the gcloud credential store,
   364  // fetching all the creds ahead of time can take ~3 seconds.
   365  // Protocol (2) is more efficient, but also more complex to manage. We manage it lazily.
   366  func authConfigs() map[string]typesregistry.AuthConfig {
   367  	configFile := config.LoadDefaultConfigFile(io.Discard)
   368  
   369  	// If we fail to get credentials for some reason, that's OK.
   370  	// even the docker CLI ignores this:
   371  	// https://github.com/docker/cli/blob/23446275646041f9b598d64c51be24d5d0e49376/cli/command/image/build.go#L386
   372  	credentials, _ := configFile.GetAllCredentials()
   373  	authConfigs := make(map[string]typesregistry.AuthConfig, len(credentials))
   374  	for k, auth := range credentials {
   375  		authConfigs[k] = typesregistry.AuthConfig(auth)
   376  	}
   377  	return authConfigs
   378  }
   379  
// CheckConnected always succeeds: a concrete Cli is assumed reachable.
func (c *Cli) CheckConnected() error                  { return nil }
// SetOrchestrator is a no-op; only switchClient cares about the orchestrator.
func (c *Cli) SetOrchestrator(orc model.Orchestrator) {}
// ForOrchestrator returns this client unchanged; a plain Cli serves all orchestrators.
func (c *Cli) ForOrchestrator(orc model.Orchestrator) Client {
	return c
}
// Env returns the environment needed to reach this daemon from another process.
func (c *Cli) Env() Env {
	return c.env
}
   388  
// BuilderVersion returns the builder (buildkit vs legacy) to use with this
// daemon, fetching version info lazily on first call.
func (c *Cli) BuilderVersion(ctx context.Context) (types.BuilderVersion, error) {
	c.initVersion(ctx)
	return c.builderVersion, c.versionError
}

// ServerVersion returns the Docker server's version info, fetching it lazily
// on first call.
func (c *Cli) ServerVersion(ctx context.Context) (types.Version, error) {
	c.initVersion(ctx)
	return c.serverVersion, c.versionError
}
   398  
// encodedAuth is a base64-encoded registry auth config, in the format the
// Docker API expects for its RegistryAuth/X-Registry-Auth field.
type encodedAuth string

// authInfo resolves credentials for the given repository from the local
// docker CLI config and encodes them for the Docker API.
// NOTE(review): cmdName is currently unused; callers pass values like "push"
// but it does not affect the result.
func (c *Cli) authInfo(ctx context.Context, repoInfo *registry.RepositoryInfo, cmdName string) (encodedAuth, error) {
	cli, err := newDockerCli(ctx)
	if err != nil {
		return "", errors.Wrap(err, "authInfo")
	}
	authConfig := command.ResolveAuthConfig(cli.ConfigFile(), repoInfo.Index)
	auth, err := typesregistry.EncodeAuthConfig(authConfig)
	if err != nil {
		return "", errors.Wrap(err, "authInfo#EncodeAuthConfig")
	}
	return encodedAuth(auth), nil
}
   413  
   414  func (c *Cli) ImagePull(ctx context.Context, ref reference.Named) (reference.Canonical, error) {
   415  	repoInfo, err := registry.ParseRepositoryInfo(ref)
   416  	if err != nil {
   417  		return nil, fmt.Errorf("could not parse registry for %q: %v", ref.String(), err)
   418  	}
   419  
   420  	encodedAuth, err := c.authInfo(ctx, repoInfo, "push")
   421  	if err != nil {
   422  		return nil, fmt.Errorf("could not authenticate: %v", err)
   423  	}
   424  
   425  	image := ref.String()
   426  	pullResp, err := c.Client.ImagePull(ctx, image, typesimage.PullOptions{
   427  		RegistryAuth: string(encodedAuth),
   428  	})
   429  	if err != nil {
   430  		return nil, fmt.Errorf("could not pull image %q: %v", image, err)
   431  	}
   432  	defer func() {
   433  		_ = pullResp.Close()
   434  	}()
   435  
   436  	// the /images/create API is a bit chaotic, returning JSON lines of status as it pulls
   437  	// including ASCII progress bar animation etc.
   438  	// there's not really any guarantees with it, so the prevailing guidance is to try and
   439  	// inspect the image immediately afterwards to ensure it was pulled successfully
   440  	// (this is racy and could be improved by _trying_ to get the digest out of this response
   441  	// and making sure it matches with the result of inspect, but Docker itself suffers from
   442  	// this same race condition during a docker run that triggers a pull, so it's reasonable
   443  	// to deem it as acceptable here as well)
   444  	_, err = io.Copy(io.Discard, pullResp)
   445  	if err != nil {
   446  		return nil, fmt.Errorf("connection error while pulling image %q: %v", image, err)
   447  	}
   448  
   449  	imgInspect, _, err := c.ImageInspectWithRaw(ctx, image)
   450  	if err != nil {
   451  		return nil, fmt.Errorf("failed to inspect after pull for image %q: %v", image, err)
   452  	}
   453  
   454  	pulledRef, err := reference.ParseNormalizedNamed(imgInspect.RepoDigests[0])
   455  	if err != nil {
   456  		return nil, fmt.Errorf("invalid reference %q for image %q: %v", imgInspect.RepoDigests[0], image, err)
   457  	}
   458  	cRef, ok := pulledRef.(reference.Canonical)
   459  	if !ok {
   460  		// this indicates a bug/behavior change within Docker because we just parsed a digest reference
   461  		return nil, fmt.Errorf("reference %q is not canonical", pulledRef.String())
   462  	}
   463  	// the reference from the repo digest will be missing the tag (if specified), so we attach the digest to the
   464  	// original reference to get something like `docker.io/library/nginx:1.21.32@sha256:<hash>` for an input of
   465  	// `docker.io/library/nginx:1.21.3` (if we used the repo digest, it'd be `docker.io/library/nginx@sha256:<hash>`
   466  	// with no tag, so this ensures all parts are preserved).
   467  	cRef, err = reference.WithDigest(ref, cRef.Digest())
   468  	if err != nil {
   469  		return nil, fmt.Errorf("invalid digest for reference %q: %v", pulledRef.String(), err)
   470  	}
   471  	return cRef, nil
   472  }
   473  
   474  func (c *Cli) ImagePush(ctx context.Context, ref reference.NamedTagged) (io.ReadCloser, error) {
   475  	repoInfo, err := registry.ParseRepositoryInfo(ref)
   476  	if err != nil {
   477  		return nil, errors.Wrap(err, "ImagePush#ParseRepositoryInfo")
   478  	}
   479  
   480  	logger.Get(ctx).Infof("Authenticating to image repo: %s", repoInfo.Index.Name)
   481  	encodedAuth, err := c.authInfo(ctx, repoInfo, "push")
   482  	if err != nil {
   483  		return nil, errors.Wrap(err, "ImagePush: authenticate")
   484  	}
   485  
   486  	options := typesimage.PushOptions{
   487  		RegistryAuth: string(encodedAuth),
   488  	}
   489  
   490  	if reference.Domain(ref) == "" {
   491  		return nil, errors.New("ImagePush: no domain in container name")
   492  	}
   493  	logger.Get(ctx).Infof("Sending image data")
   494  	return c.Client.ImagePush(ctx, ref.String(), options)
   495  }
   496  
// ImageBuild builds an image from buildContext, going through buildkit (with
// a one-time client session) or the legacy builder depending on the daemon's
// capabilities and the caller's options. The session, when used, is torn
// down once the caller finishes reading the response body.
func (c *Cli) ImageBuild(ctx context.Context, g *errgroup.Group, buildContext io.Reader, options BuildOptions) (types.ImageBuildResponse, error) {
	// Always use a one-time session when using buildkit, since credential
	// passing is fast and we want to get the latest creds.
	// https://github.com/tilt-dev/tilt/issues/4043
	var oneTimeSession *session.Session
	sessionID := ""

	// SSH specs, secrets, and synced dir contexts only work over buildkit.
	mustUseBuildkit := len(options.SSHSpecs) > 0 || len(options.SecretSpecs) > 0 || options.DirSource != nil
	builderVersion, err := c.BuilderVersion(ctx)
	if err != nil {
		return types.ImageBuildResponse{}, err
	}
	if options.ForceLegacyBuilder {
		builderVersion = types.BuilderV1
	}

	isUsingBuildkit := builderVersion == types.BuilderBuildKit
	if isUsingBuildkit {
		var err error
		oneTimeSession, err = c.startBuildkitSession(ctx, g, identity.NewID(), options.DirSource, options.SSHSpecs, options.SecretSpecs)
		if err != nil {
			return types.ImageBuildResponse{}, errors.Wrapf(err, "ImageBuild")
		}
		sessionID = oneTimeSession.ID()
	} else if mustUseBuildkit {
		return types.ImageBuildResponse{},
			fmt.Errorf("Docker SSH secrets only work on Buildkit, but Buildkit has been disabled")
	}

	opts := types.ImageBuildOptions{}
	opts.Version = builderVersion

	if isUsingBuildkit {
		opts.SessionID = sessionID
	} else {
		// The legacy builder needs all registry creds fetched up front.
		opts.AuthConfigs = c.authConfigsOnce()
	}

	opts.Remove = options.Remove
	opts.Context = options.Context
	opts.BuildArgs = options.BuildArgs
	opts.Dockerfile = options.Dockerfile
	opts.Tags = append([]string{}, options.ExtraTags...)
	opts.Target = options.Target
	opts.NetworkMode = options.Network
	opts.CacheFrom = options.CacheFrom
	opts.PullParent = options.PullParent
	opts.Platform = options.Platform
	opts.ExtraHosts = append([]string{}, options.ExtraHosts...)

	if options.DirSource != nil {
		// The build context is streamed over the session, not uploaded.
		opts.RemoteContext = clientSessionRemote
	}

	opts.Labels = BuiltLabelSet // label all images as built by us

	response, err := c.Client.ImageBuild(ctx, buildContext, opts)
	if err != nil {
		if oneTimeSession != nil {
			_ = oneTimeSession.Close()
		}
		return response, err
	}

	// Tie the session's lifetime to the response body: it closes when the
	// caller finishes reading the build output.
	if oneTimeSession != nil {
		response.Body = WrapReadCloserWithTearDown(response.Body, oneTimeSession.Close)
	}
	return response, err
}
   566  
   567  func (c *Cli) ContainerRestartNoWait(ctx context.Context, containerID string) error {
   568  
   569  	// Don't wait on the container to fully start.
   570  	dur := 0
   571  
   572  	return c.ContainerRestart(ctx, containerID, typescontainer.StopOptions{
   573  		Timeout: &dur,
   574  	})
   575  }
   576  
   577  func (c *Cli) ExecInContainer(ctx context.Context, cID container.ID, cmd model.Cmd, in io.Reader, out io.Writer) error {
   578  	attachStdin := in != nil
   579  	cfg := typescontainer.ExecOptions{
   580  		Cmd:          cmd.Argv,
   581  		AttachStdout: true,
   582  		AttachStderr: true,
   583  		AttachStdin:  attachStdin,
   584  		Tty:          !attachStdin,
   585  	}
   586  
   587  	// ContainerExecCreate error-handling is awful, so before we Create
   588  	// we do a dummy inspect, to get more reasonable error messages. See:
   589  	// https://github.com/docker/cli/blob/ae1618713f83e7da07317d579d0675f578de22fa/cli/command/container/exec.go#L77
   590  	if _, err := c.ContainerInspect(ctx, cID.String()); err != nil {
   591  		return errors.Wrap(err, "ExecInContainer")
   592  	}
   593  
   594  	// We've sometimes seen ExecCreate/Attach/Start hang, so we add a timeout
   595  	// here.  It happens very rarely and is not consistently reproducible. It
   596  	// seems to happen when running the exec inside a volume.
   597  	// https://github.com/tilt-dev/tilt/issues/6521
   598  	createCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
   599  	defer cancel()
   600  	execId, err := c.ContainerExecCreate(createCtx, cID.String(), cfg)
   601  	if err != nil {
   602  		return errors.Wrap(err, "ExecInContainer#create")
   603  	}
   604  
   605  	connection, err := c.ContainerExecAttach(createCtx, execId.ID, typescontainer.ExecAttachOptions{Tty: true})
   606  	if err != nil {
   607  		return errors.Wrap(err, "ExecInContainer#attach")
   608  	}
   609  	defer connection.Close()
   610  
   611  	err = c.ContainerExecStart(createCtx, execId.ID, typescontainer.ExecStartOptions{})
   612  	if err != nil {
   613  		return errors.Wrap(err, "ExecInContainer#start")
   614  	}
   615  
   616  	_, err = fmt.Fprintf(out, "RUNNING: %s\n", cmd)
   617  	if err != nil {
   618  		return errors.Wrap(err, "ExecInContainer#print")
   619  	}
   620  
   621  	inputDone := make(chan struct{})
   622  	if attachStdin {
   623  		go func() {
   624  			_, err := io.Copy(connection.Conn, in)
   625  			if err != nil {
   626  				logger.Get(ctx).Debugf("copy error: %v", err)
   627  			}
   628  			err = connection.CloseWrite()
   629  			if err != nil {
   630  				logger.Get(ctx).Debugf("close write error: %v", err)
   631  			}
   632  			close(inputDone)
   633  		}()
   634  	} else {
   635  		close(inputDone)
   636  	}
   637  
   638  	_, err = io.Copy(out, connection.Reader)
   639  	if err != nil {
   640  		return errors.Wrap(err, "ExecInContainer#copy")
   641  	}
   642  
   643  	<-inputDone
   644  
   645  	for {
   646  		inspected, err := c.ContainerExecInspect(ctx, execId.ID)
   647  		if err != nil {
   648  			return errors.Wrap(err, "ExecInContainer#inspect")
   649  		}
   650  
   651  		if inspected.Running {
   652  			continue
   653  		}
   654  
   655  		status := inspected.ExitCode
   656  		if status != 0 {
   657  			return ExitError{ExitCode: status}
   658  		}
   659  		return nil
   660  	}
   661  }
   662  
// Run creates and starts a container per opts (optionally pulling the image
// first), wires up background log streaming when Stdout/Stderr are set, and
// returns a RunResult exposing channels for the container's exit status and
// for log-copy completion. On any failure before the container starts, the
// created container is removed; after a successful start, cleanup is the
// caller's job via RunResult's tearDown.
func (c *Cli) Run(ctx context.Context, opts RunConfig) (RunResult, error) {
	if opts.Pull {
		namedRef, ok := opts.Image.(reference.Named)
		if !ok {
			return RunResult{}, fmt.Errorf("invalid reference type %T for pull", opts.Image)
		}
		if _, err := c.ImagePull(ctx, namedRef); err != nil {
			return RunResult{}, fmt.Errorf("error pulling image %q: %v", opts.Image, err)
		}
	}

	cc := &typescontainer.Config{
		Image:        opts.Image.String(),
		AttachStdout: opts.Stdout != nil,
		AttachStderr: opts.Stderr != nil,
		Cmd:          opts.Cmd,
		Labels:       BuiltLabelSet,
	}

	hc := &typescontainer.HostConfig{
		Mounts: opts.Mounts,
	}

	createResp, err := c.Client.ContainerCreate(ctx,
		cc,
		hc,
		nil,
		nil,
		opts.ContainerName,
	)
	if err != nil {
		return RunResult{}, fmt.Errorf("could not create container: %v", err)
	}

	// NOTE(review): tearDown ignores its containerID argument and always
	// removes createResp.ID; the two are identical at every call site here.
	tearDown := func(containerID string) error {
		return c.Client.ContainerRemove(ctx, createResp.ID, typescontainer.RemoveOptions{Force: true})
	}

	var containerStarted bool
	defer func(containerID string) {
		// make an effort to clean up any container we create but don't successfully start
		if containerStarted {
			return
		}
		if err := tearDown(containerID); err != nil {
			logger.Get(ctx).Debugf("Failed to remove container after error before start (id=%s): %v", createResp.ID, err)
		}
	}(createResp.ID)

	// Register the wait BEFORE starting so a fast exit can't be missed.
	statusCh, statusErrCh := c.Client.ContainerWait(ctx, createResp.ID, typescontainer.WaitConditionNextExit)
	// ContainerWait() can immediately write to the error channel before returning if it can't start the API request,
	// so catch these errors early (it _also_ can write to that channel later, so it's still passed to the RunResult)
	select {
	case err = <-statusErrCh:
		return RunResult{}, fmt.Errorf("could not wait for container (id=%s): %v", createResp.ID, err)
	default:
	}

	err = c.Client.ContainerStart(ctx, createResp.ID, typescontainer.StartOptions{})
	if err != nil {
		return RunResult{}, fmt.Errorf("could not start container (id=%s): %v", createResp.ID, err)
	}
	containerStarted = true

	logsErrCh := make(chan error, 1)
	if opts.Stdout != nil || opts.Stderr != nil {
		var logsResp io.ReadCloser
		logsResp, err = c.Client.ContainerLogs(
			ctx, createResp.ID, typescontainer.LogsOptions{
				ShowStdout: opts.Stdout != nil,
				ShowStderr: opts.Stderr != nil,
				Follow:     true,
			},
		)
		if err != nil {
			return RunResult{}, fmt.Errorf("could not read container logs: %v", err)
		}

		// Demultiplex the combined log stream into stdout/stderr in the
		// background; completion (or error) is reported via logsErrCh.
		go func() {
			stdout := opts.Stdout
			if stdout == nil {
				stdout = io.Discard
			}
			stderr := opts.Stderr
			if stderr == nil {
				stderr = io.Discard
			}

			_, err = stdcopy.StdCopy(stdout, stderr, logsResp)
			_ = logsResp.Close()
			logsErrCh <- err
		}()
	} else {
		// there is no I/O so immediately signal so that the result call doesn't block on it
		logsErrCh <- nil
	}

	result := RunResult{
		ContainerID:  createResp.ID,
		logsErrCh:    logsErrCh,
		statusRespCh: statusCh,
		statusErrCh:  statusErrCh,
		tearDown:     tearDown,
	}

	return result, nil
}