github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/docker/client.go

package docker

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"

	"github.com/blang/semver"
	"github.com/distribution/reference"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/config"
	"github.com/docker/docker/api/types"
	mobycontainer "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
	"github.com/docker/docker/registry"
	"github.com/docker/go-connections/tlsconfig"
	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/auth/authprovider"
	"github.com/moby/buildkit/session/filesync"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"

	"github.com/tilt-dev/tilt/internal/container"
	"github.com/tilt-dev/tilt/internal/docker/buildkit"
	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
	"github.com/tilt-dev/tilt/pkg/logger"
	"github.com/tilt-dev/tilt/pkg/model"
)
const (
	// Indicates that an image was built by Tilt's Docker client.
	BuiltLabel = "dev.tilt.built"

	// Indicates that an image is eligible for garbage collection
	// by Tilt's pruner.
	GCEnabledLabel = "dev.tilt.gc"
)

var (
	BuiltLabelSet = map[string]string{
		BuiltLabel:     "true",
		GCEnabledLabel: "true",
	}
)
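
// A minimal sketch (hypothetical call site) of how these labels get used:
// Tilt's pruner can select GC-eligible images with a label filter. Here cli
// is any Client from this package; filters and types are the imports above.
//
//	f := filters.NewArgs(filters.Arg("label", GCEnabledLabel+"=true"))
//	imgs, err := cli.ImageList(ctx, types.ImageListOptions{Filters: f})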

const clientSessionRemote = "client-session"

// Minimum Docker API version we've tested against.
// A good way to test old versions is to connect to an old version of Minikube,
// so that we connect to the docker server in minikube instead of futzing with
// the docker version on your machine.
// https://github.com/kubernetes/minikube/releases/tag/v0.13.1
var minDockerVersion = semver.MustParse("1.23.0")

var minDockerVersionStableBuildkit = semver.MustParse("1.39.0")
var minDockerVersionExperimentalBuildkit = semver.MustParse("1.38.0")

var versionTimeout = 5 * time.Second

// microk8s exposes its own docker socket
// https://github.com/ubuntu/microk8s/blob/master/docs/dockerd.md
const microK8sDockerHost = "unix:///var/snap/microk8s/current/docker.sock"

// Create an interface so this can be mocked out.
type Client interface {
	CheckConnected() error

	// If you'd like to call this Docker instance in a separate process, these
	// are the environment variables you'll need to do so.
	Env() Env

	// If you'd like to call this Docker instance in a separate process, this
	// is the default builder version you want (BuildKit or legacy).
	BuilderVersion(ctx context.Context) (types.BuilderVersion, error)

	ServerVersion(ctx context.Context) (types.Version, error)

	// Set the orchestrator we're talking to. This is only relevant to switchClient,
	// which can talk to either the local or in-cluster Docker daemon.
	SetOrchestrator(orc model.Orchestrator)
	// Return a client suitable for use with the given orchestrator. Only
	// relevant for the switchClient, which has clients for both types.
	ForOrchestrator(orc model.Orchestrator) Client

	ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
	ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
	ContainerRestartNoWait(ctx context.Context, containerID string) error

	Run(ctx context.Context, opts RunConfig) (RunResult, error)

	// Execute a command in a container, streaming the command output to `out`.
	// Returns an ExitError if the command exits with a non-zero exit code.
	ExecInContainer(ctx context.Context, cID container.ID, cmd model.Cmd, in io.Reader, out io.Writer) error

	ImagePull(ctx context.Context, ref reference.Named) (reference.Canonical, error)
	ImagePush(ctx context.Context, image reference.NamedTagged) (io.ReadCloser, error)
	ImageBuild(ctx context.Context, g *errgroup.Group, buildContext io.Reader, options BuildOptions) (types.ImageBuildResponse, error)
	ImageTag(ctx context.Context, source, target string) error
	ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error)
	ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error)
	ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error)

	NewVersionError(APIrequired, feature string) error
	BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
	ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
}

// Add-on interface for a client that manages multiple clients transparently.
type CompositeClient interface {
	Client
	DefaultLocalClient() Client
	DefaultClusterClient() Client
	ClientFor(cluster v1alpha1.Cluster) Client
	HasMultipleClients() bool
}

type ExitError struct {
	ExitCode int
}

func (e ExitError) Error() string {
	return fmt.Sprintf("Exec command exited with status code: %d", e.ExitCode)
}

func IsExitError(err error) bool {
	_, ok := err.(ExitError)
	return ok
}

var _ error = ExitError{}
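
// A minimal sketch (hypothetical container ID and command) of telling a
// non-zero exit apart from transport errors when running a command via
// ExecInContainer; model.ToUnixCmd is assumed from Tilt's pkg/model:
//
//	err := cli.ExecInContainer(ctx, container.ID("abc123"), model.ToUnixCmd("false"), nil, os.Stdout)
//	if IsExitError(err) {
//		code := err.(ExitError).ExitCode // the command ran, but failed
//		_ = code
//	}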

var _ Client = &Cli{}

type Cli struct {
	*client.Client

	authConfigsOnce func() map[string]types.AuthConfig
	env             Env

	versionsOnce   sync.Once
	builderVersion types.BuilderVersion
	serverVersion  types.Version
	versionError   error
}

func NewDockerClient(ctx context.Context, env Env) Client {
	if env.Error != nil {
		return newExplodingClient(env.Error)
	}

	d := env.Client.(*client.Client)

	return &Cli{
		Client:          d,
		env:             env,
		authConfigsOnce: sync.OnceValue(authConfigs),
	}
}

func SupportedVersion(v types.Version) bool {
	version, err := semver.ParseTolerant(v.APIVersion)
	if err != nil {
		// If the server version doesn't parse, we shouldn't even start
		return false
	}

	return version.GTE(minDockerVersion)
}

func getDockerBuilderVersion(v types.Version, env Env) (types.BuilderVersion, error) {
	// If the user has explicitly chosen to enable/disable buildkit, respect that.
	buildkitEnv := os.Getenv("DOCKER_BUILDKIT")
	if buildkitEnv != "" {
		buildkitEnabled, err := strconv.ParseBool(buildkitEnv)
		if err != nil {
			// This error message is copied from Docker, for consistency.
			return "", errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
		}
		if buildkitEnabled && SupportsBuildkit(v, env) {
			return types.BuilderBuildKit, nil
		}
		return types.BuilderV1, nil
	}

	if SupportsBuildkit(v, env) {
		return types.BuilderBuildKit, nil
	}
	return types.BuilderV1, nil
}
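
// A minimal sketch of the DOCKER_BUILDKIT override behavior (values must be
// accepted by strconv.ParseBool, e.g. "1", "true", "0", "false"):
//
//	DOCKER_BUILDKIT=1  -> BuildKit, if the server supports it; legacy otherwise
//	DOCKER_BUILDKIT=0  -> always the legacy builder
//	(unset)            -> auto-detect from the server version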

// Sadly, certain versions of docker return an error if the client requests
// buildkit. We have to infer whether it supports buildkit from version numbers.
//
// Inferred from release notes
// https://docs.docker.com/engine/release-notes/
func SupportsBuildkit(v types.Version, env Env) bool {
	if env.IsOldMinikube {
		// Buildkit for Minikube is busted on some versions. See
		// https://github.com/kubernetes/minikube/issues/4143
		return false
	}

	version, err := semver.ParseTolerant(v.APIVersion)
	if err != nil {
		// If the server version doesn't parse, disable buildkit
		return false
	}

	if minDockerVersionStableBuildkit.LTE(version) {
		return true
	}

	if minDockerVersionExperimentalBuildkit.LTE(version) && v.Experimental {
		return true
	}

	return false
}
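
// For example, a server reporting API version "1.40" parses tolerantly to
// 1.40.0, which satisfies the stable-BuildKit floor (1.39.0):
//
//	v, _ := semver.ParseTolerant("1.40")
//	fmt.Println(minDockerVersionStableBuildkit.LTE(v)) // true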

// Adapted from client.FromEnv
//
// Supported environment variables:
// DOCKER_HOST to set the URL to the Docker server.
// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
// DOCKER_CERT_PATH to load the TLS certificates from.
// DOCKER_TLS_VERIFY to enable or disable TLS verification; off by default.
func CreateClientOpts(envMap map[string]string) ([]client.Opt, error) {
	result := make([]client.Opt, 0)

	certPath := envMap["DOCKER_CERT_PATH"]
	tlsVerify := envMap["DOCKER_TLS_VERIFY"]
	if certPath != "" {
		options := tlsconfig.Options{
			CAFile:             filepath.Join(certPath, "ca.pem"),
			CertFile:           filepath.Join(certPath, "cert.pem"),
			KeyFile:            filepath.Join(certPath, "key.pem"),
			InsecureSkipVerify: tlsVerify == "",
		}
		tlsc, err := tlsconfig.Client(options)
		if err != nil {
			return nil, err
		}

		result = append(result, client.WithHTTPClient(&http.Client{
			Transport:     &http.Transport{TLSClientConfig: tlsc},
			CheckRedirect: client.CheckRedirect,
		}))
	}

	host := envMap["DOCKER_HOST"]
	if host != "" {
		result = append(result, client.WithHost(host))
	}

	apiVersion := envMap["DOCKER_API_VERSION"]
	if apiVersion != "" {
		result = append(result, client.WithVersion(apiVersion))
	} else {
		// WithAPIVersionNegotiation makes the Docker client negotiate down to a lower
		// version if Docker's current API version is newer than the server version.
		result = append(result, client.WithAPIVersionNegotiation())
	}

	return result, nil
}
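
// A minimal usage sketch (hypothetical host): build client options from a
// DOCKER_* environment map, then construct a Docker client from them.
//
//	opts, err := CreateClientOpts(map[string]string{
//		"DOCKER_HOST": "tcp://localhost:2376",
//	})
//	if err != nil {
//		return err
//	}
//	cli, err := client.NewClientWithOpts(opts...)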

func (c *Cli) initVersion(ctx context.Context) {
	c.versionsOnce.Do(func() {
		ctx, cancel := context.WithTimeout(ctx, versionTimeout)
		defer cancel()

		serverVersion, err := c.Client.ServerVersion(ctx)
		if err != nil {
			c.versionError = err
			return
		}

		if !SupportedVersion(serverVersion) {
			c.versionError = fmt.Errorf("Tilt requires a Docker server with API version %s or newer. Current Docker server: %s",
				minDockerVersion, serverVersion.APIVersion)
			return
		}

		builderVersion, err := getDockerBuilderVersion(serverVersion, c.env)
		if err != nil {
			c.versionError = err
			return
		}

		c.builderVersion = builderVersion
		c.serverVersion = serverVersion
	})
}

func (c *Cli) startBuildkitSession(ctx context.Context, g *errgroup.Group, key string, dirSource filesync.DirSource, sshSpecs []string, secretSpecs []string) (*session.Session, error) {
	sess, err := session.NewSession(ctx, "tilt", key)
	if err != nil {
		return nil, err
	}

	if dirSource != nil {
		sess.Allow(filesync.NewFSSyncProvider(dirSource))
	}

	dockerConfig := config.LoadDefaultConfigFile(
		logger.Get(ctx).Writer(logger.InfoLvl))
	provider := authprovider.NewDockerAuthProvider(dockerConfig)
	sess.Allow(provider)

	if len(secretSpecs) > 0 {
		ss, err := buildkit.ParseSecretSpecs(secretSpecs)
		if err != nil {
			return nil, errors.Wrapf(err, "could not parse secret: %v", secretSpecs)
		}
		sess.Allow(ss)
	}

	if len(sshSpecs) > 0 {
		sshp, err := buildkit.ParseSSHSpecs(sshSpecs)
		if err != nil {
			return nil, errors.Wrapf(err, "could not parse ssh: %v", sshSpecs)
		}
		sess.Allow(sshp)
	}

	g.Go(func() error {
		defer func() {
			_ = sess.Close()
		}()

		// Dial the daemon's /session endpoint and run the session protocol
		// over the hijacked connection.
		dialSession := func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
			return c.Client.DialHijack(ctx, "/session", proto, meta)
		}
		return sess.Run(ctx, dialSession)
	})
	return sess, nil
}

// When we pull from a private docker registry, we have to get credentials
// from somewhere. These credentials are not stored on the server. The client
// is responsible for managing them.
//
// Docker uses two different protocols:
//  1. In the legacy build engine, you have to get all the creds ahead of time
//     and pass them in the ImageBuild call.
//  2. In BuildKit, you have to create a persistent session. The client
//     side of the session manages a miniature server that just responds
//     to credential requests as the server asks for them.
//
// Protocol (1) is very slow. If you're using the gcloud credential store,
// fetching all the creds ahead of time can take ~3 seconds.
// Protocol (2) is more efficient, but also more complex to manage. We manage it lazily.
func authConfigs() map[string]types.AuthConfig {
	configFile := config.LoadDefaultConfigFile(io.Discard)

	// If we fail to get credentials for some reason, that's OK.
	// Even the docker CLI ignores this:
	// https://github.com/docker/cli/blob/23446275646041f9b598d64c51be24d5d0e49376/cli/command/image/build.go#L386
	credentials, _ := configFile.GetAllCredentials()
	authConfigs := make(map[string]types.AuthConfig, len(credentials))
	for k, auth := range credentials {
		authConfigs[k] = types.AuthConfig(auth)
	}
	return authConfigs
}
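
// The laziness comes from sync.OnceValue (see NewDockerClient above): the
// first call pays the credential-helper cost, later calls hit the cache.
//
//	authConfigsOnce := sync.OnceValue(authConfigs)
//	_ = authConfigsOnce() // slow: invokes credential helpers
//	_ = authConfigsOnce() // fast: returns the memoized map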

func (c *Cli) CheckConnected() error                  { return nil }
func (c *Cli) SetOrchestrator(orc model.Orchestrator) {}
func (c *Cli) ForOrchestrator(orc model.Orchestrator) Client {
	return c
}
func (c *Cli) Env() Env {
	return c.env
}

func (c *Cli) BuilderVersion(ctx context.Context) (types.BuilderVersion, error) {
	c.initVersion(ctx)
	return c.builderVersion, c.versionError
}

func (c *Cli) ServerVersion(ctx context.Context) (types.Version, error) {
	c.initVersion(ctx)
	return c.serverVersion, c.versionError
}

type encodedAuth string

func (c *Cli) authInfo(ctx context.Context, repoInfo *registry.RepositoryInfo, cmdName string) (encodedAuth, types.RequestPrivilegeFunc, error) {
	cli, err := newDockerCli(ctx)
	if err != nil {
		return "", nil, errors.Wrap(err, "authInfo")
	}
	authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index)
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, repoInfo.Index, cmdName)

	auth, err := command.EncodeAuthToBase64(authConfig)
	if err != nil {
		return "", nil, errors.Wrap(err, "authInfo#EncodeAuthToBase64")
	}
	return encodedAuth(auth), requestPrivilege, nil
}

func (c *Cli) ImagePull(ctx context.Context, ref reference.Named) (reference.Canonical, error) {
	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return nil, fmt.Errorf("could not parse registry for %q: %v", ref.String(), err)
	}

	encodedAuth, requestPrivilege, err := c.authInfo(ctx, repoInfo, "pull")
	if err != nil {
		return nil, fmt.Errorf("could not authenticate: %v", err)
	}

	image := ref.String()
	pullResp, err := c.Client.ImagePull(ctx, image, types.ImagePullOptions{
		RegistryAuth:  string(encodedAuth),
		PrivilegeFunc: requestPrivilege,
	})
	if err != nil {
		return nil, fmt.Errorf("could not pull image %q: %v", image, err)
	}
	defer func() {
		_ = pullResp.Close()
	}()

	// The /images/create API is a bit chaotic: it returns JSON lines of status as it
	// pulls, including ASCII progress bar animation etc., and makes no real guarantees.
	// The prevailing guidance is to inspect the image immediately afterwards to ensure
	// it was pulled successfully. (This is racy and could be improved by _trying_ to
	// get the digest out of this response and making sure it matches the result of
	// inspect, but Docker itself suffers from the same race during a `docker run` that
	// triggers a pull, so it's reasonable to deem it acceptable here as well.)
	_, err = io.Copy(io.Discard, pullResp)
	if err != nil {
		return nil, fmt.Errorf("connection error while pulling image %q: %v", image, err)
	}

	imgInspect, _, err := c.ImageInspectWithRaw(ctx, image)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect after pull for image %q: %v", image, err)
	}

	if len(imgInspect.RepoDigests) == 0 {
		return nil, fmt.Errorf("found no repo digests after pull for image %q", image)
	}

	pulledRef, err := reference.ParseNormalizedNamed(imgInspect.RepoDigests[0])
	if err != nil {
		return nil, fmt.Errorf("invalid reference %q for image %q: %v", imgInspect.RepoDigests[0], image, err)
	}
	cRef, ok := pulledRef.(reference.Canonical)
	if !ok {
		// this indicates a bug/behavior change within Docker because we just parsed a digest reference
		return nil, fmt.Errorf("reference %q is not canonical", pulledRef.String())
	}
	// The reference from the repo digest will be missing the tag (if one was specified),
	// so we attach the digest to the original reference to get something like
	// `docker.io/library/nginx:1.21.3@sha256:<hash>` for an input of
	// `docker.io/library/nginx:1.21.3`. (If we used the repo digest directly, it'd be
	// `docker.io/library/nginx@sha256:<hash>` with no tag; this ensures all parts are preserved.)
	cRef, err = reference.WithDigest(ref, cRef.Digest())
	if err != nil {
		return nil, fmt.Errorf("invalid digest for reference %q: %v", pulledRef.String(), err)
	}
	return cRef, nil
}
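
// A minimal usage sketch (hypothetical image): pull and get back a pinned
// reference that preserves both the tag and the digest.
//
//	ref, _ := reference.ParseNormalizedNamed("nginx:1.21.3")
//	pinned, err := cli.ImagePull(ctx, ref)
//	// on success: pinned.String() == "docker.io/library/nginx:1.21.3@sha256:<hash>"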

func (c *Cli) ImagePush(ctx context.Context, ref reference.NamedTagged) (io.ReadCloser, error) {
	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return nil, errors.Wrap(err, "ImagePush#ParseRepositoryInfo")
	}

	logger.Get(ctx).Infof("Authenticating to image repo: %s", repoInfo.Index.Name)
	encodedAuth, requestPrivilege, err := c.authInfo(ctx, repoInfo, "push")
	if err != nil {
		return nil, errors.Wrap(err, "ImagePush: authenticate")
	}

	options := types.ImagePushOptions{
		RegistryAuth:  string(encodedAuth),
		PrivilegeFunc: requestPrivilege,
	}

	if reference.Domain(ref) == "" {
		return nil, errors.New("ImagePush: no domain in container name")
	}
	logger.Get(ctx).Infof("Sending image data")
	return c.Client.ImagePush(ctx, ref.String(), options)
}

func (c *Cli) ImageBuild(ctx context.Context, g *errgroup.Group, buildContext io.Reader, options BuildOptions) (types.ImageBuildResponse, error) {
	// Always use a one-time session when using buildkit, since credential
	// passing is fast and we want to get the latest creds.
	// https://github.com/tilt-dev/tilt/issues/4043
	var oneTimeSession *session.Session
	sessionID := ""

	mustUseBuildkit := len(options.SSHSpecs) > 0 || len(options.SecretSpecs) > 0 || options.DirSource != nil
	builderVersion, err := c.BuilderVersion(ctx)
	if err != nil {
		return types.ImageBuildResponse{}, err
	}
	if options.ForceLegacyBuilder {
		builderVersion = types.BuilderV1
	}

	isUsingBuildkit := builderVersion == types.BuilderBuildKit
	if isUsingBuildkit {
		var err error
		oneTimeSession, err = c.startBuildkitSession(ctx, g, identity.NewID(), options.DirSource, options.SSHSpecs, options.SecretSpecs)
		if err != nil {
			return types.ImageBuildResponse{}, errors.Wrapf(err, "ImageBuild")
		}
		sessionID = oneTimeSession.ID()
	} else if mustUseBuildkit {
		return types.ImageBuildResponse{},
			fmt.Errorf("Docker SSH mounts, secrets, and synced directory sources only work on BuildKit, but BuildKit has been disabled")
	}

	opts := types.ImageBuildOptions{}
	opts.Version = builderVersion

	if isUsingBuildkit {
		opts.SessionID = sessionID
	} else {
		opts.AuthConfigs = c.authConfigsOnce()
	}

	opts.Remove = options.Remove
	opts.Context = options.Context
	opts.BuildArgs = options.BuildArgs
	opts.Dockerfile = options.Dockerfile
	opts.Tags = append([]string{}, options.ExtraTags...)
	opts.Target = options.Target
	opts.NetworkMode = options.Network
	opts.CacheFrom = options.CacheFrom
	opts.PullParent = options.PullParent
	opts.Platform = options.Platform
	opts.ExtraHosts = append([]string{}, options.ExtraHosts...)

	if options.DirSource != nil {
		opts.RemoteContext = clientSessionRemote
	}

	opts.Labels = BuiltLabelSet // label all images as built by us

	response, err := c.Client.ImageBuild(ctx, buildContext, opts)
	if err != nil {
		if oneTimeSession != nil {
			_ = oneTimeSession.Close()
		}
		return response, err
	}

	if oneTimeSession != nil {
		response.Body = WrapReadCloserWithTearDown(response.Body, oneTimeSession.Close)
	}
	return response, err
}
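
// A minimal usage sketch (hypothetical tag; buildContextTar is a tar stream of
// the build context, and the BuildOptions field names are those referenced in
// the method above): run a build and remember to drain/close the response body.
//
//	g, gctx := errgroup.WithContext(ctx)
//	resp, err := cli.ImageBuild(gctx, g, buildContextTar, BuildOptions{
//		Dockerfile: "Dockerfile",
//		ExtraTags:  []string{"example.com/app:dev"},
//	})
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()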

func (c *Cli) ContainerRestartNoWait(ctx context.Context, containerID string) error {
	// A zero timeout means: don't wait for the container to gracefully stop;
	// kill and restart it immediately.
	dur := 0

	return c.ContainerRestart(ctx, containerID, mobycontainer.StopOptions{
		Timeout: &dur,
	})
}

func (c *Cli) ExecInContainer(ctx context.Context, cID container.ID, cmd model.Cmd, in io.Reader, out io.Writer) error {
	attachStdin := in != nil
	cfg := types.ExecConfig{
		Cmd:          cmd.Argv,
		AttachStdout: true,
		AttachStderr: true,
		AttachStdin:  attachStdin,
		Tty:          !attachStdin,
	}

	// ContainerExecCreate error-handling is awful, so before we Create
	// we do a dummy inspect, to get more reasonable error messages. See:
	// https://github.com/docker/cli/blob/ae1618713f83e7da07317d579d0675f578de22fa/cli/command/container/exec.go#L77
	if _, err := c.ContainerInspect(ctx, cID.String()); err != nil {
		return errors.Wrap(err, "ExecInContainer")
	}

	execId, err := c.ContainerExecCreate(ctx, cID.String(), cfg)
	if err != nil {
		return errors.Wrap(err, "ExecInContainer#create")
	}

	connection, err := c.ContainerExecAttach(ctx, execId.ID, types.ExecStartCheck{Tty: true})
	if err != nil {
		return errors.Wrap(err, "ExecInContainer#attach")
	}
	defer connection.Close()

	err = c.ContainerExecStart(ctx, execId.ID, types.ExecStartCheck{})
	if err != nil {
		return errors.Wrap(err, "ExecInContainer#start")
	}

	_, err = fmt.Fprintf(out, "RUNNING: %s\n", cmd)
	if err != nil {
		return errors.Wrap(err, "ExecInContainer#print")
	}

	// If stdin is attached, stream it to the exec process, then close the
	// write side so the process sees EOF.
	inputDone := make(chan struct{})
	if attachStdin {
		go func() {
			_, err := io.Copy(connection.Conn, in)
			if err != nil {
				logger.Get(ctx).Debugf("copy error: %v", err)
			}
			err = connection.CloseWrite()
			if err != nil {
				logger.Get(ctx).Debugf("close write error: %v", err)
			}
			close(inputDone)
		}()
	} else {
		close(inputDone)
	}

	_, err = io.Copy(out, connection.Reader)
	if err != nil {
		return errors.Wrap(err, "ExecInContainer#copy")
	}

	<-inputDone

	// The output stream has closed, so the process should be done or nearly
	// done; poll until it's no longer running, then report its exit code.
	for {
		inspected, err := c.ContainerExecInspect(ctx, execId.ID)
		if err != nil {
			return errors.Wrap(err, "ExecInContainer#inspect")
		}

		if inspected.Running {
			continue
		}

		status := inspected.ExitCode
		if status != 0 {
			return ExitError{ExitCode: status}
		}
		return nil
	}
}

func (c *Cli) Run(ctx context.Context, opts RunConfig) (RunResult, error) {
	if opts.Pull {
		namedRef, ok := opts.Image.(reference.Named)
		if !ok {
			return RunResult{}, fmt.Errorf("invalid reference type %T for pull", opts.Image)
		}
		if _, err := c.ImagePull(ctx, namedRef); err != nil {
			return RunResult{}, fmt.Errorf("error pulling image %q: %v", opts.Image, err)
		}
	}

	cc := &mobycontainer.Config{
		Image:        opts.Image.String(),
		AttachStdout: opts.Stdout != nil,
		AttachStderr: opts.Stderr != nil,
		Cmd:          opts.Cmd,
		Labels:       BuiltLabelSet,
	}

	hc := &mobycontainer.HostConfig{
		Mounts: opts.Mounts,
	}

	createResp, err := c.Client.ContainerCreate(ctx,
		cc,
		hc,
		nil,
		nil,
		opts.ContainerName,
	)
	if err != nil {
		return RunResult{}, fmt.Errorf("could not create container: %v", err)
	}

	tearDown := func(containerID string) error {
		return c.Client.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{Force: true})
	}

	var containerStarted bool
	defer func(containerID string) {
		// make an effort to clean up any container we create but don't successfully start
		if containerStarted {
			return
		}
		if err := tearDown(containerID); err != nil {
			logger.Get(ctx).Debugf("Failed to remove container after error before start (id=%s): %v", containerID, err)
		}
	}(createResp.ID)

	statusCh, statusErrCh := c.Client.ContainerWait(ctx, createResp.ID, mobycontainer.WaitConditionNextExit)
	// ContainerWait() can immediately write to the error channel before returning if it can't start the API request,
	// so catch these errors early (it _also_ can write to that channel later, so it's still passed to the RunResult)
	select {
	case err = <-statusErrCh:
		return RunResult{}, fmt.Errorf("could not wait for container (id=%s): %v", createResp.ID, err)
	default:
	}

	err = c.Client.ContainerStart(ctx, createResp.ID, types.ContainerStartOptions{})
	if err != nil {
		return RunResult{}, fmt.Errorf("could not start container (id=%s): %v", createResp.ID, err)
	}
	containerStarted = true

	logsErrCh := make(chan error, 1)
	if opts.Stdout != nil || opts.Stderr != nil {
		var logsResp io.ReadCloser
		logsResp, err = c.Client.ContainerLogs(
			ctx, createResp.ID, types.ContainerLogsOptions{
				ShowStdout: opts.Stdout != nil,
				ShowStderr: opts.Stderr != nil,
				Follow:     true,
			},
		)
		if err != nil {
			return RunResult{}, fmt.Errorf("could not read container logs: %v", err)
		}

		go func() {
			stdout := opts.Stdout
			if stdout == nil {
				stdout = io.Discard
			}
			stderr := opts.Stderr
			if stderr == nil {
				stderr = io.Discard
			}

			// Use a local error variable so this goroutine doesn't race on
			// the enclosing function's err.
			_, copyErr := stdcopy.StdCopy(stdout, stderr, logsResp)
			_ = logsResp.Close()
			logsErrCh <- copyErr
		}()
	} else {
		// there is no I/O, so signal immediately so that the result call doesn't block on it
		logsErrCh <- nil
	}

	result := RunResult{
		ContainerID:  createResp.ID,
		logsErrCh:    logsErrCh,
		statusRespCh: statusCh,
		statusErrCh:  statusErrCh,
		tearDown:     tearDown,
	}

	return result, nil
}
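
// A minimal usage sketch (hypothetical image; RunConfig/RunResult fields are
// defined elsewhere in this package): run a container and stream its stdout.
//
//	ref, _ := reference.ParseNormalizedNamed("busybox:1.36")
//	res, err := cli.Run(ctx, RunConfig{
//		Image:  ref,
//		Stdout: os.Stdout,
//	})
//	if err != nil {
//		return err
//	}
//	_ = res.ContainerID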