github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/test/dockerutil/container.go (about)

     1  // Copyright 2020 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package dockerutil
    16  
    17  import (
    18  	"bytes"
    19  	"context"
    20  	"errors"
    21  	"fmt"
    22  	"io/ioutil"
    23  	"net"
    24  	"os"
    25  	"path"
    26  	"path/filepath"
    27  	"regexp"
    28  	"strconv"
    29  	"strings"
    30  	"time"
    31  
    32  	"github.com/docker/docker/api/types"
    33  	"github.com/docker/docker/api/types/container"
    34  	"github.com/docker/docker/api/types/mount"
    35  	"github.com/docker/docker/api/types/network"
    36  	"github.com/docker/docker/client"
    37  	"github.com/docker/docker/pkg/stdcopy"
    38  	"github.com/docker/go-connections/nat"
    39  	"github.com/SagerNet/gvisor/pkg/test/testutil"
    40  )
    41  
// Container represents a Docker Container allowing
// user to configure and control as one would with the 'docker'
// client. Container is backed by the official golang docker API.
// See: https://pkg.go.dev/github.com/docker/docker.
type Container struct {
	Name    string // unique container name; see makeContainer.
	runtime string // docker runtime to use; empty selects the daemon default.

	logger   testutil.Logger // destination for diagnostics.
	client   *client.Client  // docker API client, negotiated in makeContainer.
	id       string          // container ID assigned by the daemon on create.
	mounts   []mount.Mount   // accumulated mounts; consumed by hostConfig.
	links    []string        // NOTE(review): only read (by Remove); never assigned in this file — confirm intent.
	copyErr  error           // first error recorded by CopyFiles, if any.
	cleanups []func()        // callbacks run by CleanUp before Kill/Remove.

	// profile is the profiling hook associated with this container.
	profile *profile
}
    61  
// RunOpts are options for running a container.
type RunOpts struct {
	// Image is the image relative to images/. This will be mangled
	// appropriately, to ensure that only first-party images are used.
	Image string

	// Memory is the memory limit in bytes. Zero means no limit.
	Memory int

	// CpusetCpus is the set of cpus in which to allow execution
	// ("0", "1", "0-2").
	CpusetCpus string

	// Ports are the container ports to be exposed (all ports are
	// published to the host; see hostConfig's PublishAllPorts).
	Ports []int

	// WorkDir sets the working directory inside the container.
	WorkDir string

	// ReadOnly sets the read-only flag on the container's root filesystem.
	ReadOnly bool

	// Env are additional environment variables ("KEY=value" form).
	Env []string

	// User is the user to run as inside the container.
	User string

	// Privileged enables privileged mode.
	Privileged bool

	// CapAdd are the extra set of capabilities to add.
	CapAdd []string

	// CapDrop are the extra set of capabilities to drop.
	CapDrop []string

	// Mounts is the list of directories/files to be mounted inside the container.
	Mounts []mount.Mount

	// Links is the list of containers to be connected to the container.
	Links []string
}
   104  
   105  func makeContainer(ctx context.Context, logger testutil.Logger, runtime string) *Container {
   106  	// Slashes are not allowed in container names.
   107  	name := testutil.RandomID(logger.Name())
   108  	name = strings.ReplaceAll(name, "/", "-")
   109  	client, err := client.NewClientWithOpts(client.FromEnv)
   110  	if err != nil {
   111  		return nil
   112  	}
   113  	client.NegotiateAPIVersion(ctx)
   114  	return &Container{
   115  		logger:  logger,
   116  		Name:    name,
   117  		runtime: runtime,
   118  		client:  client,
   119  	}
   120  }
   121  
// MakeContainer constructs a suitable Container object.
//
// The runtime used is determined by the runtime flag.
//
// Containers will check flags for profiling requests.
// Returns nil if the docker client cannot be created (see makeContainer).
func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
	return makeContainer(ctx, logger, *runtime)
}
   130  
// MakeNativeContainer constructs a suitable Container object.
//
// The runtime used will be the system default (empty runtime string).
//
// Native containers aren't profiled.
// Returns nil if the docker client cannot be created (see makeContainer).
func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
	return makeContainer(ctx, logger, "" /*runtime*/)
}
   139  
   140  // Spawn is analogous to 'docker run -d'.
   141  func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error {
   142  	if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
   143  		return err
   144  	}
   145  	return c.Start(ctx)
   146  }
   147  
   148  // SpawnProcess is analogous to 'docker run -it'. It returns a process
   149  // which represents the root process.
   150  func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) (Process, error) {
   151  	config, hostconf, netconf := c.ConfigsFrom(r, args...)
   152  	config.Tty = true
   153  	config.OpenStdin = true
   154  
   155  	if err := c.CreateFrom(ctx, r.Image, config, hostconf, netconf); err != nil {
   156  		return Process{}, err
   157  	}
   158  
   159  	// Open a connection to the container for parsing logs and for TTY.
   160  	stream, err := c.client.ContainerAttach(ctx, c.id,
   161  		types.ContainerAttachOptions{
   162  			Stream: true,
   163  			Stdin:  true,
   164  			Stdout: true,
   165  			Stderr: true,
   166  		})
   167  	if err != nil {
   168  		return Process{}, fmt.Errorf("connect failed container id %s: %v", c.id, err)
   169  	}
   170  
   171  	c.cleanups = append(c.cleanups, func() { stream.Close() })
   172  
   173  	if err := c.Start(ctx); err != nil {
   174  		return Process{}, err
   175  	}
   176  
   177  	return Process{container: c, conn: stream}, nil
   178  }
   179  
   180  // Run is analogous to 'docker run'.
   181  func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) {
   182  	if err := c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil); err != nil {
   183  		return "", err
   184  	}
   185  
   186  	if err := c.Start(ctx); err != nil {
   187  		return "", err
   188  	}
   189  
   190  	if err := c.Wait(ctx); err != nil {
   191  		return "", err
   192  	}
   193  
   194  	return c.Logs(ctx)
   195  }
   196  
   197  // ConfigsFrom returns container configs from RunOpts and args. The caller should call 'CreateFrom'
   198  // and Start.
   199  func (c *Container) ConfigsFrom(r RunOpts, args ...string) (*container.Config, *container.HostConfig, *network.NetworkingConfig) {
   200  	return c.config(r, args), c.hostConfig(r), &network.NetworkingConfig{}
   201  }
   202  
   203  // MakeLink formats a link to add to a RunOpts.
   204  func (c *Container) MakeLink(target string) string {
   205  	return fmt.Sprintf("%s:%s", c.Name, target)
   206  }
   207  
// CreateFrom creates a container from the given configs (typically those
// produced by ConfigsFrom, possibly modified by the caller).
func (c *Container) CreateFrom(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
	return c.create(ctx, profileImage, conf, hostconf, netconf)
}
   212  
// Create is analogous to 'docker create': it builds configs from RunOpts
// and creates the container without starting it.
func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error {
	return c.create(ctx, r.Image, c.config(r, args), c.hostConfig(r), nil)
}
   217  
   218  func (c *Container) create(ctx context.Context, profileImage string, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
   219  	if c.runtime != "" && c.runtime != "runc" {
   220  		// Use the image name as provided here; which normally represents the
   221  		// unmodified "basic/alpine" image name. This should be easy to grok.
   222  		c.profileInit(profileImage)
   223  	}
   224  	cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, c.Name)
   225  	if err != nil {
   226  		return err
   227  	}
   228  	c.id = cont.ID
   229  	return nil
   230  }
   231  
   232  func (c *Container) config(r RunOpts, args []string) *container.Config {
   233  	ports := nat.PortSet{}
   234  	for _, p := range r.Ports {
   235  		port := nat.Port(fmt.Sprintf("%d", p))
   236  		ports[port] = struct{}{}
   237  	}
   238  	env := append(r.Env, fmt.Sprintf("RUNSC_TEST_NAME=%s", c.Name))
   239  
   240  	return &container.Config{
   241  		Image:        testutil.ImageByName(r.Image),
   242  		Cmd:          args,
   243  		ExposedPorts: ports,
   244  		Env:          env,
   245  		WorkingDir:   r.WorkDir,
   246  		User:         r.User,
   247  	}
   248  }
   249  
   250  func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
   251  	c.mounts = append(c.mounts, r.Mounts...)
   252  
   253  	return &container.HostConfig{
   254  		Runtime:         c.runtime,
   255  		Mounts:          c.mounts,
   256  		PublishAllPorts: true,
   257  		Links:           r.Links,
   258  		CapAdd:          r.CapAdd,
   259  		CapDrop:         r.CapDrop,
   260  		Privileged:      r.Privileged,
   261  		ReadonlyRootfs:  r.ReadOnly,
   262  		Resources: container.Resources{
   263  			Memory:     int64(r.Memory), // In bytes.
   264  			CpusetCpus: r.CpusetCpus,
   265  		},
   266  	}
   267  }
   268  
   269  // Start is analogous to 'docker start'.
   270  func (c *Container) Start(ctx context.Context) error {
   271  	if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
   272  		return fmt.Errorf("ContainerStart failed: %v", err)
   273  	}
   274  
   275  	if c.profile != nil {
   276  		if err := c.profile.Start(c); err != nil {
   277  			c.logger.Logf("profile.Start failed: %v", err)
   278  		}
   279  	}
   280  
   281  	return nil
   282  }
   283  
// Stop is analogous to 'docker stop'. The nil timeout lets the docker
// daemon apply its default stop timeout.
func (c *Container) Stop(ctx context.Context) error {
	return c.client.ContainerStop(ctx, c.id, nil)
}
   288  
// Pause is analogous to 'docker pause'.
func (c *Container) Pause(ctx context.Context) error {
	return c.client.ContainerPause(ctx, c.id)
}
   293  
// Unpause is analogous to 'docker unpause'.
func (c *Container) Unpause(ctx context.Context) error {
	return c.client.ContainerUnpause(ctx, c.id)
}
   298  
// Checkpoint is analogous to 'docker checkpoint'. The container exits
// after the checkpoint is taken (Exit: true).
// NOTE(review): this addresses the container by c.Name, while sibling
// methods use c.id — confirm the asymmetry is intended.
func (c *Container) Checkpoint(ctx context.Context, name string) error {
	return c.client.CheckpointCreate(ctx, c.Name, types.CheckpointCreateOptions{CheckpointID: name, Exit: true})
}
   303  
// Restore is analogous to 'docker start --checkpoint [name]'.
func (c *Container) Restore(ctx context.Context, name string) error {
	return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{CheckpointID: name})
}
   308  
   309  // Logs is analogous 'docker logs'.
   310  func (c *Container) Logs(ctx context.Context) (string, error) {
   311  	var out bytes.Buffer
   312  	err := c.logs(ctx, &out, &out)
   313  	return out.String(), err
   314  }
   315  
   316  func (c *Container) logs(ctx context.Context, stdout, stderr *bytes.Buffer) error {
   317  	opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true}
   318  	writer, err := c.client.ContainerLogs(ctx, c.id, opts)
   319  	if err != nil {
   320  		return err
   321  	}
   322  	defer writer.Close()
   323  	_, err = stdcopy.StdCopy(stdout, stderr, writer)
   324  
   325  	return err
   326  }
   327  
// ID returns the container id assigned by the daemon on create; it is
// empty until create has succeeded.
func (c *Container) ID() string {
	return c.id
}
   332  
   333  // SandboxPid returns the container's pid.
   334  func (c *Container) SandboxPid(ctx context.Context) (int, error) {
   335  	resp, err := c.client.ContainerInspect(ctx, c.id)
   336  	if err != nil {
   337  		return -1, err
   338  	}
   339  	return resp.ContainerJSONBase.State.Pid, nil
   340  }
   341  
// ErrNoIP indicates that no IP address is available; returned by FindIP
// when the inspected container has no parseable address.
var ErrNoIP = errors.New("no IP available")
   344  
   345  // FindIP returns the IP address of the container.
   346  func (c *Container) FindIP(ctx context.Context, ipv6 bool) (net.IP, error) {
   347  	resp, err := c.client.ContainerInspect(ctx, c.id)
   348  	if err != nil {
   349  		return nil, err
   350  	}
   351  
   352  	var ip net.IP
   353  	if ipv6 {
   354  		ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.GlobalIPv6Address)
   355  	} else {
   356  		ip = net.ParseIP(resp.NetworkSettings.DefaultNetworkSettings.IPAddress)
   357  	}
   358  	if ip == nil {
   359  		return net.IP{}, ErrNoIP
   360  	}
   361  	return ip, nil
   362  }
   363  
   364  // FindPort returns the host port that is mapped to 'sandboxPort'.
   365  func (c *Container) FindPort(ctx context.Context, sandboxPort int) (int, error) {
   366  	desc, err := c.client.ContainerInspect(ctx, c.id)
   367  	if err != nil {
   368  		return -1, fmt.Errorf("error retrieving port: %v", err)
   369  	}
   370  
   371  	format := fmt.Sprintf("%d/tcp", sandboxPort)
   372  	ports, ok := desc.NetworkSettings.Ports[nat.Port(format)]
   373  	if !ok {
   374  		return -1, fmt.Errorf("error retrieving port: %v", err)
   375  
   376  	}
   377  
   378  	port, err := strconv.Atoi(ports[0].HostPort)
   379  	if err != nil {
   380  		return -1, fmt.Errorf("error parsing port %q: %v", port, err)
   381  	}
   382  	return port, nil
   383  }
   384  
// CopyFiles copies the given sources into a fresh temp directory and
// registers a bind mount of that directory at target inside the
// container. Note: despite the original comment claiming the files are
// "always ReadOnly", the mount is created with ReadOnly: false (i.e.
// read-write). Failures are recorded in c.copyErr rather than returned.
func (c *Container) CopyFiles(opts *RunOpts, target string, sources ...string) {
	dir, err := ioutil.TempDir("", c.Name)
	if err != nil {
		c.copyErr = fmt.Errorf("ioutil.TempDir failed: %v", err)
		return
	}
	// The temp dir is removed during CleanUp.
	c.cleanups = append(c.cleanups, func() { os.RemoveAll(dir) })
	// World-readable so the container user can access the files.
	if err := os.Chmod(dir, 0755); err != nil {
		c.copyErr = fmt.Errorf("os.Chmod(%q, 0755) failed: %v", dir, err)
		return
	}
	for _, name := range sources {
		src := name
		if !filepath.IsAbs(src) {
			// Relative sources are resolved via the test file finder.
			src, err = testutil.FindFile(name)
			if err != nil {
				c.copyErr = fmt.Errorf("testutil.FindFile(%q) failed: %w", name, err)
				return
			}
		}
		dst := path.Join(dir, path.Base(name))
		if err := testutil.Copy(src, dst); err != nil {
			c.copyErr = fmt.Errorf("testutil.Copy(%q, %q) failed: %v", src, dst, err)
			return
		}
		c.logger.Logf("copy: %s -> %s", src, dst)
	}
	opts.Mounts = append(opts.Mounts, mount.Mount{
		Type:     mount.TypeBind,
		Source:   dir,
		Target:   target,
		ReadOnly: false,
	})
}
   420  
   421  // Status inspects the container returns its status.
   422  func (c *Container) Status(ctx context.Context) (types.ContainerState, error) {
   423  	resp, err := c.client.ContainerInspect(ctx, c.id)
   424  	if err != nil {
   425  		return types.ContainerState{}, err
   426  	}
   427  	return *resp.State, err
   428  }
   429  
   430  // Wait waits for the container to exit.
   431  func (c *Container) Wait(ctx context.Context) error {
   432  	defer c.stopProfiling()
   433  	statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
   434  	select {
   435  	case err := <-errChan:
   436  		return err
   437  	case res := <-statusChan:
   438  		if res.StatusCode != 0 {
   439  			var msg string
   440  			if res.Error != nil {
   441  				msg = res.Error.Message
   442  			}
   443  			return fmt.Errorf("container returned non-zero status: %d, msg: %q", res.StatusCode, msg)
   444  		}
   445  		return nil
   446  	}
   447  }
   448  
   449  // WaitTimeout waits for the container to exit with a timeout.
   450  func (c *Container) WaitTimeout(ctx context.Context, timeout time.Duration) error {
   451  	ctx, cancel := context.WithTimeout(ctx, timeout)
   452  	defer cancel()
   453  	statusChan, errChan := c.client.ContainerWait(ctx, c.id, container.WaitConditionNotRunning)
   454  	select {
   455  	case <-ctx.Done():
   456  		if ctx.Err() == context.DeadlineExceeded {
   457  			return fmt.Errorf("container %s timed out after %v seconds", c.Name, timeout.Seconds())
   458  		}
   459  		return nil
   460  	case err := <-errChan:
   461  		return err
   462  	case <-statusChan:
   463  		return nil
   464  	}
   465  }
   466  
   467  // WaitForOutput searches container logs for pattern and returns or timesout.
   468  func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout time.Duration) (string, error) {
   469  	matches, err := c.WaitForOutputSubmatch(ctx, pattern, timeout)
   470  	if err != nil {
   471  		return "", err
   472  	}
   473  	if len(matches) == 0 {
   474  		return "", fmt.Errorf("didn't find pattern %s logs", pattern)
   475  	}
   476  	return matches[0], nil
   477  }
   478  
   479  // WaitForOutputSubmatch searches container logs for the given
   480  // pattern or times out. It returns any regexp submatches as well.
   481  func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
   482  	ctx, cancel := context.WithTimeout(ctx, timeout)
   483  	defer cancel()
   484  	re := regexp.MustCompile(pattern)
   485  	for {
   486  		logs, err := c.Logs(ctx)
   487  		if err != nil {
   488  			return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
   489  		}
   490  		if matches := re.FindStringSubmatch(logs); matches != nil {
   491  			return matches, nil
   492  		}
   493  		time.Sleep(50 * time.Millisecond)
   494  	}
   495  }
   496  
   497  // stopProfiling stops profiling.
   498  func (c *Container) stopProfiling() {
   499  	if c.profile != nil {
   500  		if err := c.profile.Stop(c); err != nil {
   501  			// This most likely means that the runtime for the container
   502  			// was too short to connect and actually get a profile.
   503  			c.logger.Logf("warning: profile.Stop failed: %v", err)
   504  		}
   505  	}
   506  }
   507  
   508  // Kill kills the container.
   509  func (c *Container) Kill(ctx context.Context) error {
   510  	defer c.stopProfiling()
   511  	return c.client.ContainerKill(ctx, c.id, "")
   512  }
   513  
   514  // Remove is analogous to 'docker rm'.
   515  func (c *Container) Remove(ctx context.Context) error {
   516  	// Remove the image.
   517  	remove := types.ContainerRemoveOptions{
   518  		RemoveVolumes: c.mounts != nil,
   519  		RemoveLinks:   c.links != nil,
   520  		Force:         true,
   521  	}
   522  	return c.client.ContainerRemove(ctx, c.Name, remove)
   523  }
   524  
   525  // CleanUp kills and deletes the container (best effort).
   526  func (c *Container) CleanUp(ctx context.Context) {
   527  	// Execute all cleanups. We execute cleanups here to close any
   528  	// open connections to the container before closing. Open connections
   529  	// can cause Kill and Remove to hang.
   530  	for _, c := range c.cleanups {
   531  		c()
   532  	}
   533  	c.cleanups = nil
   534  
   535  	// Kill the container.
   536  	if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
   537  		// Just log; can't do anything here.
   538  		c.logger.Logf("error killing container %q: %v", c.Name, err)
   539  	}
   540  
   541  	// Remove the image.
   542  	if err := c.Remove(ctx); err != nil {
   543  		c.logger.Logf("error removing container %q: %v", c.Name, err)
   544  	}
   545  
   546  	// Forget all mounts.
   547  	c.mounts = nil
   548  }