github.com/bigcommerce/nomad@v0.9.3-bc/drivers/docker/driver_test.go

     1  package docker
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"math/rand"
     8  	"path/filepath"
     9  	"reflect"
    10  	"runtime"
    11  	"runtime/debug"
    12  	"strings"
    13  	"testing"
    14  	"time"
    15  
    16  	docker "github.com/fsouza/go-dockerclient"
    17  	"github.com/hashicorp/consul/lib/freeport"
    18  	hclog "github.com/hashicorp/go-hclog"
    19  	"github.com/hashicorp/nomad/client/taskenv"
    20  	"github.com/hashicorp/nomad/client/testutil"
    21  	"github.com/hashicorp/nomad/devices/gpu/nvidia"
    22  	"github.com/hashicorp/nomad/helper/pluginutils/loader"
    23  	"github.com/hashicorp/nomad/helper/testlog"
    24  	"github.com/hashicorp/nomad/helper/uuid"
    25  	"github.com/hashicorp/nomad/nomad/structs"
    26  	"github.com/hashicorp/nomad/plugins/base"
    27  	"github.com/hashicorp/nomad/plugins/drivers"
    28  	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
    29  	tu "github.com/hashicorp/nomad/testutil"
    30  	"github.com/stretchr/testify/assert"
    31  	"github.com/stretchr/testify/require"
    32  )
    33  
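        // basicResources is a minimal Resources block shared by tests that build
        // their TaskConfig by hand and do not need port mappings.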
    34  var (
    35  	basicResources = &drivers.Resources{
    36  		NomadResources: &structs.AllocatedTaskResources{
    37  			Memory: structs.AllocatedMemoryResources{
    38  				MemoryMB: 256,
    39  			},
    40  			Cpu: structs.AllocatedCpuResources{
    41  				CpuShares: 250,
    42  			},
    43  		},
    44  		LinuxResources: &drivers.LinuxResources{
    45  			CPUShares:        512,
    46  			MemoryLimitBytes: 256 * 1024 * 1024,
    47  		},
    48  	}
    49  )
    50  
    51  func dockerIsRemote(t *testing.T) bool {
    52  	client, err := docker.NewClientFromEnv()
    53  	if err != nil {
    54  		return false
    55  	}
    56  
    57  	// Technically this could be a local tcp socket but for testing purposes
    58  	// we'll just assume that tcp is only used for remote connections.
    59  	return strings.HasPrefix(client.Endpoint(), "tcp")
    63  }
    64  
    65  var (
    66  	// busyboxLongRunningCmd is a busybox command that runs indefinitely, and
    67  	// ideally responds to SIGINT/SIGTERM.  Sadly, busybox:1.29.3 /bin/sleep doesn't.
    68  	busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"}
    69  )
    70  
    71  // dockerTask returns a test task and driver config with one reserved and one
    72  // dynamic port. The ports are returned in that order: reserved, then dynamic.
    73  func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
    74  	ports := freeport.GetT(t, 2)
    75  	dockerReserved := ports[0]
    76  	dockerDynamic := ports[1]
    77  
    78  	cfg := newTaskConfig("", busyboxLongRunningCmd)
    79  	task := &drivers.TaskConfig{
    80  		ID:      uuid.Generate(),
    81  		Name:    "redis-demo",
    82  		AllocID: uuid.Generate(),
    83  		Env: map[string]string{
    84  			"test": t.Name(),
    85  		},
    86  		DeviceEnv: make(map[string]string),
    87  		Resources: &drivers.Resources{
    88  			NomadResources: &structs.AllocatedTaskResources{
    89  				Memory: structs.AllocatedMemoryResources{
    90  					MemoryMB: 256,
    91  				},
    92  				Cpu: structs.AllocatedCpuResources{
    93  					CpuShares: 512,
    94  				},
    95  				Networks: []*structs.NetworkResource{
    96  					{
    97  						IP:            "127.0.0.1",
    98  						ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}},
    99  						DynamicPorts:  []structs.Port{{Label: "REDIS", Value: dockerDynamic}},
   100  					},
   101  				},
   102  			},
   103  			LinuxResources: &drivers.LinuxResources{
   104  				CPUShares:        512,
   105  				MemoryLimitBytes: 256 * 1024 * 1024,
   106  			},
   107  		},
   108  	}
   109  
   110  	require.NoError(t, task.EncodeConcreteDriverConfig(&cfg))
   111  
   112  	return task, &cfg, ports
   113  }
   114  
   115  // dockerSetup does all of the basic setup you need to get a docker task up and
   116  // running for testing. Use like:
   117  //
   118  //	task, cfg, _ := dockerTask(t)
   119  //	// do custom task configuration
   120  //	client, driver, handle, cleanup := dockerSetup(t, task)
   121  //	defer cleanup()
   122  //	// do test stuff
   123  //
   124  // If there is a problem during setup this function will abort or skip the test
   125  // and indicate the reason.
   126  func dockerSetup(t *testing.T, task *drivers.TaskConfig) (*docker.Client, *dtestutil.DriverHarness, *taskHandle, func()) {
   127  	client := newTestDockerClient(t)
   128  	driver := dockerDriverHarness(t, nil)
   129  	cleanup := driver.MkAllocDir(task, true)
   130  
   131  	copyImage(t, task.TaskDir(), "busybox.tar")
   132  	_, _, err := driver.StartTask(task)
   133  	require.NoError(t, err)
   134  
   135  	dockerDriver, ok := driver.Impl().(*Driver)
   136  	require.True(t, ok)
   137  	handle, ok := dockerDriver.tasks.Get(task.ID)
   138  	require.True(t, ok)
   139  
   140  	return client, driver, handle, func() {
   141  		driver.DestroyTask(task.ID, true)
   142  		cleanup()
   143  	}
   144  }
   145  
   146  // cleanSlate removes the specified docker image, including potentially stopping/removing any
   147  // containers based on that image. This is used to decouple tests that would be coupled
   148  // by using the same container image.
   149  func cleanSlate(client *docker.Client, imageID string) {
   150  	if img, _ := client.InspectImage(imageID); img == nil {
   151  		return
   152  	}
   153  	containers, _ := client.ListContainers(docker.ListContainersOptions{
   154  		All: true,
   155  		Filters: map[string][]string{
   156  			"ancestor": {imageID},
   157  		},
   158  	})
   159  	for _, c := range containers {
   160  		client.RemoveContainer(docker.RemoveContainerOptions{
   161  			Force: true,
   162  			ID:    c.ID,
   163  		})
   164  	}
   165  	client.RemoveImageExtended(imageID, docker.RemoveImageOptions{
   166  		Force: true,
   167  	})
   169  }
   170  
   171  // dockerDriverHarness wires up everything needed to launch a task with a docker driver.
   172  // It returns a DriverHarness wrapping the loaded docker driver plugin.
   173  func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.DriverHarness {
   174  	logger := testlog.HCLogger(t)
   175  	harness := dtestutil.NewDriverHarness(t, NewDockerDriver(logger))
   176  	if cfg == nil {
   177  		cfg = map[string]interface{}{
   178  			"gc": map[string]interface{}{
   179  				"image_delay": "1s",
   180  			},
   181  		}
   182  	}
   183  	plugLoader, err := loader.NewPluginLoader(&loader.PluginLoaderConfig{
   184  		Logger:            logger,
   185  		PluginDir:         "./plugins",
   186  		SupportedVersions: loader.AgentSupportedApiVersions,
   187  		InternalPlugins: map[loader.PluginID]*loader.InternalPluginConfig{
   188  			PluginID: {
   189  				Config: cfg,
   190  				Factory: func(hclog.Logger) interface{} {
   191  					return harness
   192  				},
   193  			},
   194  		},
   195  	})
   196  
   197  	require.NoError(t, err)
   198  	instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger)
   199  	require.NoError(t, err)
   200  	driver, ok := instance.Plugin().(*dtestutil.DriverHarness)
   201  	if !ok {
   202  		t.Fatal("plugin instance is not a driver... wat?")
   203  	}
   204  
   205  	return driver
   206  }
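        // A sketch of how tests below override the plugin configuration (see the
        // image GC tests for concrete uses); the map is passed to the plugin loader
        // as the docker driver's config:
        //
        //	d := dockerDriverHarness(t, map[string]interface{}{
        //		"gc": map[string]interface{}{"image": false, "image_delay": "1s"},
        //	})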
   207  
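        // newTestDockerClient skips the test unless Docker is available and returns a
        // client configured from the standard environment variables (e.g. DOCKER_HOST).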
   208  func newTestDockerClient(t *testing.T) *docker.Client {
   209  	t.Helper()
   210  	testutil.DockerCompatible(t)
   211  
   212  	client, err := docker.NewClientFromEnv()
   213  	if err != nil {
   214  		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
   215  	}
   216  	return client
   217  }
   218  
   219  /*
   220  // This test should always pass, even if docker daemon is not available
   221  func TestDockerDriver_Fingerprint(t *testing.T) {
   222  	if !tu.IsCI() {
   223  		t.Parallel()
   224  	}
   225  
   226  	ctx := testDockerDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources})
   227  	//ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
   228  	defer ctx.Destroy()
   229  	d := NewDockerDriver(ctx.DriverCtx)
   230  	node := &structs.Node{
   231  		Attributes: make(map[string]string),
   232  	}
   233  
   234  	request := &fingerprint.FingerprintRequest{Config: &config.Config{}, Node: node}
   235  	var response fingerprint.FingerprintResponse
   236  	err := d.Fingerprint(request, &response)
   237  	if err != nil {
   238  		t.Fatalf("err: %v", err)
   239  	}
   240  
   241  	attributes := response.Attributes
   242  	if testutil.DockerIsConnected(t) && attributes["driver.docker"] == "" {
   243  		t.Fatalf("Fingerprinter should detect when docker is available")
   244  	}
   245  
   246  	if attributes["driver.docker"] != "1" {
   247  		t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.")
   248  	} else {
   249  
   250  		// if docker is available, make sure that the response is tagged as
   251  		// applicable
   252  		if !response.Detected {
   253  			t.Fatalf("expected response to be applicable")
   254  		}
   255  	}
   256  
   257  	t.Logf("Found docker version %s", attributes["driver.docker.version"])
   258  }
   259  
   260  // TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set
   261  // the bridge network's IP as a node attribute. See #2785
   262  func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
   263  	if !tu.IsCI() {
   264  		t.Parallel()
   265  	}
   266  	testutil.DockerCompatible(t)
   267  	if runtime.GOOS != "linux" {
   268  		t.Skip("expect only on linux")
   269  	}
   270  
   271  	// This seems fragile, so we might need to reconsider this test if it
   272  	// proves flaky
   273  	expectedAddr, err := sockaddr.GetInterfaceIP("docker0")
   274  	if err != nil {
   275  		t.Fatalf("unable to get ip for docker0: %v", err)
   276  	}
   277  	if expectedAddr == "" {
   278  		t.Fatalf("unable to get ip for docker bridge")
   279  	}
   280  
   281  	conf := testConfig(t)
   282  	conf.Node = mock.Node()
   283  	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))
   284  
   285  	request := &fingerprint.FingerprintRequest{Config: conf, Node: conf.Node}
   286  	var response fingerprint.FingerprintResponse
   287  
   288  	err = dd.Fingerprint(request, &response)
   289  	if err != nil {
   290  		t.Fatalf("error fingerprinting docker: %v", err)
   291  	}
   292  
   293  	if !response.Detected {
   294  		t.Fatalf("expected response to be applicable")
   295  	}
   296  
   297  	attributes := response.Attributes
   298  	if attributes == nil {
   299  		t.Fatalf("expected attributes to be set")
   300  	}
   301  
   302  	if attributes["driver.docker"] == "" {
   303  		t.Fatalf("expected Docker to be enabled but false was returned")
   304  	}
   305  
   306  	if found := attributes["driver.docker.bridge_ip"]; found != expectedAddr {
   307  		t.Fatalf("expected bridge ip %q but found: %q", expectedAddr, found)
   308  	}
   309  	t.Logf("docker bridge ip: %q", attributes["driver.docker.bridge_ip"])
   310  }
   311  
   312  func TestDockerDriver_Check_DockerHealthStatus(t *testing.T) {
   313  	if !tu.IsCI() {
   314  		t.Parallel()
   315  	}
   316  	testutil.DockerCompatible(t)
   317  	if runtime.GOOS != "linux" {
   318  		t.Skip("expect only on linux")
   319  	}
   320  
   321  	require := require.New(t)
   322  
   323  	expectedAddr, err := sockaddr.GetInterfaceIP("docker0")
   324  	if err != nil {
   325  		t.Fatalf("unable to get ip for docker0: %v", err)
   326  	}
   327  	if expectedAddr == "" {
   328  		t.Fatalf("unable to get ip for docker bridge")
   329  	}
   330  
   331  	conf := testConfig(t)
   332  	conf.Node = mock.Node()
   333  	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))
   334  
   335  	request := &cstructs.HealthCheckRequest{}
   336  	var response cstructs.HealthCheckResponse
   337  
   338  	dc, ok := dd.(fingerprint.HealthCheck)
   339  	require.True(ok)
   340  	err = dc.HealthCheck(request, &response)
   341  	require.Nil(err)
   342  
   343  	driverInfo := response.Drivers["docker"]
   344  	require.NotNil(driverInfo)
   345  	require.True(driverInfo.Healthy)
   346  }*/
   347  
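        // TestDockerDriver_Start_Wait starts a long-running busybox task and asserts
        // that it is still running (no exit result arrives) after a short wait.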
   348  func TestDockerDriver_Start_Wait(t *testing.T) {
   349  	if !tu.IsCI() {
   350  		t.Parallel()
   351  	}
   352  	testutil.DockerCompatible(t)
   353  
   354  	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
   355  	task := &drivers.TaskConfig{
   356  		ID:        uuid.Generate(),
   357  		Name:      "nc-demo",
   358  		AllocID:   uuid.Generate(),
   359  		Resources: basicResources,
   360  	}
   361  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   362  
   363  	d := dockerDriverHarness(t, nil)
   364  	cleanup := d.MkAllocDir(task, true)
   365  	defer cleanup()
   366  	copyImage(t, task.TaskDir(), "busybox.tar")
   367  
   368  	_, _, err := d.StartTask(task)
   369  	require.NoError(t, err)
   370  
   371  	defer d.DestroyTask(task.ID, true)
   372  
   373  	// Attempt to wait
   374  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   375  	require.NoError(t, err)
   376  
   377  	select {
   378  	case <-waitCh:
   379  		t.Fatalf("wait channel should not have received an exit result")
   380  	case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second):
   381  	}
   382  }
   383  
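        // TestDockerDriver_Start_WaitFinish runs a short-lived "echo hello" task and
        // asserts that it exits successfully before the timeout.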
   384  func TestDockerDriver_Start_WaitFinish(t *testing.T) {
   385  	if !tu.IsCI() {
   386  		t.Parallel()
   387  	}
   388  	testutil.DockerCompatible(t)
   389  
   390  	taskCfg := newTaskConfig("", []string{"echo", "hello"})
   391  	task := &drivers.TaskConfig{
   392  		ID:        uuid.Generate(),
   393  		Name:      "nc-demo",
   394  		AllocID:   uuid.Generate(),
   395  		Resources: basicResources,
   396  	}
   397  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   398  
   399  	d := dockerDriverHarness(t, nil)
   400  	cleanup := d.MkAllocDir(task, true)
   401  	defer cleanup()
   402  	copyImage(t, task.TaskDir(), "busybox.tar")
   403  
   404  	_, _, err := d.StartTask(task)
   405  	require.NoError(t, err)
   406  
   407  	defer d.DestroyTask(task.ID, true)
   408  
   409  	// Attempt to wait
   410  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   411  	require.NoError(t, err)
   412  
   413  	select {
   414  	case res := <-waitCh:
   415  		if !res.Successful() {
   416  			require.Fail(t, "ExitResult should be successful: %v", res)
   417  		}
   418  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   419  		require.Fail(t, "timeout")
   420  	}
   421  }
   422  
   423  // TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a
   424  // stopped task container, remove it, and start a new container.
   425  //
   426  // See https://github.com/hashicorp/nomad/issues/3419
   427  func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
   428  	if !tu.IsCI() {
   429  		t.Parallel()
   430  	}
   431  	testutil.DockerCompatible(t)
   432  
   433  	taskCfg := newTaskConfig("", []string{"sleep", "9001"})
   434  	task := &drivers.TaskConfig{
   435  		ID:        uuid.Generate(),
   436  		Name:      "nc-demo",
   437  		AllocID:   uuid.Generate(),
   438  		Resources: basicResources,
   439  	}
   440  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   441  
   442  	d := dockerDriverHarness(t, nil)
   443  	cleanup := d.MkAllocDir(task, true)
   444  	defer cleanup()
   445  	copyImage(t, task.TaskDir(), "busybox.tar")
   446  
   447  	client := newTestDockerClient(t)
   448  
   449  	var imageID string
   450  	var err error
   451  
   452  	if runtime.GOOS != "windows" {
   453  		imageID, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
   454  	} else {
   455  		image, lErr := client.InspectImage("dantoml/busybox-windows:08012019")
   456  		err = lErr
   457  		if image != nil {
   458  			imageID = image.ID
   459  		}
   460  	}
   461  	require.NoError(t, err)
   462  	require.NotEmpty(t, imageID)
   463  
   464  	// Create a container of the same name but don't start it. This mimics
   465  	// the case of dockerd getting restarted and stopping containers while
   466  	// Nomad is watching them.
   467  	opts := docker.CreateContainerOptions{
   468  		Name: strings.Replace(task.ID, "/", "_", -1),
   469  		Config: &docker.Config{
   470  			Image: taskCfg.Image,
   471  			Cmd:   []string{"sleep", "9000"},
   472  			Env:   []string{fmt.Sprintf("test=%s", t.Name())},
   473  		},
   474  	}
   475  
   476  	if _, err := client.CreateContainer(opts); err != nil {
   477  		t.Fatalf("error creating initial container: %v", err)
   478  	}
   479  
   480  	_, _, err = d.StartTask(task)
   481  	defer d.DestroyTask(task.ID, true)
   482  	require.NoError(t, err)
   483  
   484  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   485  	require.NoError(t, d.DestroyTask(task.ID, true))
   486  }
   487  
   488  func TestDockerDriver_Start_LoadImage(t *testing.T) {
   489  	if !tu.IsCI() {
   490  		t.Parallel()
   491  	}
   492  	testutil.DockerCompatible(t)
   493  
   494  	taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"})
   495  	task := &drivers.TaskConfig{
   496  		ID:        uuid.Generate(),
   497  		Name:      "busybox-demo",
   498  		AllocID:   uuid.Generate(),
   499  		Resources: basicResources,
   500  	}
   501  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   502  
   503  	d := dockerDriverHarness(t, nil)
   504  	cleanup := d.MkAllocDir(task, true)
   505  	defer cleanup()
   506  	copyImage(t, task.TaskDir(), "busybox.tar")
   507  
   508  	_, _, err := d.StartTask(task)
   509  	require.NoError(t, err)
   510  
   511  	defer d.DestroyTask(task.ID, true)
   512  
   513  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   514  	require.NoError(t, err)
   515  	select {
   516  	case res := <-waitCh:
   517  		if !res.Successful() {
   518  			require.Fail(t, "ExitResult should be successful: %v", res)
   519  		}
   520  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   521  		require.Fail(t, "timeout")
   522  	}
   523  
   524  	// Check that data was written to the shared alloc directory.
   525  	outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
   526  	act, err := ioutil.ReadFile(outputFile)
   527  	if err != nil {
   528  		t.Fatalf("Couldn't read expected output: %v", err)
   529  	}
   530  
   531  	exp := "hello"
   532  	if strings.TrimSpace(string(act)) != exp {
   533  		t.Fatalf("command output %q; want %q", string(act), exp)
   534  	}
   535  
   536  }
   537  
   538  // Tests that starting a task without an image fails
   539  func TestDockerDriver_Start_NoImage(t *testing.T) {
   540  	if !tu.IsCI() {
   541  		t.Parallel()
   542  	}
   543  	testutil.DockerCompatible(t)
   544  
   545  	taskCfg := TaskConfig{
   546  		Command: "echo",
   547  		Args:    []string{"foo"},
   548  	}
   549  	task := &drivers.TaskConfig{
   550  		ID:        uuid.Generate(),
   551  		Name:      "echo",
   552  		AllocID:   uuid.Generate(),
   553  		Resources: basicResources,
   554  	}
   555  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   556  
   557  	d := dockerDriverHarness(t, nil)
   558  	cleanup := d.MkAllocDir(task, false)
   559  	defer cleanup()
   560  
   561  	_, _, err := d.StartTask(task)
   562  	require.Error(t, err)
   563  	require.Contains(t, err.Error(), "image name required")
   564  
   565  	d.DestroyTask(task.ID, true)
   566  }
   567  
   568  func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
   569  	if !tu.IsCI() {
   570  		t.Parallel()
   571  	}
   572  	testutil.DockerCompatible(t)
   573  
   574  	taskCfg := TaskConfig{
   575  		Image:   "127.0.0.1:32121/foo", // bad path
   576  		Command: "echo",
   577  		Args: []string{
   578  			"hello",
   579  		},
   580  	}
   581  	task := &drivers.TaskConfig{
   582  		ID:        uuid.Generate(),
   583  		Name:      "busybox-demo",
   584  		AllocID:   uuid.Generate(),
   585  		Resources: basicResources,
   586  	}
   587  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   588  
   589  	d := dockerDriverHarness(t, nil)
   590  	cleanup := d.MkAllocDir(task, true)
   591  	defer cleanup()
   592  
   593  	_, _, err := d.StartTask(task)
   594  	require.Error(t, err)
   595  
   596  	defer d.DestroyTask(task.ID, true)
   597  
   598  	if rerr, ok := err.(*structs.RecoverableError); !ok {
   599  		t.Fatalf("want recoverable error: %+v", err)
   600  	} else if !rerr.IsRecoverable() {
   601  		t.Fatalf("error not recoverable: %+v", err)
   602  	}
   603  }
   604  
   605  func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
   606  	if !tu.IsCI() {
   607  		t.Parallel()
   608  	}
   609  	// This test requires that the alloc dir be mounted into docker as a volume.
   610  	// Because this cannot happen when docker is run remotely, e.g. when running
   611  	// docker in a VM, we skip this when we detect Docker is being run remotely.
   612  	if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
   613  		t.Skip("Docker not connected")
   614  	}
   615  
   616  	exp := []byte{'w', 'i', 'n'}
   617  	file := "output.txt"
   618  
   619  	taskCfg := newTaskConfig("", []string{
   620  		"sh",
   621  		"-c",
   622  		fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`,
   623  			string(exp), taskenv.AllocDir, file),
   624  	})
   625  	task := &drivers.TaskConfig{
   626  		ID:        uuid.Generate(),
   627  		Name:      "busybox-demo",
   628  		AllocID:   uuid.Generate(),
   629  		Resources: basicResources,
   630  	}
   631  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   632  
   633  	d := dockerDriverHarness(t, nil)
   634  	cleanup := d.MkAllocDir(task, true)
   635  	defer cleanup()
   636  	copyImage(t, task.TaskDir(), "busybox.tar")
   637  
   638  	_, _, err := d.StartTask(task)
   639  	require.NoError(t, err)
   640  
   641  	defer d.DestroyTask(task.ID, true)
   642  
   643  	// Attempt to wait
   644  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   645  	require.NoError(t, err)
   646  
   647  	select {
   648  	case res := <-waitCh:
   649  		if !res.Successful() {
   650  			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
   651  		}
   652  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   653  		require.Fail(t, "timeout")
   654  	}
   655  
   656  	// Check that data was written to the shared alloc directory.
   657  	outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file)
   658  	act, err := ioutil.ReadFile(outputFile)
   659  	if err != nil {
   660  		t.Fatalf("Couldn't read expected output: %v", err)
   661  	}
   662  
   663  	if !reflect.DeepEqual(act, exp) {
   664  		t.Fatalf("command output %q; want %q", string(act), string(exp))
   665  	}
   666  }
   667  
   668  func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
   669  	if !tu.IsCI() {
   670  		t.Parallel()
   671  	}
   672  	testutil.DockerCompatible(t)
   673  
   674  	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
   675  	task := &drivers.TaskConfig{
   676  		ID:        uuid.Generate(),
   677  		Name:      "busybox-demo",
   678  		AllocID:   uuid.Generate(),
   679  		Resources: basicResources,
   680  	}
   681  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   682  
   683  	d := dockerDriverHarness(t, nil)
   684  	cleanup := d.MkAllocDir(task, true)
   685  	defer cleanup()
   686  	copyImage(t, task.TaskDir(), "busybox.tar")
   687  
   688  	_, _, err := d.StartTask(task)
   689  	require.NoError(t, err)
   690  
   691  	defer d.DestroyTask(task.ID, true)
   692  
   693  	go func(t *testing.T) {
   694  		time.Sleep(100 * time.Millisecond)
   695  		signal := "SIGINT"
   696  		if runtime.GOOS == "windows" {
   697  			signal = "SIGKILL"
   698  		}
   699  		require.NoError(t, d.StopTask(task.ID, time.Second, signal))
   700  	}(t)
   701  
   702  	// Attempt to wait
   703  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   704  	require.NoError(t, err)
   705  
   706  	select {
   707  	case res := <-waitCh:
   708  		if res.Successful() {
   709  			require.Fail(t, "ExitResult should err: %v", res)
   710  		}
   711  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   712  		require.Fail(t, "timeout")
   713  	}
   714  }
   715  
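        // TestDockerDriver_Start_KillTimeout asserts that when the container does not
        // exit on the stop signal (SIGUSR1), it is only killed after the configured
        // kill timeout has elapsed.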
   716  func TestDockerDriver_Start_KillTimeout(t *testing.T) {
   717  	if !tu.IsCI() {
   718  		t.Parallel()
   719  	}
   720  	testutil.DockerCompatible(t)
   721  
   722  	if runtime.GOOS == "windows" {
   723  		t.Skip("Windows Docker does not support SIGUSR1")
   724  	}
   725  
   726  	timeout := 2 * time.Second
   727  	taskCfg := newTaskConfig("", []string{"sleep", "10"})
   728  	task := &drivers.TaskConfig{
   729  		ID:        uuid.Generate(),
   730  		Name:      "busybox-demo",
   731  		AllocID:   uuid.Generate(),
   732  		Resources: basicResources,
   733  	}
   734  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   735  
   736  	d := dockerDriverHarness(t, nil)
   737  	cleanup := d.MkAllocDir(task, true)
   738  	defer cleanup()
   739  	copyImage(t, task.TaskDir(), "busybox.tar")
   740  
   741  	_, _, err := d.StartTask(task)
   742  	require.NoError(t, err)
   743  
   744  	defer d.DestroyTask(task.ID, true)
   745  
   746  	var killSent time.Time
   747  	go func() {
   748  		time.Sleep(100 * time.Millisecond)
   749  		killSent = time.Now()
   750  		require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
   751  	}()
   752  
   753  	// Attempt to wait
   754  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   755  	require.NoError(t, err)
   756  
   757  	var killed time.Time
   758  	select {
   759  	case <-waitCh:
   760  		killed = time.Now()
   761  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   762  		require.Fail(t, "timeout")
   763  	}
   764  
   765  	require.True(t, killed.Sub(killSent) > timeout)
   766  }
   767  
   768  func TestDockerDriver_StartN(t *testing.T) {
   769  	if runtime.GOOS == "windows" {
   770  		t.Skip("Windows Docker does not support SIGINT")
   771  	}
   772  	if !tu.IsCI() {
   773  		t.Parallel()
   774  	}
   775  	testutil.DockerCompatible(t)
   776  	require := require.New(t)
   777  
   778  	task1, _, _ := dockerTask(t)
   779  	task2, _, _ := dockerTask(t)
   780  	task3, _, _ := dockerTask(t)
   781  	taskList := []*drivers.TaskConfig{task1, task2, task3}
   782  
   783  	t.Logf("Starting %d tasks", len(taskList))
   784  
   785  	d := dockerDriverHarness(t, nil)
   786  	// Let's spin up a bunch of things
   787  	for _, task := range taskList {
   788  		cleanup := d.MkAllocDir(task, true)
   789  		defer cleanup()
   790  		copyImage(t, task.TaskDir(), "busybox.tar")
   791  		_, _, err := d.StartTask(task)
   792  		require.NoError(err)
   793  
   794  	}
   795  
   796  	defer d.DestroyTask(task3.ID, true)
   797  	defer d.DestroyTask(task2.ID, true)
   798  	defer d.DestroyTask(task1.ID, true)
   799  
   800  	t.Log("All tasks are started. Terminating...")
   801  	for _, task := range taskList {
   802  		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))
   803  
   804  		// Attempt to wait
   805  		waitCh, err := d.WaitTask(context.Background(), task.ID)
   806  		require.NoError(err)
   807  
   808  		select {
   809  		case <-waitCh:
   810  		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   811  			require.Fail("timeout waiting on task")
   812  		}
   813  	}
   814  
   815  	t.Log("Test complete!")
   816  }
   817  
   818  func TestDockerDriver_StartNVersions(t *testing.T) {
   819  	if runtime.GOOS == "windows" {
   820  		t.Skip("Skipped on windows, we don't have image variants available")
   821  	}
   822  	if !tu.IsCI() {
   823  		t.Parallel()
   824  	}
   825  	testutil.DockerCompatible(t)
   826  	require := require.New(t)
   827  
   828  	task1, cfg1, _ := dockerTask(t)
   829  	tcfg1 := newTaskConfig("", []string{"echo", "hello"})
   830  	cfg1.Image = tcfg1.Image
   831  	cfg1.LoadImage = tcfg1.LoadImage
   832  	require.NoError(task1.EncodeConcreteDriverConfig(cfg1))
   833  
   834  	task2, cfg2, _ := dockerTask(t)
   835  	tcfg2 := newTaskConfig("musl", []string{"echo", "hello"})
   836  	cfg2.Image = tcfg2.Image
   837  	cfg2.LoadImage = tcfg2.LoadImage
   838  	require.NoError(task2.EncodeConcreteDriverConfig(cfg2))
   839  
   840  	task3, cfg3, _ := dockerTask(t)
   841  	tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"})
   842  	cfg3.Image = tcfg3.Image
   843  	cfg3.LoadImage = tcfg3.LoadImage
   844  	require.NoError(task3.EncodeConcreteDriverConfig(cfg3))
   845  
   846  	taskList := []*drivers.TaskConfig{task1, task2, task3}
   847  
   848  	t.Logf("Starting %d tasks", len(taskList))
   849  	d := dockerDriverHarness(t, nil)
   850  
   851  	// Let's spin up a bunch of things
   852  	for _, task := range taskList {
   853  		cleanup := d.MkAllocDir(task, true)
   854  		defer cleanup()
   855  		copyImage(t, task.TaskDir(), "busybox.tar")
   856  		copyImage(t, task.TaskDir(), "busybox_musl.tar")
   857  		copyImage(t, task.TaskDir(), "busybox_glibc.tar")
   858  		_, _, err := d.StartTask(task)
   859  		require.NoError(err)
   860  
   861  		require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
   862  	}
   863  
   864  	defer d.DestroyTask(task3.ID, true)
   865  	defer d.DestroyTask(task2.ID, true)
   866  	defer d.DestroyTask(task1.ID, true)
   867  
   868  	t.Log("All tasks are started. Terminating...")
   869  	for _, task := range taskList {
   870  		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))
   871  
   872  		// Attempt to wait
   873  		waitCh, err := d.WaitTask(context.Background(), task.ID)
   874  		require.NoError(err)
   875  
   876  		select {
   877  		case <-waitCh:
   878  		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   879  			require.Fail("timeout waiting on task")
   880  		}
   881  	}
   882  
   883  	t.Log("Test complete!")
   884  }
   885  
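        // TestDockerDriver_Labels asserts that labels from the task's driver config
        // are applied to the created container.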
   886  func TestDockerDriver_Labels(t *testing.T) {
   887  	if !tu.IsCI() {
   888  		t.Parallel()
   889  	}
   890  	testutil.DockerCompatible(t)
   891  
   892  	task, cfg, _ := dockerTask(t)
   893  	cfg.Labels = map[string]string{
   894  		"label1": "value1",
   895  		"label2": "value2",
   896  	}
   897  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   898  
   899  	client, d, handle, cleanup := dockerSetup(t, task)
   900  	defer cleanup()
   901  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   902  
   903  	container, err := client.InspectContainer(handle.containerID)
   904  	if err != nil {
   905  		t.Fatalf("err: %v", err)
   906  	}
   907  
   908  	require.Equal(t, 2, len(container.Config.Labels))
   909  	for k, v := range cfg.Labels {
   910  		require.Equal(t, v, container.Config.Labels[k])
   911  	}
   912  }
   913  
   914  func TestDockerDriver_ForcePull(t *testing.T) {
   915  	if !tu.IsCI() {
   916  		t.Parallel()
   917  	}
   918  	testutil.DockerCompatible(t)
   919  
   920  	task, cfg, _ := dockerTask(t)
   921  	cfg.ForcePull = true
   922  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   923  
   924  	client, d, handle, cleanup := dockerSetup(t, task)
   925  	defer cleanup()
   926  
   927  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   928  
   929  	_, err := client.InspectContainer(handle.containerID)
   930  	if err != nil {
   931  		t.Fatalf("err: %v", err)
   932  	}
   933  }
   934  
   935  func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
   936  	if runtime.GOOS == "windows" {
   937  		t.Skip("TODO: Skipped digest test on Windows")
   938  	}
   939  
   940  	if !tu.IsCI() {
   941  		t.Parallel()
   942  	}
   943  	testutil.DockerCompatible(t)
   944  
   945  	task, cfg, _ := dockerTask(t)
   946  	cfg.LoadImage = ""
   947  	cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
   948  	localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
   949  	cfg.ForcePull = true
   950  	cfg.Command = busyboxLongRunningCmd[0]
   951  	cfg.Args = busyboxLongRunningCmd[1:]
   952  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   953  
   954  	client, d, handle, cleanup := dockerSetup(t, task)
   955  	defer cleanup()
   956  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   957  
   958  	container, err := client.InspectContainer(handle.containerID)
   959  	require.NoError(t, err)
   960  	require.Equal(t, localDigest, container.Image)
   961  }
   962  
   963  func TestDockerDriver_SecurityOpt(t *testing.T) {
   964  	if runtime.GOOS == "windows" {
   965  		t.Skip("Windows does not support seccomp")
   966  	}
   967  	if !tu.IsCI() {
   968  		t.Parallel()
   969  	}
   970  	testutil.DockerCompatible(t)
   971  
   972  	task, cfg, _ := dockerTask(t)
   973  	cfg.SecurityOpt = []string{"seccomp=unconfined"}
   974  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   975  
   976  	client, d, handle, cleanup := dockerSetup(t, task)
   977  	defer cleanup()
   978  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   979  
   980  	container, err := client.InspectContainer(handle.containerID)
   981  	if err != nil {
   982  		t.Fatalf("err: %v", err)
   983  	}
   984  
   985  	require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt)
   986  }
   987  
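        // TestDockerDriver_CreateContainerConfig asserts that the image, storage_opt,
        // and the backward compatible <task_name>-<alloc_id> container name are
        // propagated into the container creation config.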
   988  func TestDockerDriver_CreateContainerConfig(t *testing.T) {
   989  	t.Parallel()
   990  
   991  	task, cfg, _ := dockerTask(t)
   992  	opt := map[string]string{"size": "120G"}
   993  
   994  	cfg.StorageOpt = opt
   995  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   996  
   997  	dh := dockerDriverHarness(t, nil)
   998  	driver := dh.Impl().(*Driver)
   999  
  1000  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1001  	require.NoError(t, err)
  1002  
  1003  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1004  	require.EqualValues(t, opt, c.HostConfig.StorageOpt)
  1005  
  1006  	// Container name should be <task_name>-<alloc_id> for backward compat
  1007  	containerName := fmt.Sprintf("%s-%s", strings.Replace(task.Name, "/", "_", -1), task.AllocID)
  1008  	require.Equal(t, containerName, c.Name)
  1009  }
  1010  
  1011  func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
  1012  	t.Parallel()
  1013  
  1014  	cases := []struct {
  1015  		name           string
  1016  		loggingConfig  DockerLogging
  1017  		expectedDriver string
  1018  	}{
  1019  		{
  1020  			"simple type",
  1021  			DockerLogging{Type: "fluentd"},
  1022  			"fluentd",
  1023  		},
  1024  		{
  1025  			"simple driver",
  1026  			DockerLogging{Driver: "fluentd"},
  1027  			"fluentd",
  1028  		},
  1029  		{
  1030  			"type takes precedence",
  1031  			DockerLogging{
  1032  				Type:   "json-file",
  1033  				Driver: "fluentd",
  1034  			},
  1035  			"json-file",
  1036  		},
  1037  	}
  1038  
  1039  	for _, c := range cases {
  1040  		t.Run(c.name, func(t *testing.T) {
  1041  			task, cfg, _ := dockerTask(t)
  1042  
  1043  			cfg.Logging = c.loggingConfig
  1044  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1045  
  1046  			dh := dockerDriverHarness(t, nil)
  1047  			driver := dh.Impl().(*Driver)
  1048  
  1049  			cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1050  			require.NoError(t, err)
  1051  
  1052  			require.Equal(t, c.expectedDriver, cc.HostConfig.LogConfig.Type)
  1053  		})
  1054  	}
  1055  }
  1056  
  1057  func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) {
  1058  	if !tu.IsCI() {
  1059  		t.Parallel()
  1060  	}
  1061  	if !testutil.DockerIsConnected(t) {
  1062  		t.Skip("Docker not connected")
  1063  	}
  1064  	if runtime.GOOS != "linux" {
  1065  		t.Skip("nvidia plugin supports only linux")
  1066  	}
  1067  	testCases := []struct {
  1068  		description           string
  1069  		gpuRuntimeSet         bool
  1070  		expectToReturnError   bool
  1071  		expectedRuntime       string
  1072  		nvidiaDevicesProvided bool
  1073  	}{
  1074  		{
  1075  			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 1",
  1076  			gpuRuntimeSet:         true,
  1077  			expectToReturnError:   false,
  1078  			expectedRuntime:       "nvidia",
  1079  			nvidiaDevicesProvided: true,
  1080  		},
  1081  		{
  1082  			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 2",
  1083  			gpuRuntimeSet:         true,
  1084  			expectToReturnError:   false,
  1085  			expectedRuntime:       "nvidia-runtime-modified-name",
  1086  			nvidiaDevicesProvided: true,
  1087  		},
  1088  		{
  1089  			description:           "no gpu devices provided - no runtime should be set",
  1090  			gpuRuntimeSet:         true,
  1091  			expectToReturnError:   false,
  1092  			expectedRuntime:       "nvidia",
  1093  			nvidiaDevicesProvided: false,
  1094  		},
  1095  		{
  1096  			description:           "no gpuRuntime supported by docker driver",
  1097  			gpuRuntimeSet:         false,
  1098  			expectToReturnError:   true,
  1099  			expectedRuntime:       "nvidia",
  1100  			nvidiaDevicesProvided: true,
  1101  		},
  1102  	}
  1103  	for _, testCase := range testCases {
  1104  		t.Run(testCase.description, func(t *testing.T) {
  1105  			task, cfg, _ := dockerTask(t)
  1106  
  1107  			dh := dockerDriverHarness(t, nil)
  1108  			driver := dh.Impl().(*Driver)
  1109  
  1110  			driver.gpuRuntime = testCase.gpuRuntimeSet
  1111  			driver.config.GPURuntimeName = testCase.expectedRuntime
  1112  			if testCase.nvidiaDevicesProvided {
  1113  				task.DeviceEnv[nvidia.NvidiaVisibleDevices] = "GPU_UUID_1"
  1114  			}
  1115  
  1116  			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1117  			if testCase.expectToReturnError {
  1118  				require.NotNil(t, err)
  1119  			} else {
  1120  				require.NoError(t, err)
  1121  				if testCase.nvidiaDevicesProvided {
  1122  					require.Equal(t, testCase.expectedRuntime, c.HostConfig.Runtime)
  1123  				} else {
  1124  					// no nvidia devices provided -> no point in using the nvidia runtime
  1125  					require.Equal(t, "", c.HostConfig.Runtime)
  1126  				}
  1127  			}
  1128  		})
  1129  	}
  1130  }
  1131  
  1132  func TestDockerDriver_Capabilities(t *testing.T) {
  1133  	if !tu.IsCI() {
  1134  		t.Parallel()
  1135  	}
  1136  	testutil.DockerCompatible(t)
  1137  	if runtime.GOOS == "windows" {
  1138  		t.Skip("Capabilities not supported on windows")
  1139  	}
  1140  
  1141  	testCases := []struct {
  1142  		Name       string
  1143  		CapAdd     []string
  1144  		CapDrop    []string
  1145  		Whitelist  string
  1146  		StartError string
  1147  	}{
  1148  		{
  1149  			Name:    "default-whitelist-add-allowed",
  1150  			CapAdd:  []string{"fowner", "mknod"},
  1151  			CapDrop: []string{"all"},
  1152  		},
  1153  		{
  1154  			Name:       "default-whitelist-add-forbidden",
  1155  			CapAdd:     []string{"net_admin"},
  1156  			StartError: "net_admin",
  1157  		},
  1158  		{
  1159  			Name:    "default-whitelist-drop-existing",
  1160  			CapDrop: []string{"fowner", "mknod"},
  1161  		},
  1162  		{
  1163  			Name:      "restrictive-whitelist-drop-all",
  1164  			CapDrop:   []string{"all"},
  1165  			Whitelist: "fowner,mknod",
  1166  		},
  1167  		{
  1168  			Name:      "restrictive-whitelist-add-allowed",
  1169  			CapAdd:    []string{"fowner", "mknod"},
  1170  			CapDrop:   []string{"all"},
  1171  			Whitelist: "fowner,mknod",
  1172  		},
  1173  		{
  1174  			Name:       "restrictive-whitelist-add-forbidden",
  1175  			CapAdd:     []string{"net_admin", "mknod"},
  1176  			CapDrop:    []string{"all"},
  1177  			Whitelist:  "fowner,mknod",
  1178  			StartError: "net_admin",
  1179  		},
  1180  		{
  1181  			Name:      "permissive-whitelist",
  1182  			CapAdd:    []string{"net_admin", "mknod"},
  1183  			Whitelist: "all",
  1184  		},
  1185  		{
  1186  			Name:      "permissive-whitelist-add-all",
  1187  			CapAdd:    []string{"all"},
  1188  			Whitelist: "all",
  1189  		},
  1190  	}
  1191  
  1192  	for _, tc := range testCases {
  1193  		t.Run(tc.Name, func(t *testing.T) {
  1194  			client := newTestDockerClient(t)
  1195  			task, cfg, _ := dockerTask(t)
  1196  			if len(tc.CapAdd) > 0 {
  1197  				cfg.CapAdd = tc.CapAdd
  1198  			}
  1199  			if len(tc.CapDrop) > 0 {
  1200  				cfg.CapDrop = tc.CapDrop
  1201  			}
  1202  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1203  
  1204  			d := dockerDriverHarness(t, nil)
  1205  			dockerDriver, ok := d.Impl().(*Driver)
  1206  			require.True(t, ok)
  1207  			if tc.Whitelist != "" {
  1208  				dockerDriver.config.AllowCaps = strings.Split(tc.Whitelist, ",")
  1209  			}
  1210  
  1211  			cleanup := d.MkAllocDir(task, true)
  1212  			defer cleanup()
  1213  			copyImage(t, task.TaskDir(), "busybox.tar")
  1214  
  1215  			_, _, err := d.StartTask(task)
  1216  			defer d.DestroyTask(task.ID, true)
  1217  			if err == nil && tc.StartError != "" {
  1218  				t.Fatalf("Expected error in start: %v", tc.StartError)
  1219  			} else if err != nil {
  1220  				if tc.StartError == "" {
  1221  					require.NoError(t, err)
  1222  				} else {
  1223  					require.Contains(t, err.Error(), tc.StartError)
  1224  				}
  1225  				return
  1226  			}
  1227  
  1228  			handle, ok := dockerDriver.tasks.Get(task.ID)
  1229  			require.True(t, ok)
  1230  
  1231  			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1232  
  1233  			container, err := client.InspectContainer(handle.containerID)
  1234  			require.NoError(t, err)
  1235  
  1236  			require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd)
  1237  			require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop)
  1238  		})
  1239  	}
  1240  }
  1241  
  1242  func TestDockerDriver_DNS(t *testing.T) {
  1243  	if !tu.IsCI() {
  1244  		t.Parallel()
  1245  	}
  1246  	testutil.DockerCompatible(t)
  1247  
  1248  	task, cfg, _ := dockerTask(t)
  1249  	cfg.DNSServers = []string{"8.8.8.8", "8.8.4.4"}
  1250  	cfg.DNSSearchDomains = []string{"example.com", "example.org", "example.net"}
  1251  	cfg.DNSOptions = []string{"ndots:1"}
  1252  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1253  
  1254  	client, d, handle, cleanup := dockerSetup(t, task)
  1255  	defer cleanup()
  1256  
  1257  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1258  
  1259  	container, err := client.InspectContainer(handle.containerID)
  1260  	require.NoError(t, err)
  1261  
  1262  	require.Exactly(t, cfg.DNSServers, container.HostConfig.DNS)
  1263  	require.Exactly(t, cfg.DNSSearchDomains, container.HostConfig.DNSSearch)
  1264  	require.Exactly(t, cfg.DNSOptions, container.HostConfig.DNSOptions)
  1265  }
  1266  
  1267  func TestDockerDriver_MACAddress(t *testing.T) {
  1268  	if !tu.IsCI() {
  1269  		t.Parallel()
  1270  	}
  1271  	testutil.DockerCompatible(t)
  1272  	if runtime.GOOS == "windows" {
  1273  		t.Skip("Windows docker does not support setting MacAddress")
  1274  	}
  1275  
  1276  	task, cfg, _ := dockerTask(t)
  1277  	cfg.MacAddress = "00:16:3e:00:00:00"
  1278  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1279  
  1280  	client, d, handle, cleanup := dockerSetup(t, task)
  1281  	defer cleanup()
  1282  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1283  
  1284  	container, err := client.InspectContainer(handle.containerID)
  1285  	require.NoError(t, err)
  1286  
  1287  	require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress)
  1288  }
  1289  
  1290  func TestDockerWorkDir(t *testing.T) {
  1291  	if !tu.IsCI() {
  1292  		t.Parallel()
  1293  	}
  1294  	testutil.DockerCompatible(t)
  1295  
  1296  	task, cfg, _ := dockerTask(t)
  1297  	cfg.WorkDir = "/some/path"
  1298  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1299  
  1300  	client, d, handle, cleanup := dockerSetup(t, task)
  1301  	defer cleanup()
  1302  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1303  
  1304  	container, err := client.InspectContainer(handle.containerID)
  1305  	require.NoError(t, err)
  1306  	require.Equal(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir))
  1307  }
  1308  
  1309  func inSlice(needle string, haystack []string) bool {
  1310  	for _, h := range haystack {
  1311  		if h == needle {
  1312  			return true
  1313  		}
  1314  	}
  1315  	return false
  1316  }
  1317  
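        // TestDockerDriver_PortsNoMap asserts that, without a port_map, the reserved
        // and dynamic host ports are exposed and forwarded 1:1 into the container.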
  1318  func TestDockerDriver_PortsNoMap(t *testing.T) {
  1319  	if !tu.IsCI() {
  1320  		t.Parallel()
  1321  	}
  1322  	testutil.DockerCompatible(t)
  1323  
  1324  	task, _, port := dockerTask(t)
  1325  	res := port[0]
  1326  	dyn := port[1]
  1327  
  1328  	client, d, handle, cleanup := dockerSetup(t, task)
  1329  	defer cleanup()
  1330  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1331  
  1332  	container, err := client.InspectContainer(handle.containerID)
  1333  	require.NoError(t, err)
  1334  
  1335  	// Verify that the correct ports are EXPOSED
  1336  	expectedExposedPorts := map[docker.Port]struct{}{
  1337  		docker.Port(fmt.Sprintf("%d/tcp", res)): {},
  1338  		docker.Port(fmt.Sprintf("%d/udp", res)): {},
  1339  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {},
  1340  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {},
  1341  	}
  1342  
  1343  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1344  
  1345  	hostIP := "127.0.0.1"
  1346  	if runtime.GOOS == "windows" {
  1347  		hostIP = ""
  1348  	}
  1349  
  1350  	// Verify that the correct ports are FORWARDED
  1351  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1352  		docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1353  		docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1354  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1355  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1356  	}
  1357  
  1358  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1359  }
  1360  
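        // TestDockerDriver_PortsMapping asserts that port_map entries bind the host
        // ports to the mapped container ports (8080 and 6379 here).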
  1361  func TestDockerDriver_PortsMapping(t *testing.T) {
  1362  	if !tu.IsCI() {
  1363  		t.Parallel()
  1364  	}
  1365  	testutil.DockerCompatible(t)
  1366  
  1367  	task, cfg, port := dockerTask(t)
  1368  	res := port[0]
  1369  	dyn := port[1]
  1370  	cfg.PortMap = map[string]int{
  1371  		"main":  8080,
  1372  		"REDIS": 6379,
  1373  	}
  1374  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1375  
  1376  	client, d, handle, cleanup := dockerSetup(t, task)
  1377  	defer cleanup()
  1378  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1379  
  1380  	container, err := client.InspectContainer(handle.containerID)
  1381  	require.NoError(t, err)
  1382  
  1383  	// Verify that the correct ports are EXPOSED
  1384  	expectedExposedPorts := map[docker.Port]struct{}{
  1385  		docker.Port("8080/tcp"): {},
  1386  		docker.Port("8080/udp"): {},
  1387  		docker.Port("6379/tcp"): {},
  1388  		docker.Port("6379/udp"): {},
  1389  	}
  1390  
  1391  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1392  
  1393  	hostIP := "127.0.0.1"
  1394  	if runtime.GOOS == "windows" {
  1395  		hostIP = ""
  1396  	}
  1397  
  1398  	// Verify that the correct ports are FORWARDED
  1399  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1400  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1401  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1402  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1403  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1404  	}
  1405  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1406  }
  1407  
  1408  func TestDockerDriver_CleanupContainer(t *testing.T) {
  1409  	if !tu.IsCI() {
  1410  		t.Parallel()
  1411  	}
  1412  	testutil.DockerCompatible(t)
  1413  
  1414  	task, cfg, _ := dockerTask(t)
  1415  	cfg.Command = "echo"
  1416  	cfg.Args = []string{"hello"}
  1417  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1418  
  1419  	client, d, handle, cleanup := dockerSetup(t, task)
  1420  	defer cleanup()
  1421  
  1422  	waitCh, err := d.WaitTask(context.Background(), task.ID)
  1423  	require.NoError(t, err)
  1424  
  1425  	select {
  1426  	case res := <-waitCh:
  1427  		if !res.Successful() {
  1428  			t.Fatalf("err: %v", res)
  1429  		}
  1430  
  1431  		err = d.DestroyTask(task.ID, false)
  1432  		require.NoError(t, err)
  1433  
  1434  		time.Sleep(3 * time.Second)
  1435  
  1436  		// Ensure that the container isn't present
  1437  		_, err := client.InspectContainer(handle.containerID)
  1438  		if err == nil {
  1439  			t.Fatalf("expected to not get container")
  1440  		}
  1441  
  1442  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  1443  		t.Fatalf("timeout")
  1444  	}
  1445  }
  1446  
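        // TestDockerDriver_EnableImageGC asserts that with image GC enabled the
        // task's image is removed after DestroyTask once image_delay has elapsed.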
  1447  func TestDockerDriver_EnableImageGC(t *testing.T) {
  1448  	testutil.DockerCompatible(t)
  1449  
  1450  	task, cfg, _ := dockerTask(t)
  1451  	cfg.Command = "echo"
  1452  	cfg.Args = []string{"hello"}
  1453  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1454  
  1455  	client := newTestDockerClient(t)
  1456  	driver := dockerDriverHarness(t, map[string]interface{}{
  1457  		"gc": map[string]interface{}{
  1458  			"container":   true,
  1459  			"image":       true,
  1460  			"image_delay": "2s",
  1461  		},
  1462  	})
  1463  	cleanup := driver.MkAllocDir(task, true)
  1464  	defer cleanup()
  1465  
  1466  	cleanSlate(client, cfg.Image)
  1467  
  1468  	copyImage(t, task.TaskDir(), "busybox.tar")
  1469  	_, _, err := driver.StartTask(task)
  1470  	require.NoError(t, err)
  1471  
  1472  	dockerDriver, ok := driver.Impl().(*Driver)
  1473  	require.True(t, ok)
  1474  	_, ok = dockerDriver.tasks.Get(task.ID)
  1475  	require.True(t, ok)
  1476  
  1477  	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
  1478  	require.NoError(t, err)
  1479  	select {
  1480  	case res := <-waitCh:
  1481  		if !res.Successful() {
  1482  			t.Fatalf("err: %v", res)
  1483  		}
  1484  
  1485  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  1486  		t.Fatalf("timeout")
  1487  	}
  1488  
  1489  	// we haven't called DestroyTask, image should be present
  1490  	_, err = client.InspectImage(cfg.Image)
  1491  	require.NoError(t, err)
  1492  
  1493  	err = dockerDriver.DestroyTask(task.ID, false)
  1494  	require.NoError(t, err)
  1495  
  1496  	// image_delay is 2s, so image should still be around for a bit
  1497  	_, err = client.InspectImage(cfg.Image)
  1498  	require.NoError(t, err)
  1499  
  1500  	// Ensure image was removed
  1501  	tu.WaitForResult(func() (bool, error) {
  1502  		if _, err := client.InspectImage(cfg.Image); err == nil {
  1503  			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
  1504  		}
  1505  
  1506  		return true, nil
  1507  	}, func(err error) {
  1508  		require.NoError(t, err)
  1509  	})
  1510  }
  1511  
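        // TestDockerDriver_DisableImageGC asserts that with image GC disabled the
        // image is neither removed nor scheduled for deletion after DestroyTask.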
  1512  func TestDockerDriver_DisableImageGC(t *testing.T) {
  1513  	testutil.DockerCompatible(t)
  1514  
  1515  	task, cfg, _ := dockerTask(t)
  1516  	cfg.Command = "echo"
  1517  	cfg.Args = []string{"hello"}
  1518  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1519  
  1520  	client := newTestDockerClient(t)
  1521  	driver := dockerDriverHarness(t, map[string]interface{}{
  1522  		"gc": map[string]interface{}{
  1523  			"container":   true,
  1524  			"image":       false,
  1525  			"image_delay": "1s",
  1526  		},
  1527  	})
  1528  	cleanup := driver.MkAllocDir(task, true)
  1529  	defer cleanup()
  1530  
  1531  	cleanSlate(client, cfg.Image)
  1532  
  1533  	copyImage(t, task.TaskDir(), "busybox.tar")
  1534  	_, _, err := driver.StartTask(task)
  1535  	require.NoError(t, err)
  1536  
  1537  	dockerDriver, ok := driver.Impl().(*Driver)
  1538  	require.True(t, ok)
  1539  	handle, ok := dockerDriver.tasks.Get(task.ID)
  1540  	require.True(t, ok)
  1541  
  1542  	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
  1543  	require.NoError(t, err)
  1544  	select {
  1545  	case res := <-waitCh:
  1546  		if !res.Successful() {
  1547  			t.Fatalf("err: %v", res)
  1548  		}
  1549  
  1550  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  1551  		t.Fatalf("timeout")
  1552  	}
  1553  
  1554  	// we haven't called DestroyTask, image should be present
  1555  	_, err = client.InspectImage(handle.containerImage)
  1556  	require.NoError(t, err)
  1557  
  1558  	err = dockerDriver.DestroyTask(task.ID, false)
  1559  	require.NoError(t, err)
  1560  
  1561  	// image_delay is 1s, wait a little longer
  1562  	time.Sleep(3 * time.Second)
  1563  
  1564  	// image should not have been removed or scheduled to be removed
  1565  	_, err = client.InspectImage(cfg.Image)
  1566  	require.NoError(t, err)
  1567  	dockerDriver.coordinator.imageLock.Lock()
  1568  	_, ok = dockerDriver.coordinator.deleteFuture[handle.containerImage]
  1569  	require.False(t, ok, "image should not be registered for deletion")
  1570  	dockerDriver.coordinator.imageLock.Unlock()
  1571  }
  1572  
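        // TestDockerDriver_MissingContainer_Cleanup asserts that DestroyTask succeeds
        // and the image is still garbage collected when the container has already
        // been removed out-of-band.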
  1573  func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
  1574  	testutil.DockerCompatible(t)
  1575  
  1576  	task, cfg, _ := dockerTask(t)
  1577  	cfg.Command = "echo"
  1578  	cfg.Args = []string{"hello"}
  1579  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1580  
  1581  	client := newTestDockerClient(t)
  1582  	driver := dockerDriverHarness(t, map[string]interface{}{
  1583  		"gc": map[string]interface{}{
  1584  			"container":   true,
  1585  			"image":       true,
  1586  			"image_delay": "0s",
  1587  		},
  1588  	})
  1589  	cleanup := driver.MkAllocDir(task, true)
  1590  	defer cleanup()
  1591  
  1592  	cleanSlate(client, cfg.Image)
  1593  
  1594  	copyImage(t, task.TaskDir(), "busybox.tar")
  1595  	_, _, err := driver.StartTask(task)
  1596  	require.NoError(t, err)
  1597  
  1598  	dockerDriver, ok := driver.Impl().(*Driver)
  1599  	require.True(t, ok)
  1600  	h, ok := dockerDriver.tasks.Get(task.ID)
  1601  	require.True(t, ok)
  1602  
  1603  	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
  1604  	require.NoError(t, err)
  1605  	select {
  1606  	case res := <-waitCh:
  1607  		if !res.Successful() {
  1608  			t.Fatalf("err: %v", res)
  1609  		}
  1610  
  1611  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  1612  		t.Fatalf("timeout")
  1613  	}
  1614  
  1615  	// remove the container out-of-band
  1616  	require.NoError(t, client.RemoveContainer(docker.RemoveContainerOptions{
  1617  		ID: h.containerID,
  1618  	}))
  1619  
  1620  	require.NoError(t, dockerDriver.DestroyTask(task.ID, false))
  1621  
  1622  	// Ensure image was removed
  1623  	tu.WaitForResult(func() (bool, error) {
  1624  		if _, err := client.InspectImage(cfg.Image); err == nil {
  1625  			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
  1626  		}
  1627  
  1628  		return true, nil
  1629  	}, func(err error) {
  1630  		require.NoError(t, err)
  1631  	})
  1632  
  1633  	// Ensure that task handle was removed
  1634  	_, ok = dockerDriver.tasks.Get(task.ID)
  1635  	require.False(t, ok)
  1636  }
  1637  
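        // TestDockerDriver_Stats runs a long sleep task and asserts that the handle's
        // Stats channel emits a ResourceUsage sample while the container is running;
        // the goroutine then destroys the task, so WaitTask is expected to return an
        // unsuccessful result.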
  1638  func TestDockerDriver_Stats(t *testing.T) {
  1639  	if !tu.IsCI() {
  1640  		t.Parallel()
  1641  	}
  1642  	testutil.DockerCompatible(t)
  1643  
  1644  	task, cfg, _ := dockerTask(t)
  1645  	cfg.Command = "sleep"
  1646  	cfg.Args = []string{"1000"}
  1647  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1648  
  1649  	_, d, handle, cleanup := dockerSetup(t, task)
  1650  	defer cleanup()
  1651  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1652  
  1653  	go func() {
  1654  		defer d.DestroyTask(task.ID, true)
  1655  		ctx, cancel := context.WithCancel(context.Background())
  1656  		defer cancel()
  1657  		ch, err := handle.Stats(ctx, 1*time.Second)
  1658  		assert.NoError(t, err)
  1659  		select {
  1660  		case ru := <-ch:
  1661  			assert.NotNil(t, ru.ResourceUsage)
  1662  		case <-time.After(3 * time.Second):
  1663  			assert.Fail(t, "stats timeout")
  1664  		}
  1665  	}()
  1666  
  1667  	waitCh, err := d.WaitTask(context.Background(), task.ID)
  1668  	require.NoError(t, err)
  1669  	select {
  1670  	case res := <-waitCh:
  1671  		if res.Successful() {
  1672  			t.Fatalf("should err: %v", res)
  1673  		}
  1674  	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
  1675  		t.Fatalf("timeout")
  1676  	}
  1677  }
  1678  
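        // setupDockerVolumes builds a busybox task that touches a uniquely named file in a
        // container path backed by hostpath via the volumes syntax. It returns the task
        // config, a driver harness built from cfg, the decoded TaskConfig, the expected
        // host-side file path, and a cleanup func.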
  1679  func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath string) (*drivers.TaskConfig, *dtestutil.DriverHarness, *TaskConfig, string, func()) {
  1680  	testutil.DockerCompatible(t)
  1681  
  1682  	randfn := fmt.Sprintf("test-%d", rand.Int())
  1683  	hostfile := filepath.Join(hostpath, randfn)
  1684  	var containerPath string
  1685  	if runtime.GOOS == "windows" {
  1686  		containerPath = "C:\\data"
  1687  	} else {
  1688  		containerPath = "/mnt/vol"
  1689  	}
  1690  	containerFile := filepath.Join(containerPath, randfn)
  1691  
  1692  	taskCfg := newTaskConfig("", []string{"touch", containerFile})
  1693  	taskCfg.Volumes = []string{fmt.Sprintf("%s:%s", hostpath, containerPath)}
  1694  
  1695  	task := &drivers.TaskConfig{
  1696  		ID:        uuid.Generate(),
  1697  		Name:      "ls",
  1698  		AllocID:   uuid.Generate(),
  1699  		Env:       map[string]string{"VOL_PATH": containerPath},
  1700  		Resources: basicResources,
  1701  	}
  1702  	require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
  1703  
  1704  	d := dockerDriverHarness(t, cfg)
  1705  	cleanup := d.MkAllocDir(task, true)
  1706  
  1707  	copyImage(t, task.TaskDir(), "busybox.tar")
  1708  
  1709  	return task, d, &taskCfg, hostfile, cleanup
  1710  }
  1711  
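        // TestDockerDriver_VolumesDisabled covers three cases with volumes disabled in the
        // driver config: absolute host paths must be rejected, relative paths (resolved
        // inside the task dir) are still allowed and the touched file must appear, and
        // custom volume drivers must be rejected.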
  1712  func TestDockerDriver_VolumesDisabled(t *testing.T) {
  1713  	if !tu.IsCI() {
  1714  		t.Parallel()
  1715  	}
  1716  	testutil.DockerCompatible(t)
  1717  
  1718  	cfg := map[string]interface{}{
  1719  		"volumes": map[string]interface{}{
  1720  			"enabled": false,
  1721  		},
  1722  		"gc": map[string]interface{}{
  1723  			"image": false,
  1724  		},
  1725  	}
  1726  
  1727  	{
  1728  		tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesdisabled")
  1729  		if err != nil {
  1730  			t.Fatalf("error creating temporary dir: %v", err)
  1731  		}
  1732  
  1733  		task, driver, _, _, cleanup := setupDockerVolumes(t, cfg, tmpvol)
  1734  		defer cleanup()
  1735  
  1736  		_, _, err = driver.StartTask(task)
  1737  		defer driver.DestroyTask(task.ID, true)
  1738  		if err == nil {
  1739  			require.Fail(t, "Started task successfully when volumes should have been disabled.")
  1740  		}
  1741  	}
  1742  
  1743  	// Relative paths should still be allowed
  1744  	{
  1745  		task, driver, _, fn, cleanup := setupDockerVolumes(t, cfg, ".")
  1746  		defer cleanup()
  1747  
  1748  		_, _, err := driver.StartTask(task)
  1749  		require.NoError(t, err)
  1750  		defer driver.DestroyTask(task.ID, true)
  1751  
  1752  		waitCh, err := driver.WaitTask(context.Background(), task.ID)
  1753  		require.NoError(t, err)
  1754  		select {
  1755  		case res := <-waitCh:
  1756  			if !res.Successful() {
  1757  				t.Fatalf("unexpected err: %v", res)
  1758  			}
  1759  		case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
  1760  			t.Fatalf("timeout")
  1761  		}
  1762  
  1763  		if _, err := ioutil.ReadFile(filepath.Join(task.TaskDir().Dir, fn)); err != nil {
  1764  			t.Fatalf("unexpected error reading %s: %v", fn, err)
  1765  		}
  1766  	}
  1767  
  1768  	// Volume Drivers should be rejected (error)
  1769  	{
  1770  		task, driver, taskCfg, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol")
  1771  		defer cleanup()
  1772  
  1773  		taskCfg.VolumeDriver = "flocker"
  1774  		require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
  1775  
  1776  		_, _, err := driver.StartTask(task)
  1777  		defer driver.DestroyTask(task.ID, true)
  1778  		if err == nil {
  1779  			require.Fail(t, "Started task successfully when volume drivers should have been disabled.")
  1780  		}
  1781  	}
  1782  }
  1783  
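        // TestDockerDriver_VolumesEnabled mounts a host temp dir using the harness's
        // default (nil) driver config, waits for the touch command to exit successfully,
        // and verifies the file is visible on the host side of the bind mount.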
  1784  func TestDockerDriver_VolumesEnabled(t *testing.T) {
  1785  	if !tu.IsCI() {
  1786  		t.Parallel()
  1787  	}
  1788  	testutil.DockerCompatible(t)
  1789  
  1790  	tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled")
  1791  	require.NoError(t, err)
  1792  
  1793  	// Evaluate symlinks so the path works on macOS
  1794  	tmpvol, err = filepath.EvalSymlinks(tmpvol)
  1795  	require.NoError(t, err)
  1796  
  1797  	task, driver, _, hostpath, cleanup := setupDockerVolumes(t, nil, tmpvol)
  1798  	defer cleanup()
  1799  
  1800  	_, _, err = driver.StartTask(task)
  1801  	require.NoError(t, err)
  1802  	defer driver.DestroyTask(task.ID, true)
  1803  
  1804  	waitCh, err := driver.WaitTask(context.Background(), task.ID)
  1805  	require.NoError(t, err)
  1806  	select {
  1807  	case res := <-waitCh:
  1808  		if !res.Successful() {
  1809  			t.Fatalf("unexpected err: %v", res)
  1810  		}
  1811  	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
  1812  		t.Fatalf("timeout")
  1813  	}
  1814  
  1815  	if _, err := ioutil.ReadFile(hostpath); err != nil {
  1816  		t.Fatalf("unexpected error reading %s: %v", hostpath, err)
  1817  	}
  1818  }
  1819  
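        // TestDockerDriver_Mounts exercises the mounts config: a single valid volume mount
        // should start cleanly, while repeating the same target should fail with an error
        // containing "Duplicate mount point".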
  1820  func TestDockerDriver_Mounts(t *testing.T) {
  1821  	if !tu.IsCI() {
  1822  		t.Parallel()
  1823  	}
  1824  	testutil.DockerCompatible(t)
  1825  
  1826  	goodMount := DockerMount{
  1827  		Target: "/nomad",
  1828  		VolumeOptions: DockerVolumeOptions{
  1829  			Labels: map[string]string{"foo": "bar"},
  1830  			DriverConfig: DockerVolumeDriverConfig{
  1831  				Name: "local",
  1832  			},
  1833  		},
  1834  		ReadOnly: true,
  1835  		Source:   "test",
  1836  	}
  1837  
  1838  	if runtime.GOOS == "windows" {
  1839  		goodMount.Target = "C:\\nomad"
  1840  	}
  1841  
  1842  	cases := []struct {
  1843  		Name   string
  1844  		Mounts []DockerMount
  1845  		Error  string
  1846  	}{
  1847  		{
  1848  			Name:   "good-one",
  1849  			Error:  "",
  1850  			Mounts: []DockerMount{goodMount},
  1851  		},
  1852  		{
  1853  			Name:   "duplicate",
  1854  			Error:  "Duplicate mount point",
  1855  			Mounts: []DockerMount{goodMount, goodMount, goodMount},
  1856  		},
  1857  	}
  1858  
  1859  	for _, c := range cases {
  1860  		t.Run(c.Name, func(t *testing.T) {
  1861  			d := dockerDriverHarness(t, nil)
  1862  			// Build the task
  1863  			task, cfg, _ := dockerTask(t)
  1864  			cfg.Command = "sleep"
  1865  			cfg.Args = []string{"10000"}
  1866  			cfg.Mounts = c.Mounts
  1867  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1868  			cleanup := d.MkAllocDir(task, true)
  1869  			defer cleanup()
  1870  
  1871  			copyImage(t, task.TaskDir(), "busybox.tar")
  1872  
  1873  			_, _, err := d.StartTask(task)
  1874  			defer d.DestroyTask(task.ID, true)
  1875  			if err == nil && c.Error != "" {
  1876  				t.Fatalf("expected error: %v", c.Error)
  1877  			} else if err != nil {
  1878  				if c.Error == "" {
  1879  					t.Fatalf("unexpected error in prestart: %v", err)
  1880  				} else if !strings.Contains(err.Error(), c.Error) {
  1881  					t.Fatalf("expected error %q; got %v", c.Error, err)
  1882  				}
  1883  			}
  1884  		})
  1885  	}
  1886  }
  1887  
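        // TestDockerDriver_AuthConfiguration loads credentials from the
        // test-resources/docker/auth.json fixture and checks that authFromDockerConfig
        // resolves the expected per-registry auth, returning nil for an unknown registry.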
  1888  func TestDockerDriver_AuthConfiguration(t *testing.T) {
  1889  	if !tu.IsCI() {
  1890  		t.Parallel()
  1891  	}
  1892  	testutil.DockerCompatible(t)
  1893  
  1894  	path := "./test-resources/docker/auth.json"
  1895  	cases := []struct {
  1896  		Repo       string
  1897  		AuthConfig *docker.AuthConfiguration
  1898  	}{
  1899  		{
  1900  			Repo:       "lolwhat.com/what:1337",
  1901  			AuthConfig: nil,
  1902  		},
  1903  		{
  1904  			Repo: "redis:3.2",
  1905  			AuthConfig: &docker.AuthConfiguration{
  1906  				Username:      "test",
  1907  				Password:      "1234",
  1908  				Email:         "",
  1909  				ServerAddress: "https://index.docker.io/v1/",
  1910  			},
  1911  		},
  1912  		{
  1913  			Repo: "quay.io/redis:3.2",
  1914  			AuthConfig: &docker.AuthConfiguration{
  1915  				Username:      "test",
  1916  				Password:      "5678",
  1917  				Email:         "",
  1918  				ServerAddress: "quay.io",
  1919  			},
  1920  		},
  1921  		{
  1922  			Repo: "other.io/redis:3.2",
  1923  			AuthConfig: &docker.AuthConfiguration{
  1924  				Username:      "test",
  1925  				Password:      "abcd",
  1926  				Email:         "",
  1927  				ServerAddress: "https://other.io/v1/",
  1928  			},
  1929  		},
  1930  	}
  1931  
  1932  	for _, c := range cases {
  1933  		act, err := authFromDockerConfig(path)(c.Repo)
  1934  		require.NoError(t, err)
  1935  		require.Exactly(t, c.AuthConfig, act)
  1936  	}
  1937  }
  1938  
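        // TestDockerDriver_AuthFromTaskConfig checks that auth blocks from the task config
        // map onto docker.AuthConfiguration, with an empty block yielding nil.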
  1939  func TestDockerDriver_AuthFromTaskConfig(t *testing.T) {
  1940  	if !tu.IsCI() {
  1941  		t.Parallel()
  1942  	}
  1943  
  1944  	cases := []struct {
  1945  		Auth       DockerAuth
  1946  		AuthConfig *docker.AuthConfiguration
  1947  		Desc       string
  1948  	}{
  1949  		{
  1950  			Auth:       DockerAuth{},
  1951  			AuthConfig: nil,
  1952  			Desc:       "Empty Config",
  1953  		},
  1954  		{
  1955  			Auth: DockerAuth{
  1956  				Username:   "foo",
  1957  				Password:   "bar",
  1958  				Email:      "foo@bar.com",
  1959  				ServerAddr: "www.foobar.com",
  1960  			},
  1961  			AuthConfig: &docker.AuthConfiguration{
  1962  				Username:      "foo",
  1963  				Password:      "bar",
  1964  				Email:         "foo@bar.com",
  1965  				ServerAddress: "www.foobar.com",
  1966  			},
  1967  			Desc: "All fields set",
  1968  		},
  1969  		{
  1970  			Auth: DockerAuth{
  1971  				Username:   "foo",
  1972  				Password:   "bar",
  1973  				ServerAddr: "www.foobar.com",
  1974  			},
  1975  			AuthConfig: &docker.AuthConfiguration{
  1976  				Username:      "foo",
  1977  				Password:      "bar",
  1978  				ServerAddress: "www.foobar.com",
  1979  			},
  1980  			Desc: "Email not set",
  1981  		},
  1982  	}
  1983  
  1984  	for _, c := range cases {
  1985  		t.Run(c.Desc, func(t *testing.T) {
  1986  			act, err := authFromTaskConfig(&TaskConfig{Auth: c.Auth})("test")
  1987  			require.NoError(t, err)
  1988  			require.Exactly(t, c.AuthConfig, act)
  1989  		})
  1990  	}
  1991  }
  1992  
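        // TestDockerDriver_OOMKilled runs a shell loop that doubles a string on every
        // iteration until the 10 MB memory limit is exceeded, then asserts that the wait
        // result reports OOMKilled rather than an ordinary failure.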
  1993  func TestDockerDriver_OOMKilled(t *testing.T) {
  1994  	if !tu.IsCI() {
  1995  		t.Parallel()
  1996  	}
  1997  	testutil.DockerCompatible(t)
  1998  
  1999  	if runtime.GOOS == "windows" {
  2000  		t.Skip("Windows does not support OOM Killer")
  2001  	}
  2002  
  2003  	taskCfg := newTaskConfig("", []string{"sh", "-c", `sleep 2 && x=a && while true; do x="$x$x"; done`})
  2004  	task := &drivers.TaskConfig{
  2005  		ID:        uuid.Generate(),
  2006  		Name:      "oom-killed",
  2007  		AllocID:   uuid.Generate(),
  2008  		Resources: basicResources,
  2009  	}
  2010  	task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024
  2011  	task.Resources.NomadResources.Memory.MemoryMB = 10
  2012  
  2013  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
  2014  
  2015  	d := dockerDriverHarness(t, nil)
  2016  	cleanup := d.MkAllocDir(task, true)
  2017  	defer cleanup()
  2018  	copyImage(t, task.TaskDir(), "busybox.tar")
  2019  
  2020  	_, _, err := d.StartTask(task)
  2021  	require.NoError(t, err)
  2022  
  2023  	defer d.DestroyTask(task.ID, true)
  2024  
  2025  	waitCh, err := d.WaitTask(context.Background(), task.ID)
  2026  	require.NoError(t, err)
  2027  	select {
  2028  	case res := <-waitCh:
  2029  		if res.Successful() {
  2030  			t.Fatalf("expected error, but container exited successfully")
  2031  		}
  2032  
  2033  		if !res.OOMKilled {
  2034  			t.Fatalf("not killed by OOM killer: %s", res.Err)
  2035  		}
  2036  
  2037  		t.Logf("Successfully killed by OOM killer")
  2038  
  2039  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  2040  		t.Fatalf("timeout")
  2041  	}
  2042  }
  2043  
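        // TestDockerDriver_Devices_IsInvalidConfig asserts that StartTask rejects device
        // configs with a missing host path or an invalid cgroup permission string,
        // surfacing the expected error messages.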
  2044  func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
  2045  	if !tu.IsCI() {
  2046  		t.Parallel()
  2047  	}
  2048  	testutil.DockerCompatible(t)
  2049  
  2050  	brokenConfigs := []DockerDevice{
  2051  		{
  2052  			HostPath: "",
  2053  		},
  2054  		{
  2055  			HostPath:          "/dev/sda1",
  2056  			CgroupPermissions: "rxb",
  2057  		},
  2058  	}
  2059  
  2060  	testCases := []struct {
  2061  		deviceConfig []DockerDevice
  2062  		err          error
  2063  	}{
  2064  		{brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")},
  2065  		{brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
  2066  	}
  2067  
  2068  	for _, tc := range testCases {
  2069  		task, cfg, _ := dockerTask(t)
  2070  		cfg.Devices = tc.deviceConfig
  2071  		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2072  		d := dockerDriverHarness(t, nil)
  2073  		cleanup := d.MkAllocDir(task, true)
  2074  		copyImage(t, task.TaskDir(), "busybox.tar")
  2075  		defer cleanup()
  2076  
  2077  		_, _, err := d.StartTask(task)
  2078  		require.Error(t, err)
  2079  		require.Contains(t, err.Error(), tc.err.Error())
  2080  	}
  2081  }
  2082  
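        // TestDockerDriver_Device_Success maps /dev/random to /dev/myrandom and checks
        // that the created container's HostConfig carries that device; the test expects
        // the driver to default the cgroup permissions to "rwm" when none are given.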
  2083  func TestDockerDriver_Device_Success(t *testing.T) {
  2084  	if !tu.IsCI() {
  2085  		t.Parallel()
  2086  	}
  2087  	testutil.DockerCompatible(t)
  2088  
  2089  	if runtime.GOOS != "linux" {
  2090  		t.Skip("device mounts are only tested on Linux")
  2091  	}
  2092  
  2093  	hostPath := "/dev/random"
  2094  	containerPath := "/dev/myrandom"
  2095  	perms := "rwm"
  2096  
  2097  	expectedDevice := docker.Device{
  2098  		PathOnHost:        hostPath,
  2099  		PathInContainer:   containerPath,
  2100  		CgroupPermissions: perms,
  2101  	}
  2102  	config := DockerDevice{
  2103  		HostPath:      hostPath,
  2104  		ContainerPath: containerPath,
  2105  	}
  2106  
  2107  	task, cfg, _ := dockerTask(t)
  2108  	cfg.Devices = []DockerDevice{config}
  2109  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2110  
  2111  	client, driver, handle, cleanup := dockerSetup(t, task)
  2112  	defer cleanup()
  2113  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2114  
  2115  	container, err := client.InspectContainer(handle.containerID)
  2116  	require.NoError(t, err)
  2117  
  2118  	require.NotEmpty(t, container.HostConfig.Devices, "Expected at least one device")
  2119  	require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device")
  2120  }
  2121  
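        // TestDockerDriver_Entrypoint overrides the image entrypoint with "sh -c" and
        // verifies the created container's Config.Entrypoint matches it exactly.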
  2122  func TestDockerDriver_Entrypoint(t *testing.T) {
  2123  	if !tu.IsCI() {
  2124  		t.Parallel()
  2125  	}
  2126  	testutil.DockerCompatible(t)
  2127  
  2128  	entrypoint := []string{"sh", "-c"}
  2129  	task, cfg, _ := dockerTask(t)
  2130  	cfg.Entrypoint = entrypoint
  2131  	cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
  2132  	cfg.Args = []string{}
  2133  
  2134  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2135  
  2136  	client, driver, handle, cleanup := dockerSetup(t, task)
  2137  	defer cleanup()
  2138  
  2139  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2140  
  2141  	container, err := client.InspectContainer(handle.containerID)
  2142  	require.NoError(t, err)
  2143  
  2144  	require.Len(t, container.Config.Entrypoint, 2, "Expected a two-element entrypoint")
  2145  	require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint")
  2146  }
  2147  
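        // TestDockerDriver_ReadonlyRootfs verifies that the ReadonlyRootfs task option
        // translates to HostConfig.ReadonlyRootfs on the created container (skipped on
        // Windows, which has no read-only rootfs support).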
  2148  func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
  2149  	if !tu.IsCI() {
  2150  		t.Parallel()
  2151  	}
  2152  	testutil.DockerCompatible(t)
  2153  
  2154  	if runtime.GOOS == "windows" {
  2155  		t.Skip("Docker on Windows does not support a read-only root filesystem")
  2156  	}
  2157  
  2158  	task, cfg, _ := dockerTask(t)
  2159  	cfg.ReadonlyRootfs = true
  2160  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2161  
  2162  	client, driver, handle, cleanup := dockerSetup(t, task)
  2163  	defer cleanup()
  2164  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2165  
  2166  	container, err := client.InspectContainer(handle.containerID)
  2167  	require.NoError(t, err)
  2168  
  2169  	require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
  2170  }
  2171  
  2172  // fakeDockerClient can be used in places that accept an interface for the
  2173  // docker client such as createContainer.
  2174  type fakeDockerClient struct{}
  2175  
  2176  func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
  2177  	return nil, fmt.Errorf("volume is attached on another node")
  2178  }
  2179  func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
  2180  	panic("not implemented")
  2181  }
  2182  func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) {
  2183  	panic("not implemented")
  2184  }
  2185  func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error {
  2186  	panic("not implemented")
  2187  }
  2188  
  2189  // TestDockerDriver_VolumeError asserts that volume-related errors returned
  2190  // while creating a container are marked as recoverable.
  2191  func TestDockerDriver_VolumeError(t *testing.T) {
  2192  	if !tu.IsCI() {
  2193  		t.Parallel()
  2194  	}
  2195  
  2196  	// setup
  2197  	_, cfg, _ := dockerTask(t)
  2198  	driver := dockerDriverHarness(t, nil)
  2199  
  2200  	// assert volume error is recoverable
  2201  	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg)
  2202  	require.True(t, structs.IsRecoverable(err))
  2203  }
  2204  
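        // TestDockerDriver_AdvertiseIPv6Address requires an IPv6-enabled bridge network
        // (it skips otherwise) and checks that the driver advertises the container's
        // global IPv6 address, which this test assumes falls under the
        // 2001:db8:1::242:ac11 prefix.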
  2205  func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
  2206  	if !tu.IsCI() {
  2207  		t.Parallel()
  2208  	}
  2209  	testutil.DockerCompatible(t)
  2210  
  2211  	expectedPrefix := "2001:db8:1::242:ac11"
  2212  	expectedAdvertise := true
  2213  	task, cfg, _ := dockerTask(t)
  2214  	cfg.AdvertiseIPv6Addr = expectedAdvertise
  2215  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2216  
  2217  	client := newTestDockerClient(t)
  2218  
  2219  	// Make sure IPv6 is enabled
  2220  	net, err := client.NetworkInfo("bridge")
  2221  	if err != nil {
  2222  		t.Skip("error retrieving bridge network information, skipping")
  2223  	}
  2224  	if net == nil || !net.EnableIPv6 {
  2225  		t.Skip("IPv6 not enabled on bridge network, skipping")
  2226  	}
  2227  
  2228  	driver := dockerDriverHarness(t, nil)
  2229  	cleanup := driver.MkAllocDir(task, true)
  2230  	copyImage(t, task.TaskDir(), "busybox.tar")
  2231  	defer cleanup()
  2232  
  2233  	_, network, err := driver.StartTask(task)
  2234  	defer driver.DestroyTask(task.ID, true)
  2235  	require.NoError(t, err)
  2236  
  2237  	require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %v, got: %v", expectedAdvertise, network.AutoAdvertise)
  2238  
  2239  	if !strings.HasPrefix(network.IP, expectedPrefix) {
  2240  		t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
  2241  	}
  2242  
  2243  	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
  2244  	require.True(t, ok)
  2245  
  2246  	require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))
  2247  
  2248  	container, err := client.InspectContainer(handle.containerID)
  2249  	require.NoError(t, err)
  2250  
  2251  	if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
  2252  		t.Fatalf("Got GlobalIPv6Address %q want GlobalIPv6Address with prefix %q", container.NetworkSettings.GlobalIPv6Address, expectedPrefix)
  2253  	}
  2254  }
  2255  
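        // TestParseDockerImage covers tagged, tagless (defaulting to "latest"), and
        // digest image references; digests are passed through whole with an empty tag.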
  2256  func TestParseDockerImage(t *testing.T) {
  2257  	tests := []struct {
  2258  		Image string
  2259  		Repo  string
  2260  		Tag   string
  2261  	}{
  2262  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2263  		{"library/hello-world", "library/hello-world", "latest"},
  2264  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2265  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2266  	}
  2267  	for _, test := range tests {
  2268  		t.Run(test.Image, func(t *testing.T) {
  2269  			repo, tag := parseDockerImage(test.Image)
  2270  			require.Equal(t, test.Repo, repo)
  2271  			require.Equal(t, test.Tag, tag)
  2272  		})
  2273  	}
  2274  }
  2275  
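        // TestDockerImageRef is the inverse of TestParseDockerImage: rebuilding the image
        // reference from repo and tag should round-trip the original string.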
  2276  func TestDockerImageRef(t *testing.T) {
  2277  	tests := []struct {
  2278  		Image string
  2279  		Repo  string
  2280  		Tag   string
  2281  	}{
  2282  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2283  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2284  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2285  	}
  2286  	for _, test := range tests {
  2287  		t.Run(test.Image, func(t *testing.T) {
  2288  			image := dockerImageRef(test.Repo, test.Tag)
  2289  			require.Equal(t, test.Image, image)
  2290  		})
  2291  	}
  2292  }
  2293  
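        // waitForExist polls InspectContainer via tu.WaitForResult until the container is
        // visible, tolerating NoSuchContainer errors while the container is still being
        // created.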
  2294  func waitForExist(t *testing.T, client *docker.Client, containerID string) {
  2295  	tu.WaitForResult(func() (bool, error) {
  2296  		container, err := client.InspectContainer(containerID)
  2297  		if err != nil {
  2298  			if _, ok := err.(*docker.NoSuchContainer); !ok {
  2299  				return false, err
  2300  			}
  2301  		}
  2302  
  2303  		return container != nil, nil
  2304  	}, func(err error) {
  2305  		require.NoError(t, err)
  2306  	})
  2307  }