github.com/iqoqo/nomad@v0.11.3-0.20200911112621-d7021c74d101/drivers/docker/driver_test.go (about)

     1  package docker
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"math/rand"
     8  	"path/filepath"
     9  	"reflect"
    10  	"runtime"
    11  	"runtime/debug"
    12  	"strings"
    13  	"testing"
    14  	"time"
    15  
    16  	docker "github.com/fsouza/go-dockerclient"
    17  	hclog "github.com/hashicorp/go-hclog"
    18  	"github.com/hashicorp/nomad/client/taskenv"
    19  	"github.com/hashicorp/nomad/client/testutil"
    20  	"github.com/hashicorp/nomad/devices/gpu/nvidia"
    21  	"github.com/hashicorp/nomad/helper/freeport"
    22  	"github.com/hashicorp/nomad/helper/pluginutils/hclspecutils"
    23  	"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
    24  	"github.com/hashicorp/nomad/helper/pluginutils/loader"
    25  	"github.com/hashicorp/nomad/helper/testlog"
    26  	"github.com/hashicorp/nomad/helper/uuid"
    27  	"github.com/hashicorp/nomad/nomad/structs"
    28  	"github.com/hashicorp/nomad/plugins/base"
    29  	"github.com/hashicorp/nomad/plugins/drivers"
    30  	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
    31  	tu "github.com/hashicorp/nomad/testutil"
    32  	"github.com/stretchr/testify/assert"
    33  	"github.com/stretchr/testify/require"
    34  )
    35  
var (
	// basicResources is a minimal shared resource envelope (256 MB memory,
	// 250 Nomad CPU shares, 512 cgroup CPU shares, 256 MiB memory limit)
	// used by tests that don't care about specific resource values.
	basicResources = &drivers.Resources{
		NomadResources: &structs.AllocatedTaskResources{
			Memory: structs.AllocatedMemoryResources{
				MemoryMB: 256,
			},
			Cpu: structs.AllocatedCpuResources{
				CpuShares: 250,
			},
		},
		LinuxResources: &drivers.LinuxResources{
			CPUShares:        512,
			MemoryLimitBytes: 256 * 1024 * 1024,
		},
	}
)
    52  
    53  func dockerIsRemote(t *testing.T) bool {
    54  	client, err := docker.NewClientFromEnv()
    55  	if err != nil {
    56  		return false
    57  	}
    58  
    59  	// Technically this could be a local tcp socket but for testing purposes
    60  	// we'll just assume that tcp is only used for remote connections.
    61  	if client.Endpoint()[0:3] == "tcp" {
    62  		return true
    63  	}
    64  	return false
    65  }
    66  
var (
	// busyboxLongRunningCmd is a busybox command that runs indefinitely, and
	// ideally responds to SIGINT/SIGTERM.  Sadly, busybox:1.29.3 /bin/sleep doesn't.
	// It is the default command for test tasks that must stay alive until
	// they are explicitly stopped.
	busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"}
)
    72  
// Returns a task with a reserved and dynamic port. The ports are returned
// respectively, and should be reclaimed with freeport.Return at the end of a test.
func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
	// Take two free host ports: ports[0] serves as the reserved (static)
	// port and ports[1] as the dynamic one.
	ports := freeport.MustTake(2)
	dockerReserved := ports[0]
	dockerDynamic := ports[1]

	// Long-running busybox command so the container stays alive until the
	// test stops it.
	cfg := newTaskConfig("", busyboxLongRunningCmd)
	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		Name:    "redis-demo",
		AllocID: uuid.Generate(),
		Env: map[string]string{
			"test": t.Name(),
		},
		DeviceEnv: make(map[string]string),
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 256,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 512,
				},
				Networks: []*structs.NetworkResource{
					{
						IP:            "127.0.0.1",
						ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}},
						DynamicPorts:  []structs.Port{{Label: "REDIS", Value: dockerDynamic}},
					},
				},
			},
			LinuxResources: &drivers.LinuxResources{
				CPUShares:        512,
				MemoryLimitBytes: 256 * 1024 * 1024,
				PercentTicks:     float64(512) / float64(4096),
			},
		},
	}

	// Serialize the docker-specific config into the generic task config.
	require.NoError(t, task.EncodeConcreteDriverConfig(&cfg))

	return task, &cfg, ports
}
   117  
// dockerSetup does all of the basic setup you need to get a running docker
// process up and running for testing. Use like:
//
//	task := taskTemplate()
//	// do custom task configuration
//	client, handle, cleanup := dockerSetup(t, task)
//	defer cleanup()
//	// do test stuff
//
// If there is a problem during setup this function will abort or skip the test
// and indicate the reason.
func dockerSetup(t *testing.T, task *drivers.TaskConfig) (*docker.Client, *dtestutil.DriverHarness, *taskHandle, func()) {
	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, nil)
	cleanup := driver.MkAllocDir(task, true)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	// Reach through the harness into the concrete driver to get the task
	// handle so callers can inspect the underlying container.
	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	// The returned cleanup destroys the task (best effort; error ignored)
	// before removing the alloc dir.
	return client, driver, handle, func() {
		driver.DestroyTask(task.ID, true)
		cleanup()
	}
}
   148  
   149  // cleanSlate removes the specified docker image, including potentially stopping/removing any
   150  // containers based on that image. This is used to decouple tests that would be coupled
   151  // by using the same container image.
   152  func cleanSlate(client *docker.Client, imageID string) {
   153  	if img, _ := client.InspectImage(imageID); img == nil {
   154  		return
   155  	}
   156  	containers, _ := client.ListContainers(docker.ListContainersOptions{
   157  		All: true,
   158  		Filters: map[string][]string{
   159  			"ancestor": {imageID},
   160  		},
   161  	})
   162  	for _, c := range containers {
   163  		client.RemoveContainer(docker.RemoveContainerOptions{
   164  			Force: true,
   165  			ID:    c.ID,
   166  		})
   167  	}
   168  	client.RemoveImageExtended(imageID, docker.RemoveImageOptions{
   169  		Force: true,
   170  	})
   171  	return
   172  }
   173  
   174  // dockerDriverHarness wires up everything needed to launch a task with a docker driver.
   175  // A driver plugin interface and cleanup function is returned
   176  func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.DriverHarness {
   177  	logger := testlog.HCLogger(t)
   178  	harness := dtestutil.NewDriverHarness(t, NewDockerDriver(logger))
   179  	if cfg == nil {
   180  		cfg = map[string]interface{}{
   181  			"gc": map[string]interface{}{
   182  				"image_delay": "1s",
   183  			},
   184  		}
   185  	}
   186  	plugLoader, err := loader.NewPluginLoader(&loader.PluginLoaderConfig{
   187  		Logger:            logger,
   188  		PluginDir:         "./plugins",
   189  		SupportedVersions: loader.AgentSupportedApiVersions,
   190  		InternalPlugins: map[loader.PluginID]*loader.InternalPluginConfig{
   191  			PluginID: {
   192  				Config: cfg,
   193  				Factory: func(hclog.Logger) interface{} {
   194  					return harness
   195  				},
   196  			},
   197  		},
   198  	})
   199  
   200  	require.NoError(t, err)
   201  	instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger)
   202  	require.NoError(t, err)
   203  	driver, ok := instance.Plugin().(*dtestutil.DriverHarness)
   204  	if !ok {
   205  		t.Fatal("plugin instance is not a driver... wat?")
   206  	}
   207  
   208  	return driver
   209  }
   210  
// newTestDockerClient returns a docker client built from the environment.
// It skips the test when no compatible docker daemon is available and fails
// it (with a stack trace) when client construction errors.
func newTestDockerClient(t *testing.T) *docker.Client {
	t.Helper()
	// Skips the test unless a usable docker daemon is present.
	testutil.DockerCompatible(t)

	client, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
	}
	return client
}
   221  
   222  /*
   223  // This test should always pass, even if docker daemon is not available
   224  func TestDockerDriver_Fingerprint(t *testing.T) {
   225  	if !tu.IsCI() {
   226  		t.Parallel()
   227  	}
   228  
   229  	ctx := testDockerDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources})
   230  	//ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
   231  	defer ctx.Destroy()
   232  	d := NewDockerDriver(ctx.DriverCtx)
   233  	node := &structs.Node{
   234  		Attributes: make(map[string]string),
   235  	}
   236  
   237  	request := &fingerprint.FingerprintRequest{Config: &config.Config{}, Node: node}
   238  	var response fingerprint.FingerprintResponse
   239  	err := d.Fingerprint(request, &response)
   240  	if err != nil {
   241  		t.Fatalf("err: %v", err)
   242  	}
   243  
   244  	attributes := response.Attributes
   245  	if testutil.DockerIsConnected(t) && attributes["driver.docker"] == "" {
   246  		t.Fatalf("Fingerprinter should detect when docker is available")
   247  	}
   248  
   249  	if attributes["driver.docker"] != "1" {
   250  		t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.")
   251  	} else {
   252  
   253  		// if docker is available, make sure that the response is tagged as
   254  		// applicable
   255  		if !response.Detected {
   256  			t.Fatalf("expected response to be applicable")
   257  		}
   258  	}
   259  
   260  	t.Logf("Found docker version %s", attributes["driver.docker.version"])
   261  }
   262  
   263  // TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set
   264  // the bridge network's IP as a node attribute. See #2785
   265  func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
   266  	if !tu.IsCI() {
   267  		t.Parallel()
   268  	}
   269  	testutil.DockerCompatible(t)
   270  	if runtime.GOOS != "linux" {
   271  		t.Skip("expect only on linux")
   272  	}
   273  
   274  	// This seems fragile, so we might need to reconsider this test if it
   275  	// proves flaky
   276  	expectedAddr, err := sockaddr.GetInterfaceIP("docker0")
   277  	if err != nil {
   278  		t.Fatalf("unable to get ip for docker0: %v", err)
   279  	}
   280  	if expectedAddr == "" {
   281  		t.Fatalf("unable to get ip for docker bridge")
   282  	}
   283  
   284  	conf := testConfig(t)
   285  	conf.Node = mock.Node()
   286  	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))
   287  
   288  	request := &fingerprint.FingerprintRequest{Config: conf, Node: conf.Node}
   289  	var response fingerprint.FingerprintResponse
   290  
   291  	err = dd.Fingerprint(request, &response)
   292  	if err != nil {
   293  		t.Fatalf("error fingerprinting docker: %v", err)
   294  	}
   295  
   296  	if !response.Detected {
   297  		t.Fatalf("expected response to be applicable")
   298  	}
   299  
   300  	attributes := response.Attributes
   301  	if attributes == nil {
   302  		t.Fatalf("expected attributes to be set")
   303  	}
   304  
   305  	if attributes["driver.docker"] == "" {
   306  		t.Fatalf("expected Docker to be enabled but false was returned")
   307  	}
   308  
   309  	if found := attributes["driver.docker.bridge_ip"]; found != expectedAddr {
   310  		t.Fatalf("expected bridge ip %q but found: %q", expectedAddr, found)
   311  	}
   312  	t.Logf("docker bridge ip: %q", attributes["driver.docker.bridge_ip"])
   313  }
   314  
   315  func TestDockerDriver_Check_DockerHealthStatus(t *testing.T) {
   316  	if !tu.IsCI() {
   317  		t.Parallel()
   318  	}
   319  	testutil.DockerCompatible(t)
   320  	if runtime.GOOS != "linux" {
   321  		t.Skip("expect only on linux")
   322  	}
   323  
   324  	require := require.New(t)
   325  
   326  	expectedAddr, err := sockaddr.GetInterfaceIP("docker0")
   327  	if err != nil {
   328  		t.Fatalf("unable to get ip for docker0: %v", err)
   329  	}
   330  	if expectedAddr == "" {
   331  		t.Fatalf("unable to get ip for docker bridge")
   332  	}
   333  
   334  	conf := testConfig(t)
   335  	conf.Node = mock.Node()
   336  	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))
   337  
   338  	request := &cstructs.HealthCheckRequest{}
   339  	var response cstructs.HealthCheckResponse
   340  
   341  	dc, ok := dd.(fingerprint.HealthCheck)
   342  	require.True(ok)
   343  	err = dc.HealthCheck(request, &response)
   344  	require.Nil(err)
   345  
   346  	driverInfo := response.Drivers["docker"]
   347  	require.NotNil(driverInfo)
   348  	require.True(driverInfo.Healthy)
   349  }*/
   350  
   351  func TestDockerDriver_Start_Wait(t *testing.T) {
   352  	if !tu.IsCI() {
   353  		t.Parallel()
   354  	}
   355  	testutil.DockerCompatible(t)
   356  
   357  	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
   358  	task := &drivers.TaskConfig{
   359  		ID:        uuid.Generate(),
   360  		Name:      "nc-demo",
   361  		AllocID:   uuid.Generate(),
   362  		Resources: basicResources,
   363  	}
   364  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   365  
   366  	d := dockerDriverHarness(t, nil)
   367  	cleanup := d.MkAllocDir(task, true)
   368  	defer cleanup()
   369  	copyImage(t, task.TaskDir(), "busybox.tar")
   370  
   371  	_, _, err := d.StartTask(task)
   372  	require.NoError(t, err)
   373  
   374  	defer d.DestroyTask(task.ID, true)
   375  
   376  	// Attempt to wait
   377  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   378  	require.NoError(t, err)
   379  
   380  	select {
   381  	case <-waitCh:
   382  		t.Fatalf("wait channel should not have received an exit result")
   383  	case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second):
   384  	}
   385  }
   386  
   387  func TestDockerDriver_Start_WaitFinish(t *testing.T) {
   388  	if !tu.IsCI() {
   389  		t.Parallel()
   390  	}
   391  	testutil.DockerCompatible(t)
   392  
   393  	taskCfg := newTaskConfig("", []string{"echo", "hello"})
   394  	task := &drivers.TaskConfig{
   395  		ID:        uuid.Generate(),
   396  		Name:      "nc-demo",
   397  		AllocID:   uuid.Generate(),
   398  		Resources: basicResources,
   399  	}
   400  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   401  
   402  	d := dockerDriverHarness(t, nil)
   403  	cleanup := d.MkAllocDir(task, true)
   404  	defer cleanup()
   405  	copyImage(t, task.TaskDir(), "busybox.tar")
   406  
   407  	_, _, err := d.StartTask(task)
   408  	require.NoError(t, err)
   409  
   410  	defer d.DestroyTask(task.ID, true)
   411  
   412  	// Attempt to wait
   413  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   414  	require.NoError(t, err)
   415  
   416  	select {
   417  	case res := <-waitCh:
   418  		if !res.Successful() {
   419  			require.Fail(t, "ExitResult should be successful: %v", res)
   420  		}
   421  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   422  		require.Fail(t, "timeout")
   423  	}
   424  }
   425  
// TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a
// stopped task container, remove it, and start a new container.
//
// See https://github.com/hashicorp/nomad/issues/3419
func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", []string{"sleep", "9001"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "nc-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	client := newTestDockerClient(t)

	// Resolve an image ID: load the busybox tarball on non-Windows; on
	// Windows the named image is expected to already exist locally.
	var imageID string
	var err error

	if runtime.GOOS != "windows" {
		imageID, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
	} else {
		image, lErr := client.InspectImage("dantoml/busybox-windows:08012019")
		err = lErr
		if image != nil {
			imageID = image.ID
		}
	}
	require.NoError(t, err)
	require.NotEmpty(t, imageID)

	// Create a container of the same name but don't start it. This mimics
	// the case of dockerd getting restarted and stopping containers while
	// Nomad is watching them.
	opts := docker.CreateContainerOptions{
		Name: strings.Replace(task.ID, "/", "_", -1),
		Config: &docker.Config{
			Image: taskCfg.Image,
			Cmd:   []string{"sleep", "9000"},
			Env:   []string{fmt.Sprintf("test=%s", t.Name())},
		},
	}

	if _, err := client.CreateContainer(opts); err != nil {
		t.Fatalf("error creating initial container: %v", err)
	}

	// Starting the task must replace the pre-existing stopped container
	// rather than erroring on the name collision.
	_, _, err = d.StartTask(task)
	defer d.DestroyTask(task.ID, true)
	require.NoError(t, err)

	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
	require.NoError(t, d.DestroyTask(task.ID, true))
}
   490  
   491  func TestDockerDriver_Start_LoadImage(t *testing.T) {
   492  	if !tu.IsCI() {
   493  		t.Parallel()
   494  	}
   495  	testutil.DockerCompatible(t)
   496  
   497  	taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"})
   498  	task := &drivers.TaskConfig{
   499  		ID:        uuid.Generate(),
   500  		Name:      "busybox-demo",
   501  		AllocID:   uuid.Generate(),
   502  		Resources: basicResources,
   503  	}
   504  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   505  
   506  	d := dockerDriverHarness(t, nil)
   507  	cleanup := d.MkAllocDir(task, true)
   508  	defer cleanup()
   509  	copyImage(t, task.TaskDir(), "busybox.tar")
   510  
   511  	_, _, err := d.StartTask(task)
   512  	require.NoError(t, err)
   513  
   514  	defer d.DestroyTask(task.ID, true)
   515  
   516  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   517  	require.NoError(t, err)
   518  	select {
   519  	case res := <-waitCh:
   520  		if !res.Successful() {
   521  			require.Fail(t, "ExitResult should be successful: %v", res)
   522  		}
   523  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   524  		require.Fail(t, "timeout")
   525  	}
   526  
   527  	// Check that data was written to the shared alloc directory.
   528  	outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
   529  	act, err := ioutil.ReadFile(outputFile)
   530  	if err != nil {
   531  		t.Fatalf("Couldn't read expected output: %v", err)
   532  	}
   533  
   534  	exp := "hello"
   535  	if strings.TrimSpace(string(act)) != exp {
   536  		t.Fatalf("Command outputted %v; want %v", act, exp)
   537  	}
   538  
   539  }
   540  
   541  // Tests that starting a task without an image fails
   542  func TestDockerDriver_Start_NoImage(t *testing.T) {
   543  	if !tu.IsCI() {
   544  		t.Parallel()
   545  	}
   546  	testutil.DockerCompatible(t)
   547  
   548  	taskCfg := TaskConfig{
   549  		Command: "echo",
   550  		Args:    []string{"foo"},
   551  	}
   552  	task := &drivers.TaskConfig{
   553  		ID:        uuid.Generate(),
   554  		Name:      "echo",
   555  		AllocID:   uuid.Generate(),
   556  		Resources: basicResources,
   557  	}
   558  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   559  
   560  	d := dockerDriverHarness(t, nil)
   561  	cleanup := d.MkAllocDir(task, false)
   562  	defer cleanup()
   563  
   564  	_, _, err := d.StartTask(task)
   565  	require.Error(t, err)
   566  	require.Contains(t, err.Error(), "image name required")
   567  
   568  	d.DestroyTask(task.ID, true)
   569  }
   570  
// TestDockerDriver_Start_BadPull_Recoverable asserts that a failed image
// pull from an unreachable registry surfaces as a *recoverable* error so the
// client will retry rather than fail the task permanently.
func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	taskCfg := TaskConfig{
		Image:   "127.0.0.1:32121/foo", // bad path
		Command: "echo",
		Args: []string{
			"hello",
		},
	}
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()

	_, _, err := d.StartTask(task)
	require.Error(t, err)

	defer d.DestroyTask(task.ID, true)

	// The error must both be a RecoverableError and report itself recoverable.
	if rerr, ok := err.(*structs.RecoverableError); !ok {
		t.Fatalf("want recoverable error: %+v", err)
	} else if !rerr.IsRecoverable() {
		t.Fatalf("error not recoverable: %+v", err)
	}
}
   607  
// TestDockerDriver_Start_Wait_AllocDir runs a container that writes into the
// shared alloc directory and verifies the bytes arrive on the host side.
func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	// This test requires that the alloc dir be mounted into docker as a volume.
	// Because this cannot happen when docker is run remotely, e.g. when running
	// docker in a VM, we skip this when we detect Docker is being run remotely.
	if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
		t.Skip("Docker not connected")
	}

	exp := []byte{'w', 'i', 'n'}
	file := "output.txt"

	// Sleep briefly so the task is observably running before it writes the
	// expected bytes into $NOMAD_ALLOC_DIR/<file>.
	taskCfg := newTaskConfig("", []string{
		"sh",
		"-c",
		fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`,
			string(exp), taskenv.AllocDir, file),
	})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case res := <-waitCh:
		if !res.Successful() {
			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}

	// Check that data was written to the shared alloc directory.
	outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file)
	act, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Couldn't read expected output: %v", err)
	}

	if !reflect.DeepEqual(act, exp) {
		t.Fatalf("Command outputted %v; want %v", act, exp)
	}
}
   670  
   671  func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
   672  	if !tu.IsCI() {
   673  		t.Parallel()
   674  	}
   675  	testutil.DockerCompatible(t)
   676  
   677  	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
   678  	task := &drivers.TaskConfig{
   679  		ID:        uuid.Generate(),
   680  		Name:      "busybox-demo",
   681  		AllocID:   uuid.Generate(),
   682  		Resources: basicResources,
   683  	}
   684  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   685  
   686  	d := dockerDriverHarness(t, nil)
   687  	cleanup := d.MkAllocDir(task, true)
   688  	defer cleanup()
   689  	copyImage(t, task.TaskDir(), "busybox.tar")
   690  
   691  	_, _, err := d.StartTask(task)
   692  	require.NoError(t, err)
   693  
   694  	defer d.DestroyTask(task.ID, true)
   695  
   696  	go func(t *testing.T) {
   697  		time.Sleep(100 * time.Millisecond)
   698  		signal := "SIGINT"
   699  		if runtime.GOOS == "windows" {
   700  			signal = "SIGKILL"
   701  		}
   702  		require.NoError(t, d.StopTask(task.ID, time.Second, signal))
   703  	}(t)
   704  
   705  	// Attempt to wait
   706  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   707  	require.NoError(t, err)
   708  
   709  	select {
   710  	case res := <-waitCh:
   711  		if res.Successful() {
   712  			require.Fail(t, "ExitResult should err: %v", res)
   713  		}
   714  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   715  		require.Fail(t, "timeout")
   716  	}
   717  }
   718  
   719  func TestDockerDriver_Start_KillTimeout(t *testing.T) {
   720  	if !tu.IsCI() {
   721  		t.Parallel()
   722  	}
   723  	testutil.DockerCompatible(t)
   724  
   725  	if runtime.GOOS == "windows" {
   726  		t.Skip("Windows Docker does not support SIGUSR1")
   727  	}
   728  
   729  	timeout := 2 * time.Second
   730  	taskCfg := newTaskConfig("", []string{"sleep", "10"})
   731  	task := &drivers.TaskConfig{
   732  		ID:        uuid.Generate(),
   733  		Name:      "busybox-demo",
   734  		AllocID:   uuid.Generate(),
   735  		Resources: basicResources,
   736  	}
   737  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   738  
   739  	d := dockerDriverHarness(t, nil)
   740  	cleanup := d.MkAllocDir(task, true)
   741  	defer cleanup()
   742  	copyImage(t, task.TaskDir(), "busybox.tar")
   743  
   744  	_, _, err := d.StartTask(task)
   745  	require.NoError(t, err)
   746  
   747  	defer d.DestroyTask(task.ID, true)
   748  
   749  	var killSent time.Time
   750  	go func() {
   751  		time.Sleep(100 * time.Millisecond)
   752  		killSent = time.Now()
   753  		require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
   754  	}()
   755  
   756  	// Attempt to wait
   757  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   758  	require.NoError(t, err)
   759  
   760  	var killed time.Time
   761  	select {
   762  	case <-waitCh:
   763  		killed = time.Now()
   764  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   765  		require.Fail(t, "timeout")
   766  	}
   767  
   768  	require.True(t, killed.Sub(killSent) > timeout)
   769  }
   770  
// TestDockerDriver_StartN starts three tasks concurrently from freshly
// allocated ports, then stops each and waits for its exit.
func TestDockerDriver_StartN(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Windows Docker does not support SIGINT")
	}
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)
	require := require.New(t)

	task1, _, ports1 := dockerTask(t)
	defer freeport.Return(ports1)

	task2, _, ports2 := dockerTask(t)
	defer freeport.Return(ports2)

	task3, _, ports3 := dockerTask(t)
	defer freeport.Return(ports3)

	taskList := []*drivers.TaskConfig{task1, task2, task3}

	t.Logf("Starting %d tasks", len(taskList))

	d := dockerDriverHarness(t, nil)
	// Let's spin up a bunch of things
	for _, task := range taskList {
		// defer-in-loop is intentional here: every alloc dir must live for
		// the remainder of the test.
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		copyImage(t, task.TaskDir(), "busybox.tar")
		_, _, err := d.StartTask(task)
		require.NoError(err)

	}

	defer d.DestroyTask(task3.ID, true)
	defer d.DestroyTask(task2.ID, true)
	defer d.DestroyTask(task1.ID, true)

	t.Log("All tasks are started. Terminating...")
	for _, task := range taskList {
		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))

		// Attempt to wait
		waitCh, err := d.WaitTask(context.Background(), task.ID)
		require.NoError(err)

		select {
		case <-waitCh:
		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
			require.Fail("timeout waiting on task")
		}
	}

	t.Log("Test complete!")
}
   826  
// TestDockerDriver_StartNVersions starts three tasks using three busybox
// image variants (default, musl, glibc), waits for each to be running, then
// stops them and waits for their exits.
func TestDockerDriver_StartNVersions(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Skipped on windows, we don't have image variants available")
	}
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)
	require := require.New(t)

	// Each task gets its own image variant via newTaskConfig's variant arg.
	task1, cfg1, ports1 := dockerTask(t)
	defer freeport.Return(ports1)
	tcfg1 := newTaskConfig("", []string{"echo", "hello"})
	cfg1.Image = tcfg1.Image
	cfg1.LoadImage = tcfg1.LoadImage
	require.NoError(task1.EncodeConcreteDriverConfig(cfg1))

	task2, cfg2, ports2 := dockerTask(t)
	defer freeport.Return(ports2)
	tcfg2 := newTaskConfig("musl", []string{"echo", "hello"})
	cfg2.Image = tcfg2.Image
	cfg2.LoadImage = tcfg2.LoadImage
	require.NoError(task2.EncodeConcreteDriverConfig(cfg2))

	task3, cfg3, ports3 := dockerTask(t)
	defer freeport.Return(ports3)
	tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"})
	cfg3.Image = tcfg3.Image
	cfg3.LoadImage = tcfg3.LoadImage
	require.NoError(task3.EncodeConcreteDriverConfig(cfg3))

	taskList := []*drivers.TaskConfig{task1, task2, task3}

	t.Logf("Starting %d tasks", len(taskList))
	d := dockerDriverHarness(t, nil)

	// Let's spin up a bunch of things
	for _, task := range taskList {
		// defer-in-loop is intentional: every alloc dir must survive until
		// the end of the test. All three tarballs are copied so any variant
		// can be loaded from the task dir.
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		copyImage(t, task.TaskDir(), "busybox.tar")
		copyImage(t, task.TaskDir(), "busybox_musl.tar")
		copyImage(t, task.TaskDir(), "busybox_glibc.tar")
		_, _, err := d.StartTask(task)
		require.NoError(err)

		require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
	}

	defer d.DestroyTask(task3.ID, true)
	defer d.DestroyTask(task2.ID, true)
	defer d.DestroyTask(task1.ID, true)

	t.Log("All tasks are started. Terminating...")
	for _, task := range taskList {
		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))

		// Attempt to wait
		waitCh, err := d.WaitTask(context.Background(), task.ID)
		require.NoError(err)

		select {
		case <-waitCh:
		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
			require.Fail("timeout waiting on task")
		}
	}

	t.Log("Test complete!")
}
   897  
   898  func TestDockerDriver_Labels(t *testing.T) {
   899  	if !tu.IsCI() {
   900  		t.Parallel()
   901  	}
   902  	testutil.DockerCompatible(t)
   903  
   904  	task, cfg, ports := dockerTask(t)
   905  	defer freeport.Return(ports)
   906  
   907  	cfg.Labels = map[string]string{
   908  		"label1": "value1",
   909  		"label2": "value2",
   910  	}
   911  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   912  
   913  	client, d, handle, cleanup := dockerSetup(t, task)
   914  	defer cleanup()
   915  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   916  
   917  	container, err := client.InspectContainer(handle.containerID)
   918  	if err != nil {
   919  		t.Fatalf("err: %v", err)
   920  	}
   921  
   922  	// expect to see 1 additional standard labels
   923  	require.Equal(t, len(cfg.Labels)+1, len(container.Config.Labels))
   924  	for k, v := range cfg.Labels {
   925  		require.Equal(t, v, container.Config.Labels[k])
   926  	}
   927  }
   928  
   929  func TestDockerDriver_ForcePull(t *testing.T) {
   930  	if !tu.IsCI() {
   931  		t.Parallel()
   932  	}
   933  	testutil.DockerCompatible(t)
   934  
   935  	task, cfg, ports := dockerTask(t)
   936  	defer freeport.Return(ports)
   937  
   938  	cfg.ForcePull = true
   939  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   940  
   941  	client, d, handle, cleanup := dockerSetup(t, task)
   942  	defer cleanup()
   943  
   944  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   945  
   946  	_, err := client.InspectContainer(handle.containerID)
   947  	if err != nil {
   948  		t.Fatalf("err: %v", err)
   949  	}
   950  }
   951  
   952  func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
   953  	if runtime.GOOS == "windows" {
   954  		t.Skip("TODO: Skipped digest test on Windows")
   955  	}
   956  
   957  	if !tu.IsCI() {
   958  		t.Parallel()
   959  	}
   960  	testutil.DockerCompatible(t)
   961  
   962  	task, cfg, ports := dockerTask(t)
   963  	defer freeport.Return(ports)
   964  	cfg.LoadImage = ""
   965  	cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
   966  	localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
   967  	cfg.ForcePull = true
   968  	cfg.Command = busyboxLongRunningCmd[0]
   969  	cfg.Args = busyboxLongRunningCmd[1:]
   970  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   971  
   972  	client, d, handle, cleanup := dockerSetup(t, task)
   973  	defer cleanup()
   974  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   975  
   976  	container, err := client.InspectContainer(handle.containerID)
   977  	require.NoError(t, err)
   978  	require.Equal(t, localDigest, container.Image)
   979  }
   980  
   981  func TestDockerDriver_SecurityOptUnconfined(t *testing.T) {
   982  	if runtime.GOOS == "windows" {
   983  		t.Skip("Windows does not support seccomp")
   984  	}
   985  	if !tu.IsCI() {
   986  		t.Parallel()
   987  	}
   988  	testutil.DockerCompatible(t)
   989  
   990  	task, cfg, ports := dockerTask(t)
   991  	defer freeport.Return(ports)
   992  	cfg.SecurityOpt = []string{"seccomp=unconfined"}
   993  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   994  
   995  	client, d, handle, cleanup := dockerSetup(t, task)
   996  	defer cleanup()
   997  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   998  
   999  	container, err := client.InspectContainer(handle.containerID)
  1000  	if err != nil {
  1001  		t.Fatalf("err: %v", err)
  1002  	}
  1003  
  1004  	require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt)
  1005  }
  1006  
  1007  func TestDockerDriver_SecurityOptFromFile(t *testing.T) {
  1008  
  1009  	if runtime.GOOS == "windows" {
  1010  		t.Skip("Windows does not support seccomp")
  1011  	}
  1012  	if !tu.IsCI() {
  1013  		t.Parallel()
  1014  	}
  1015  	testutil.DockerCompatible(t)
  1016  
  1017  	task, cfg, ports := dockerTask(t)
  1018  	defer freeport.Return(ports)
  1019  	cfg.SecurityOpt = []string{"seccomp=./test-resources/docker/seccomp.json"}
  1020  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1021  
  1022  	client, d, handle, cleanup := dockerSetup(t, task)
  1023  	defer cleanup()
  1024  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1025  
  1026  	container, err := client.InspectContainer(handle.containerID)
  1027  	require.NoError(t, err)
  1028  
  1029  	require.Contains(t, container.HostConfig.SecurityOpt[0], "reboot")
  1030  }
  1031  
  1032  func TestDockerDriver_Runtime(t *testing.T) {
  1033  	if !tu.IsCI() {
  1034  		t.Parallel()
  1035  	}
  1036  	testutil.DockerCompatible(t)
  1037  
  1038  	task, cfg, ports := dockerTask(t)
  1039  	defer freeport.Return(ports)
  1040  	cfg.Runtime = "runc"
  1041  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1042  
  1043  	client, d, handle, cleanup := dockerSetup(t, task)
  1044  	defer cleanup()
  1045  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1046  
  1047  	container, err := client.InspectContainer(handle.containerID)
  1048  	if err != nil {
  1049  		t.Fatalf("err: %v", err)
  1050  	}
  1051  
  1052  	require.Exactly(t, cfg.Runtime, container.HostConfig.Runtime)
  1053  }
  1054  
  1055  func TestDockerDriver_CreateContainerConfig(t *testing.T) {
  1056  	t.Parallel()
  1057  
  1058  	task, cfg, ports := dockerTask(t)
  1059  	defer freeport.Return(ports)
  1060  	opt := map[string]string{"size": "120G"}
  1061  
  1062  	cfg.StorageOpt = opt
  1063  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1064  
  1065  	dh := dockerDriverHarness(t, nil)
  1066  	driver := dh.Impl().(*Driver)
  1067  
  1068  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1069  	require.NoError(t, err)
  1070  
  1071  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1072  	require.EqualValues(t, opt, c.HostConfig.StorageOpt)
  1073  
  1074  	// Container name should be /<task_name>-<alloc_id> for backward compat
  1075  	containerName := fmt.Sprintf("%s-%s", strings.Replace(task.Name, "/", "_", -1), task.AllocID)
  1076  	require.Equal(t, containerName, c.Name)
  1077  }
  1078  
  1079  func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
  1080  	t.Parallel()
  1081  
  1082  	task, cfg, ports := dockerTask(t)
  1083  	defer freeport.Return(ports)
  1084  	task.DeviceEnv[nvidia.NvidiaVisibleDevices] = "GPU_UUID_1"
  1085  
  1086  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1087  
  1088  	dh := dockerDriverHarness(t, nil)
  1089  	driver := dh.Impl().(*Driver)
  1090  	driver.gpuRuntime = true
  1091  
  1092  	// Should error if a runtime was explicitly set that doesn't match gpu runtime
  1093  	cfg.Runtime = "nvidia"
  1094  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1095  	require.NoError(t, err)
  1096  	require.Equal(t, "nvidia", c.HostConfig.Runtime)
  1097  
  1098  	cfg.Runtime = "custom"
  1099  	_, err = driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1100  	require.Error(t, err)
  1101  	require.Contains(t, err.Error(), "conflicting runtime requests")
  1102  }
  1103  
  1104  func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
  1105  	t.Parallel()
  1106  
  1107  	dh := dockerDriverHarness(t, nil)
  1108  	driver := dh.Impl().(*Driver)
  1109  	driver.gpuRuntime = true
  1110  	driver.config.allowRuntimes = map[string]struct{}{
  1111  		"runc":   struct{}{},
  1112  		"custom": struct{}{},
  1113  	}
  1114  
  1115  	allowRuntime := []string{
  1116  		"", // default always works
  1117  		"runc",
  1118  		"custom",
  1119  	}
  1120  
  1121  	task, cfg, ports := dockerTask(t)
  1122  	defer freeport.Return(ports)
  1123  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1124  
  1125  	for _, runtime := range allowRuntime {
  1126  		t.Run(runtime, func(t *testing.T) {
  1127  			cfg.Runtime = runtime
  1128  			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1129  			require.NoError(t, err)
  1130  			require.Equal(t, runtime, c.HostConfig.Runtime)
  1131  		})
  1132  	}
  1133  
  1134  	t.Run("not allowed: denied", func(t *testing.T) {
  1135  		cfg.Runtime = "denied"
  1136  		_, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1137  		require.Error(t, err)
  1138  		require.Contains(t, err.Error(), `runtime "denied" is not allowed`)
  1139  	})
  1140  
  1141  }
  1142  
  1143  func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
  1144  	t.Parallel()
  1145  
  1146  	task, cfg, ports := dockerTask(t)
  1147  	defer freeport.Return(ports)
  1148  	task.User = "random-user-1"
  1149  
  1150  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1151  
  1152  	dh := dockerDriverHarness(t, nil)
  1153  	driver := dh.Impl().(*Driver)
  1154  
  1155  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1156  	require.NoError(t, err)
  1157  
  1158  	require.Equal(t, task.User, c.Config.User)
  1159  }
  1160  
  1161  func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) {
  1162  	t.Parallel()
  1163  
  1164  	task, cfg, ports := dockerTask(t)
  1165  	defer freeport.Return(ports)
  1166  	task.AllocID = uuid.Generate()
  1167  	task.JobName = "redis-demo-job"
  1168  
  1169  	cfg.Labels = map[string]string{
  1170  		"user_label": "user_value",
  1171  
  1172  		// com.hashicorp.nomad. labels are reserved and
  1173  		// cannot be overridden
  1174  		"com.hashicorp.nomad.alloc_id": "bad_value",
  1175  	}
  1176  
  1177  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1178  
  1179  	dh := dockerDriverHarness(t, nil)
  1180  	driver := dh.Impl().(*Driver)
  1181  
  1182  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1183  	require.NoError(t, err)
  1184  
  1185  	expectedLabels := map[string]string{
  1186  		// user provided labels
  1187  		"user_label": "user_value",
  1188  		// default labels
  1189  		"com.hashicorp.nomad.alloc_id": task.AllocID,
  1190  	}
  1191  
  1192  	require.Equal(t, expectedLabels, c.Config.Labels)
  1193  }
  1194  
// TestDockerDriver_CreateContainerConfig_Logging checks how the log_config
// block is translated into docker's LogConfig: the type/driver precedence
// rules, user-supplied config passthrough, and the json-file rotation
// defaults applied when nothing is specified.
func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name           string
		loggingConfig  DockerLogging // what the user configures
		expectedConfig DockerLogging // what should land in HostConfig.LogConfig
	}{
		{
			"simple type",
			DockerLogging{Type: "fluentd"},
			DockerLogging{
				Type:   "fluentd",
				Config: map[string]string{},
			},
		},
		{
			// Driver is a legacy alias for Type.
			"simple driver",
			DockerLogging{Driver: "fluentd"},
			DockerLogging{
				Type:   "fluentd",
				Config: map[string]string{},
			},
		},
		{
			"type takes precedence",
			DockerLogging{
				Type:   "json-file",
				Driver: "fluentd",
			},
			DockerLogging{
				Type:   "json-file",
				Config: map[string]string{},
			},
		},
		{
			"user config takes precedence, even if no type provided",
			DockerLogging{
				Type:   "",
				Config: map[string]string{"max-file": "3", "max-size": "10m"},
			},
			DockerLogging{
				Type:   "",
				Config: map[string]string{"max-file": "3", "max-size": "10m"},
			},
		},
		{
			"defaults to json-file w/ log rotation",
			DockerLogging{
				Type: "",
			},
			DockerLogging{
				Type:   "json-file",
				Config: map[string]string{"max-file": "2", "max-size": "2m"},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			cfg.Logging = c.loggingConfig
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

			dh := dockerDriverHarness(t, nil)
			driver := dh.Impl().(*Driver)

			// createContainerConfig is exercised directly; no container is
			// started, so this test does not need a docker daemon.
			cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
			require.NoError(t, err)

			require.Equal(t, c.expectedConfig.Type, cc.HostConfig.LogConfig.Type)
			require.Equal(t, c.expectedConfig.Config["max-file"], cc.HostConfig.LogConfig.Config["max-file"])
			require.Equal(t, c.expectedConfig.Config["max-size"], cc.HostConfig.LogConfig.Config["max-size"])
		})
	}
}
  1273  
// TestDockerDriver_CreateContainerConfigWithRuntimes covers the interaction
// between a detected GPU runtime, the presence of nvidia devices on the task,
// and the runtime finally written to the container's host config.
func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testCases := []struct {
		description           string
		gpuRuntimeSet         bool   // whether the driver detected a GPU runtime
		expectToReturnError   bool   // expect createContainerConfig to fail
		expectedRuntime       string // doubles as the driver's GPURuntimeName
		nvidiaDevicesProvided bool   // whether the task exposes nvidia devices
	}{
		{
			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 1",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: true,
		},
		{
			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 2",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia-runtime-modified-name",
			nvidiaDevicesProvided: true,
		},
		{
			description:           "no gpu devices provided - no runtime should be set",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: false,
		},
		{
			description:           "no gpuRuntime supported by docker driver",
			gpuRuntimeSet:         false,
			expectToReturnError:   true,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: true,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			dh := dockerDriverHarness(t, map[string]interface{}{
				"allow_runtimes": []string{"runc", "nvidia", "nvidia-runtime-modified-name"},
			})
			driver := dh.Impl().(*Driver)

			driver.gpuRuntime = testCase.gpuRuntimeSet
			driver.config.GPURuntimeName = testCase.expectedRuntime
			if testCase.nvidiaDevicesProvided {
				// Presence of this env var is what signals GPU devices.
				task.DeviceEnv[nvidia.NvidiaVisibleDevices] = "GPU_UUID_1"
			}

			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
			if testCase.expectToReturnError {
				require.NotNil(t, err)
			} else {
				require.NoError(t, err)
				if testCase.nvidiaDevicesProvided {
					require.Equal(t, testCase.expectedRuntime, c.HostConfig.Runtime)
				} else {
					// no nvidia devices provided -> no point to use nvidia runtime
					require.Equal(t, "", c.HostConfig.Runtime)
				}
			}
		})
	}
}
  1345  
// TestDockerDriver_Capabilities exercises cap_add/cap_drop against both the
// default capability whitelist and custom whitelists, checking that a
// forbidden capability fails task start with an error naming the capability.
func TestDockerDriver_Capabilities(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)
	if runtime.GOOS == "windows" {
		t.Skip("Capabilities not supported on windows")
	}

	testCases := []struct {
		Name       string
		CapAdd     []string // capabilities the task asks to add
		CapDrop    []string // capabilities the task asks to drop
		Whitelist  string   // comma-separated allow_caps override; empty keeps the default
		StartError string   // substring expected in the StartTask error; empty means start succeeds
	}{
		{
			Name:    "default-whitelist-add-allowed",
			CapAdd:  []string{"fowner", "mknod"},
			CapDrop: []string{"all"},
		},
		{
			Name:       "default-whitelist-add-forbidden",
			CapAdd:     []string{"net_admin"},
			StartError: "net_admin",
		},
		{
			Name:    "default-whitelist-drop-existing",
			CapDrop: []string{"fowner", "mknod"},
		},
		{
			Name:      "restrictive-whitelist-drop-all",
			CapDrop:   []string{"all"},
			Whitelist: "fowner,mknod",
		},
		{
			Name:      "restrictive-whitelist-add-allowed",
			CapAdd:    []string{"fowner", "mknod"},
			CapDrop:   []string{"all"},
			Whitelist: "fowner,mknod",
		},
		{
			Name:       "restrictive-whitelist-add-forbidden",
			CapAdd:     []string{"net_admin", "mknod"},
			CapDrop:    []string{"all"},
			Whitelist:  "fowner,mknod",
			StartError: "net_admin",
		},
		{
			Name:      "permissive-whitelist",
			CapAdd:    []string{"net_admin", "mknod"},
			Whitelist: "all",
		},
		{
			Name:      "permissive-whitelist-add-all",
			CapAdd:    []string{"all"},
			Whitelist: "all",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			client := newTestDockerClient(t)
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			if len(tc.CapAdd) > 0 {
				cfg.CapAdd = tc.CapAdd
			}
			if len(tc.CapDrop) > 0 {
				cfg.CapDrop = tc.CapDrop
			}
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

			d := dockerDriverHarness(t, nil)
			dockerDriver, ok := d.Impl().(*Driver)
			require.True(t, ok)
			if tc.Whitelist != "" {
				dockerDriver.config.AllowCaps = strings.Split(tc.Whitelist, ",")
			}

			cleanup := d.MkAllocDir(task, true)
			defer cleanup()
			copyImage(t, task.TaskDir(), "busybox.tar")

			// DestroyTask is deferred before the error check so cleanup runs
			// even when StartTask partially succeeded.
			_, _, err := d.StartTask(task)
			defer d.DestroyTask(task.ID, true)
			if err == nil && tc.StartError != "" {
				t.Fatalf("Expected error in start: %v", tc.StartError)
			} else if err != nil {
				// An error is only acceptable when the case expects one, and
				// it must mention the offending capability.
				if tc.StartError == "" {
					require.NoError(t, err)
				} else {
					require.Contains(t, err.Error(), tc.StartError)
				}
				return
			}

			handle, ok := dockerDriver.tasks.Get(task.ID)
			require.True(t, ok)

			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

			container, err := client.InspectContainer(handle.containerID)
			require.NoError(t, err)

			// On success the caps must be passed through unchanged.
			require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd)
			require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop)
		})
	}
}
  1457  
  1458  func TestDockerDriver_DNS(t *testing.T) {
  1459  	if !tu.IsCI() {
  1460  		t.Parallel()
  1461  	}
  1462  	testutil.DockerCompatible(t)
  1463  
  1464  	task, cfg, ports := dockerTask(t)
  1465  	defer freeport.Return(ports)
  1466  	cfg.DNSServers = []string{"8.8.8.8", "8.8.4.4"}
  1467  	cfg.DNSSearchDomains = []string{"example.com", "example.org", "example.net"}
  1468  	cfg.DNSOptions = []string{"ndots:1"}
  1469  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1470  
  1471  	client, d, handle, cleanup := dockerSetup(t, task)
  1472  	defer cleanup()
  1473  
  1474  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1475  
  1476  	container, err := client.InspectContainer(handle.containerID)
  1477  	require.NoError(t, err)
  1478  
  1479  	require.Exactly(t, cfg.DNSServers, container.HostConfig.DNS)
  1480  	require.Exactly(t, cfg.DNSSearchDomains, container.HostConfig.DNSSearch)
  1481  	require.Exactly(t, cfg.DNSOptions, container.HostConfig.DNSOptions)
  1482  }
  1483  
  1484  func TestDockerDriver_MACAddress(t *testing.T) {
  1485  	if !tu.IsCI() {
  1486  		t.Parallel()
  1487  	}
  1488  	testutil.DockerCompatible(t)
  1489  	if runtime.GOOS == "windows" {
  1490  		t.Skip("Windows docker does not support setting MacAddress")
  1491  	}
  1492  
  1493  	task, cfg, ports := dockerTask(t)
  1494  	defer freeport.Return(ports)
  1495  	cfg.MacAddress = "00:16:3e:00:00:00"
  1496  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1497  
  1498  	client, d, handle, cleanup := dockerSetup(t, task)
  1499  	defer cleanup()
  1500  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1501  
  1502  	container, err := client.InspectContainer(handle.containerID)
  1503  	require.NoError(t, err)
  1504  
  1505  	require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress)
  1506  }
  1507  
  1508  func TestDockerWorkDir(t *testing.T) {
  1509  	if !tu.IsCI() {
  1510  		t.Parallel()
  1511  	}
  1512  	testutil.DockerCompatible(t)
  1513  
  1514  	task, cfg, ports := dockerTask(t)
  1515  	defer freeport.Return(ports)
  1516  	cfg.WorkDir = "/some/path"
  1517  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1518  
  1519  	client, d, handle, cleanup := dockerSetup(t, task)
  1520  	defer cleanup()
  1521  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1522  
  1523  	container, err := client.InspectContainer(handle.containerID)
  1524  	require.NoError(t, err)
  1525  	require.Equal(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir))
  1526  }
  1527  
  1528  func inSlice(needle string, haystack []string) bool {
  1529  	for _, h := range haystack {
  1530  		if h == needle {
  1531  			return true
  1532  		}
  1533  	}
  1534  	return false
  1535  }
  1536  
  1537  func TestDockerDriver_PortsNoMap(t *testing.T) {
  1538  	if !tu.IsCI() {
  1539  		t.Parallel()
  1540  	}
  1541  	testutil.DockerCompatible(t)
  1542  
  1543  	task, _, ports := dockerTask(t)
  1544  	defer freeport.Return(ports)
  1545  	res := ports[0]
  1546  	dyn := ports[1]
  1547  
  1548  	client, d, handle, cleanup := dockerSetup(t, task)
  1549  	defer cleanup()
  1550  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1551  
  1552  	container, err := client.InspectContainer(handle.containerID)
  1553  	require.NoError(t, err)
  1554  
  1555  	// Verify that the correct ports are EXPOSED
  1556  	expectedExposedPorts := map[docker.Port]struct{}{
  1557  		docker.Port(fmt.Sprintf("%d/tcp", res)): {},
  1558  		docker.Port(fmt.Sprintf("%d/udp", res)): {},
  1559  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {},
  1560  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {},
  1561  	}
  1562  
  1563  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1564  
  1565  	hostIP := "127.0.0.1"
  1566  	if runtime.GOOS == "windows" {
  1567  		hostIP = ""
  1568  	}
  1569  
  1570  	// Verify that the correct ports are FORWARDED
  1571  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1572  		docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1573  		docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1574  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1575  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1576  	}
  1577  
  1578  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1579  }
  1580  
  1581  func TestDockerDriver_PortsMapping(t *testing.T) {
  1582  	if !tu.IsCI() {
  1583  		t.Parallel()
  1584  	}
  1585  	testutil.DockerCompatible(t)
  1586  
  1587  	task, cfg, ports := dockerTask(t)
  1588  	defer freeport.Return(ports)
  1589  	res := ports[0]
  1590  	dyn := ports[1]
  1591  	cfg.PortMap = map[string]int{
  1592  		"main":  8080,
  1593  		"REDIS": 6379,
  1594  	}
  1595  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1596  
  1597  	client, d, handle, cleanup := dockerSetup(t, task)
  1598  	defer cleanup()
  1599  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1600  
  1601  	container, err := client.InspectContainer(handle.containerID)
  1602  	require.NoError(t, err)
  1603  
  1604  	// Verify that the port environment variables are set
  1605  	require.Contains(t, container.Config.Env, "NOMAD_PORT_main=8080")
  1606  	require.Contains(t, container.Config.Env, "NOMAD_PORT_REDIS=6379")
  1607  
  1608  	// Verify that the correct ports are EXPOSED
  1609  	expectedExposedPorts := map[docker.Port]struct{}{
  1610  		docker.Port("8080/tcp"): {},
  1611  		docker.Port("8080/udp"): {},
  1612  		docker.Port("6379/tcp"): {},
  1613  		docker.Port("6379/udp"): {},
  1614  	}
  1615  
  1616  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1617  
  1618  	hostIP := "127.0.0.1"
  1619  	if runtime.GOOS == "windows" {
  1620  		hostIP = ""
  1621  	}
  1622  
  1623  	// Verify that the correct ports are FORWARDED
  1624  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1625  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1626  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1627  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1628  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1629  	}
  1630  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1631  }
  1632  
  1633  func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) {
  1634  	t.Parallel()
  1635  
  1636  	task, cfg, ports := dockerTask(t)
  1637  	defer freeport.Return(ports)
  1638  	res := ports[0]
  1639  	dyn := ports[1]
  1640  	cfg.PortMap = map[string]int{
  1641  		"main":  8080,
  1642  		"REDIS": 6379,
  1643  	}
  1644  	dh := dockerDriverHarness(t, nil)
  1645  	driver := dh.Impl().(*Driver)
  1646  
  1647  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1648  	require.NoError(t, err)
  1649  
  1650  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1651  	require.Contains(t, c.Config.Env, "NOMAD_PORT_main=8080")
  1652  	require.Contains(t, c.Config.Env, "NOMAD_PORT_REDIS=6379")
  1653  
  1654  	// Verify that the correct ports are FORWARDED
  1655  	hostIP := "127.0.0.1"
  1656  	if runtime.GOOS == "windows" {
  1657  		hostIP = ""
  1658  	}
  1659  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1660  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1661  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1662  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1663  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1664  	}
  1665  	require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings)
  1666  
  1667  }
  1668  
// TestDockerDriver_CleanupContainer runs a short-lived task to completion,
// destroys it, and verifies the container has been removed from the docker
// daemon.
func TestDockerDriver_CleanupContainer(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

		err = d.DestroyTask(task.ID, false)
		require.NoError(t, err)

		// Give container removal a moment to complete before polling the
		// daemon. NOTE(review): fixed sleep; could flake on very slow hosts.
		time.Sleep(3 * time.Second)

		// Ensure that the container isn't present
		_, err := client.InspectContainer(handle.containerID)
		if err == nil {
			t.Fatalf("expected to not get container")
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}
}
  1708  
// TestDockerDriver_EnableImageGC checks that with image GC enabled and an
// image_delay configured, the task's image survives until after DestroyTask
// plus the delay, and is then removed.
func TestDockerDriver_EnableImageGC(t *testing.T) {
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, map[string]interface{}{
		"gc": map[string]interface{}{
			"container":   true,
			"image":       true,
			"image_delay": "2s",
		},
	})
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	// Remove any leftover image so the GC assertions below are meaningful.
	cleanSlate(client, cfg.Image)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	_, ok = dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// we haven't called DestroyTask, image should be present
	_, err = client.InspectImage(cfg.Image)
	require.NoError(t, err)

	err = dockerDriver.DestroyTask(task.ID, false)
	require.NoError(t, err)

	// image_delay is 2s, so the image should still be around for a bit
	_, err = client.InspectImage(cfg.Image)
	require.NoError(t, err)

	// Ensure image was removed
	tu.WaitForResult(func() (bool, error) {
		if _, err := client.InspectImage(cfg.Image); err == nil {
			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})
}
  1774  
// TestDockerDriver_DisableImageGC checks that with image GC disabled the
// task's image is neither removed nor scheduled for removal after
// DestroyTask, even once the configured image_delay has elapsed.
func TestDockerDriver_DisableImageGC(t *testing.T) {
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, map[string]interface{}{
		"gc": map[string]interface{}{
			"container":   true,
			"image":       false,
			"image_delay": "1s",
		},
	})
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	// Remove any leftover image so the assertions below are meaningful.
	cleanSlate(client, cfg.Image)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// we haven't called DestroyTask, image should be present
	_, err = client.InspectImage(handle.containerImage)
	require.NoError(t, err)

	err = dockerDriver.DestroyTask(task.ID, false)
	require.NoError(t, err)

	// image_delay is 1s, wait a little longer
	time.Sleep(3 * time.Second)

	// image should not have been removed or scheduled to be removed
	_, err = client.InspectImage(cfg.Image)
	require.NoError(t, err)
	// Peek into the coordinator's pending-delete map under its lock.
	dockerDriver.coordinator.imageLock.Lock()
	_, ok = dockerDriver.coordinator.deleteFuture[handle.containerImage]
	require.False(t, ok, "image should not be registered for deletion")
	dockerDriver.coordinator.imageLock.Unlock()
}
  1836  
// TestDockerDriver_MissingContainer_Cleanup asserts that DestroyTask still
// succeeds — removing the image and the task handle — when the container has
// already been removed out-of-band (e.g. by an operator or another process).
func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	// Image GC enabled with zero delay so removal is immediate on destroy.
	driver := dockerDriverHarness(t, map[string]interface{}{
		"gc": map[string]interface{}{
			"container":   true,
			"image":       true,
			"image_delay": "0s",
		},
	})
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	// Remove any leftover image so the test starts from a known state.
	cleanSlate(client, cfg.Image)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	h, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	// Wait for the `echo hello` container to exit successfully.
	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// remove the container out-of-band
	require.NoError(t, client.RemoveContainer(docker.RemoveContainerOptions{
		ID: h.containerID,
	}))

	// DestroyTask must tolerate the already-missing container.
	require.NoError(t, dockerDriver.DestroyTask(task.ID, false))

	// Ensure image was removed
	tu.WaitForResult(func() (bool, error) {
		if _, err := client.InspectImage(cfg.Image); err == nil {
			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})

	// Ensure that task handle was removed
	_, ok = dockerDriver.tasks.Get(task.ID)
	require.False(t, ok)
}
  1902  
// TestDockerDriver_Stats asserts that the stats channel for a running
// container yields at least one usage sample. A goroutine collects one
// sample and then destroys the task, so the main goroutine's WaitTask is
// expected to observe an unsuccessful (killed) exit.
func TestDockerDriver_Stats(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "sleep"
	cfg.Args = []string{"1000"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	_, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	go func() {
		// Destroying the task here forces the WaitTask below to return.
		defer d.DestroyTask(task.ID, true)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		ch, err := handle.Stats(ctx, 1*time.Second)
		// assert (not require) — require.FailNow must not be called from a
		// non-test goroutine.
		assert.NoError(t, err)
		select {
		case ru := <-ch:
			assert.NotNil(t, ru.ResourceUsage)
		case <-time.After(3 * time.Second):
			assert.Fail(t, "stats timeout")
		}
	}()

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		// The goroutine killed the task, so a successful exit is a failure.
		if res.Successful() {
			t.Fatalf("should err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
		t.Fatalf("timeout")
	}
}
  1944  
  1945  func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath string) (*drivers.TaskConfig, *dtestutil.DriverHarness, *TaskConfig, string, func()) {
  1946  	testutil.DockerCompatible(t)
  1947  
  1948  	randfn := fmt.Sprintf("test-%d", rand.Int())
  1949  	hostfile := filepath.Join(hostpath, randfn)
  1950  	var containerPath string
  1951  	if runtime.GOOS == "windows" {
  1952  		containerPath = "C:\\data"
  1953  	} else {
  1954  		containerPath = "/mnt/vol"
  1955  	}
  1956  	containerFile := filepath.Join(containerPath, randfn)
  1957  
  1958  	taskCfg := newTaskConfig("", []string{"touch", containerFile})
  1959  	taskCfg.Volumes = []string{fmt.Sprintf("%s:%s", hostpath, containerPath)}
  1960  
  1961  	task := &drivers.TaskConfig{
  1962  		ID:        uuid.Generate(),
  1963  		Name:      "ls",
  1964  		AllocID:   uuid.Generate(),
  1965  		Env:       map[string]string{"VOL_PATH": containerPath},
  1966  		Resources: basicResources,
  1967  	}
  1968  	require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
  1969  
  1970  	d := dockerDriverHarness(t, cfg)
  1971  	cleanup := d.MkAllocDir(task, true)
  1972  
  1973  	copyImage(t, task.TaskDir(), "busybox.tar")
  1974  
  1975  	return task, d, &taskCfg, hostfile, cleanup
  1976  }
  1977  
// TestDockerDriver_VolumesDisabled covers three scenarios with the volumes
// plugin option disabled:
//  1. absolute host paths must be rejected at StartTask,
//  2. relative paths (inside the alloc dir) must still work, and
//  3. custom volume drivers must be rejected.
func TestDockerDriver_VolumesDisabled(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	cfg := map[string]interface{}{
		"volumes": map[string]interface{}{
			"enabled": false,
		},
		"gc": map[string]interface{}{
			"image": false,
		},
	}

	// Absolute host paths should be refused.
	{
		tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesdisabled")
		if err != nil {
			t.Fatalf("error creating temporary dir: %v", err)
		}

		task, driver, _, _, cleanup := setupDockerVolumes(t, cfg, tmpvol)
		defer cleanup()

		_, _, err = driver.StartTask(task)
		defer driver.DestroyTask(task.ID, true)
		if err == nil {
			require.Fail(t, "Started driver successfully when volumes should have been disabled.")
		}
	}

	// Relative paths should still be allowed
	{
		task, driver, _, fn, cleanup := setupDockerVolumes(t, cfg, ".")
		defer cleanup()

		_, _, err := driver.StartTask(task)
		require.NoError(t, err)
		defer driver.DestroyTask(task.ID, true)

		waitCh, err := driver.WaitTask(context.Background(), task.ID)
		require.NoError(t, err)
		select {
		case res := <-waitCh:
			if !res.Successful() {
				t.Fatalf("unexpected err: %v", res)
			}
		case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
			t.Fatalf("timeout")
		}

		// The touched file must be visible inside the task dir on the host.
		if _, err := ioutil.ReadFile(filepath.Join(task.TaskDir().Dir, fn)); err != nil {
			t.Fatalf("unexpected error reading %s: %v", fn, err)
		}
	}

	// Volume Drivers should be rejected (error)
	{
		task, driver, taskCfg, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol")
		defer cleanup()

		taskCfg.VolumeDriver = "flocker"
		require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))

		_, _, err := driver.StartTask(task)
		defer driver.DestroyTask(task.ID, true)
		if err == nil {
			require.Fail(t, "Started driver successfully when volume drivers should have been disabled.")
		}
	}
}
  2049  
// TestDockerDriver_VolumesEnabled asserts that with the default (enabled)
// volume configuration an absolute host path can be mounted: the container
// touches a file in the mount and the file must appear on the host.
func TestDockerDriver_VolumesEnabled(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled")
	require.NoError(t, err)

	// Evaluate symlinks so it works on MacOS
	tmpvol, err = filepath.EvalSymlinks(tmpvol)
	require.NoError(t, err)

	task, driver, _, hostpath, cleanup := setupDockerVolumes(t, nil, tmpvol)
	defer cleanup()

	_, _, err = driver.StartTask(task)
	require.NoError(t, err)
	defer driver.DestroyTask(task.ID, true)

	waitCh, err := driver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("unexpected err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
		t.Fatalf("timeout")
	}

	// The file touched inside the container must exist on the host side.
	if _, err := ioutil.ReadFile(hostpath); err != nil {
		t.Fatalf("unexpected error reading %s: %v", hostpath, err)
	}
}
  2085  
  2086  func TestDockerDriver_Mounts(t *testing.T) {
  2087  	if !tu.IsCI() {
  2088  		t.Parallel()
  2089  	}
  2090  	testutil.DockerCompatible(t)
  2091  
  2092  	goodMount := DockerMount{
  2093  		Target: "/nomad",
  2094  		VolumeOptions: DockerVolumeOptions{
  2095  			Labels: map[string]string{"foo": "bar"},
  2096  			DriverConfig: DockerVolumeDriverConfig{
  2097  				Name: "local",
  2098  			},
  2099  		},
  2100  		ReadOnly: true,
  2101  		Source:   "test",
  2102  	}
  2103  
  2104  	if runtime.GOOS == "windows" {
  2105  		goodMount.Target = "C:\\nomad"
  2106  	}
  2107  
  2108  	cases := []struct {
  2109  		Name   string
  2110  		Mounts []DockerMount
  2111  		Error  string
  2112  	}{
  2113  		{
  2114  			Name:   "good-one",
  2115  			Error:  "",
  2116  			Mounts: []DockerMount{goodMount},
  2117  		},
  2118  		{
  2119  			Name:   "duplicate",
  2120  			Error:  "Duplicate mount point",
  2121  			Mounts: []DockerMount{goodMount, goodMount, goodMount},
  2122  		},
  2123  	}
  2124  
  2125  	for _, c := range cases {
  2126  		t.Run(c.Name, func(t *testing.T) {
  2127  			d := dockerDriverHarness(t, nil)
  2128  			// Build the task
  2129  			task, cfg, ports := dockerTask(t)
  2130  			defer freeport.Return(ports)
  2131  			cfg.Command = "sleep"
  2132  			cfg.Args = []string{"10000"}
  2133  			cfg.Mounts = c.Mounts
  2134  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2135  			cleanup := d.MkAllocDir(task, true)
  2136  			defer cleanup()
  2137  
  2138  			copyImage(t, task.TaskDir(), "busybox.tar")
  2139  
  2140  			_, _, err := d.StartTask(task)
  2141  			defer d.DestroyTask(task.ID, true)
  2142  			if err == nil && c.Error != "" {
  2143  				t.Fatalf("expected error: %v", c.Error)
  2144  			} else if err != nil {
  2145  				if c.Error == "" {
  2146  					t.Fatalf("unexpected error in prestart: %v", err)
  2147  				} else if !strings.Contains(err.Error(), c.Error) {
  2148  					t.Fatalf("expected error %q; got %v", c.Error, err)
  2149  				}
  2150  			}
  2151  		})
  2152  	}
  2153  }
  2154  
  2155  func TestDockerDriver_AuthConfiguration(t *testing.T) {
  2156  	if !tu.IsCI() {
  2157  		t.Parallel()
  2158  	}
  2159  	testutil.DockerCompatible(t)
  2160  
  2161  	path := "./test-resources/docker/auth.json"
  2162  	cases := []struct {
  2163  		Repo       string
  2164  		AuthConfig *docker.AuthConfiguration
  2165  	}{
  2166  		{
  2167  			Repo:       "lolwhat.com/what:1337",
  2168  			AuthConfig: nil,
  2169  		},
  2170  		{
  2171  			Repo: "redis:3.2",
  2172  			AuthConfig: &docker.AuthConfiguration{
  2173  				Username:      "test",
  2174  				Password:      "1234",
  2175  				Email:         "",
  2176  				ServerAddress: "https://index.docker.io/v1/",
  2177  			},
  2178  		},
  2179  		{
  2180  			Repo: "quay.io/redis:3.2",
  2181  			AuthConfig: &docker.AuthConfiguration{
  2182  				Username:      "test",
  2183  				Password:      "5678",
  2184  				Email:         "",
  2185  				ServerAddress: "quay.io",
  2186  			},
  2187  		},
  2188  		{
  2189  			Repo: "other.io/redis:3.2",
  2190  			AuthConfig: &docker.AuthConfiguration{
  2191  				Username:      "test",
  2192  				Password:      "abcd",
  2193  				Email:         "",
  2194  				ServerAddress: "https://other.io/v1/",
  2195  			},
  2196  		},
  2197  	}
  2198  
  2199  	for _, c := range cases {
  2200  		act, err := authFromDockerConfig(path)(c.Repo)
  2201  		require.NoError(t, err)
  2202  		require.Exactly(t, c.AuthConfig, act)
  2203  	}
  2204  }
  2205  
  2206  func TestDockerDriver_AuthFromTaskConfig(t *testing.T) {
  2207  	if !tu.IsCI() {
  2208  		t.Parallel()
  2209  	}
  2210  
  2211  	cases := []struct {
  2212  		Auth       DockerAuth
  2213  		AuthConfig *docker.AuthConfiguration
  2214  		Desc       string
  2215  	}{
  2216  		{
  2217  			Auth:       DockerAuth{},
  2218  			AuthConfig: nil,
  2219  			Desc:       "Empty Config",
  2220  		},
  2221  		{
  2222  			Auth: DockerAuth{
  2223  				Username:   "foo",
  2224  				Password:   "bar",
  2225  				Email:      "foo@bar.com",
  2226  				ServerAddr: "www.foobar.com",
  2227  			},
  2228  			AuthConfig: &docker.AuthConfiguration{
  2229  				Username:      "foo",
  2230  				Password:      "bar",
  2231  				Email:         "foo@bar.com",
  2232  				ServerAddress: "www.foobar.com",
  2233  			},
  2234  			Desc: "All fields set",
  2235  		},
  2236  		{
  2237  			Auth: DockerAuth{
  2238  				Username:   "foo",
  2239  				Password:   "bar",
  2240  				ServerAddr: "www.foobar.com",
  2241  			},
  2242  			AuthConfig: &docker.AuthConfiguration{
  2243  				Username:      "foo",
  2244  				Password:      "bar",
  2245  				ServerAddress: "www.foobar.com",
  2246  			},
  2247  			Desc: "Email not set",
  2248  		},
  2249  	}
  2250  
  2251  	for _, c := range cases {
  2252  		t.Run(c.Desc, func(t *testing.T) {
  2253  			act, err := authFromTaskConfig(&TaskConfig{Auth: c.Auth})("test")
  2254  			require.NoError(t, err)
  2255  			require.Exactly(t, c.AuthConfig, act)
  2256  		})
  2257  	}
  2258  }
  2259  
// TestDockerDriver_OOMKilled runs a container that grows a shell variable
// without bound under a 10MB memory limit and asserts the task result is
// unsuccessful with the OOMKilled flag set.
func TestDockerDriver_OOMKilled(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	if runtime.GOOS == "windows" {
		t.Skip("Windows does not support OOM Killer")
	}

	// Doubles x forever; memory usage grows until the kernel kills it.
	taskCfg := newTaskConfig("", []string{"sh", "-c", `sleep 2 && x=a && while true; do x="$x$x"; done`})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "oom-killed",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	// Tighten the limits so the OOM kill happens quickly.
	task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024
	task.Resources.NomadResources.Memory.MemoryMB = 10

	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if res.Successful() {
			t.Fatalf("expected error, but container exited successful")
		}

		if !res.OOMKilled {
			t.Fatalf("not killed by OOM killer: %s", res.Err)
		}

		t.Logf("Successfully killed by OOM killer")

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}
}
  2310  
  2311  func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
  2312  	if !tu.IsCI() {
  2313  		t.Parallel()
  2314  	}
  2315  	testutil.DockerCompatible(t)
  2316  
  2317  	brokenConfigs := []DockerDevice{
  2318  		{
  2319  			HostPath: "",
  2320  		},
  2321  		{
  2322  			HostPath:          "/dev/sda1",
  2323  			CgroupPermissions: "rxb",
  2324  		},
  2325  	}
  2326  
  2327  	testCases := []struct {
  2328  		deviceConfig []DockerDevice
  2329  		err          error
  2330  	}{
  2331  		{brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")},
  2332  		{brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
  2333  	}
  2334  
  2335  	for _, tc := range testCases {
  2336  		task, cfg, ports := dockerTask(t)
  2337  		cfg.Devices = tc.deviceConfig
  2338  		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2339  		d := dockerDriverHarness(t, nil)
  2340  		cleanup := d.MkAllocDir(task, true)
  2341  		copyImage(t, task.TaskDir(), "busybox.tar")
  2342  		defer cleanup()
  2343  
  2344  		_, _, err := d.StartTask(task)
  2345  		require.Error(t, err)
  2346  		require.Contains(t, err.Error(), tc.err.Error())
  2347  		freeport.Return(ports)
  2348  	}
  2349  }
  2350  
// TestDockerDriver_Device_Success asserts that a device mapping in the task
// config is passed through to the container's HostConfig, with the cgroup
// permissions defaulting to "rwm" when left unset.
func TestDockerDriver_Device_Success(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	if runtime.GOOS != "linux" {
		t.Skip("test device mounts only on linux")
	}

	hostPath := "/dev/random"
	containerPath := "/dev/myrandom"
	perms := "rwm"

	expectedDevice := docker.Device{
		PathOnHost:        hostPath,
		PathInContainer:   containerPath,
		CgroupPermissions: perms,
	}
	// CgroupPermissions deliberately left empty to exercise the default.
	config := DockerDevice{
		HostPath:      hostPath,
		ContainerPath: containerPath,
	}

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Devices = []DockerDevice{config}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.NotEmpty(t, container.HostConfig.Devices, "Expected one device")
	require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device ")
}
  2390  
  2391  func TestDockerDriver_Entrypoint(t *testing.T) {
  2392  	if !tu.IsCI() {
  2393  		t.Parallel()
  2394  	}
  2395  	testutil.DockerCompatible(t)
  2396  
  2397  	entrypoint := []string{"sh", "-c"}
  2398  	task, cfg, ports := dockerTask(t)
  2399  	defer freeport.Return(ports)
  2400  	cfg.Entrypoint = entrypoint
  2401  	cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
  2402  	cfg.Args = []string{}
  2403  
  2404  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2405  
  2406  	client, driver, handle, cleanup := dockerSetup(t, task)
  2407  	defer cleanup()
  2408  
  2409  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2410  
  2411  	container, err := client.InspectContainer(handle.containerID)
  2412  	require.NoError(t, err)
  2413  
  2414  	require.Len(t, container.Config.Entrypoint, 2, "Expected one entrypoint")
  2415  	require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint ")
  2416  }
  2417  
// TestDockerDriver_ReadonlyRootfs asserts that the readonly_rootfs task
// option is propagated to the container's HostConfig.
func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	if runtime.GOOS == "windows" {
		t.Skip("Windows Docker does not support root filesystem in read-only mode")
	}

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.ReadonlyRootfs = true
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
}
  2442  
// fakeDockerClient can be used in places that accept an interface for the
// docker client such as createContainer. Only CreateContainer is
// implemented; it always fails with a volume error so tests can exercise
// the driver's error classification without a real docker daemon. The
// remaining methods panic because the tests using this fake never reach
// them.
type fakeDockerClient struct{}

// CreateContainer always returns a volume-attachment error.
func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
	return nil, fmt.Errorf("volume is attached on another node")
}

// InspectContainer is not expected to be called.
func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
	panic("not implemented")
}

// ListContainers is not expected to be called.
func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) {
	panic("not implemented")
}

// RemoveContainer is not expected to be called.
func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error {
	panic("not implemented")
}
  2459  
  2460  // TestDockerDriver_VolumeError asserts volume related errors when creating a
  2461  // container are recoverable.
  2462  func TestDockerDriver_VolumeError(t *testing.T) {
  2463  	if !tu.IsCI() {
  2464  		t.Parallel()
  2465  	}
  2466  
  2467  	// setup
  2468  	_, cfg, ports := dockerTask(t)
  2469  	defer freeport.Return(ports)
  2470  	driver := dockerDriverHarness(t, nil)
  2471  
  2472  	// assert volume error is recoverable
  2473  	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image)
  2474  	require.True(t, structs.IsRecoverable(err))
  2475  }
  2476  
  2477  func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
  2478  	if !tu.IsCI() {
  2479  		t.Parallel()
  2480  	}
  2481  	testutil.DockerCompatible(t)
  2482  
  2483  	expectedPrefix := "2001:db8:1::242:ac11"
  2484  	expectedAdvertise := true
  2485  	task, cfg, ports := dockerTask(t)
  2486  	defer freeport.Return(ports)
  2487  	cfg.AdvertiseIPv6Addr = expectedAdvertise
  2488  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2489  
  2490  	client := newTestDockerClient(t)
  2491  
  2492  	// Make sure IPv6 is enabled
  2493  	net, err := client.NetworkInfo("bridge")
  2494  	if err != nil {
  2495  		t.Skip("error retrieving bridge network information, skipping")
  2496  	}
  2497  	if net == nil || !net.EnableIPv6 {
  2498  		t.Skip("IPv6 not enabled on bridge network, skipping")
  2499  	}
  2500  
  2501  	driver := dockerDriverHarness(t, nil)
  2502  	cleanup := driver.MkAllocDir(task, true)
  2503  	copyImage(t, task.TaskDir(), "busybox.tar")
  2504  	defer cleanup()
  2505  
  2506  	_, network, err := driver.StartTask(task)
  2507  	defer driver.DestroyTask(task.ID, true)
  2508  	require.NoError(t, err)
  2509  
  2510  	require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, network.AutoAdvertise)
  2511  
  2512  	if !strings.HasPrefix(network.IP, expectedPrefix) {
  2513  		t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
  2514  	}
  2515  
  2516  	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
  2517  	require.True(t, ok)
  2518  
  2519  	require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))
  2520  
  2521  	container, err := client.InspectContainer(handle.containerID)
  2522  	require.NoError(t, err)
  2523  
  2524  	if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
  2525  		t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address)
  2526  	}
  2527  }
  2528  
  2529  func TestParseDockerImage(t *testing.T) {
  2530  	tests := []struct {
  2531  		Image string
  2532  		Repo  string
  2533  		Tag   string
  2534  	}{
  2535  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2536  		{"library/hello-world", "library/hello-world", "latest"},
  2537  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2538  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2539  	}
  2540  	for _, test := range tests {
  2541  		t.Run(test.Image, func(t *testing.T) {
  2542  			repo, tag := parseDockerImage(test.Image)
  2543  			require.Equal(t, test.Repo, repo)
  2544  			require.Equal(t, test.Tag, tag)
  2545  		})
  2546  	}
  2547  }
  2548  
  2549  func TestDockerImageRef(t *testing.T) {
  2550  	tests := []struct {
  2551  		Image string
  2552  		Repo  string
  2553  		Tag   string
  2554  	}{
  2555  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2556  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2557  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2558  	}
  2559  	for _, test := range tests {
  2560  		t.Run(test.Image, func(t *testing.T) {
  2561  			image := dockerImageRef(test.Repo, test.Tag)
  2562  			require.Equal(t, test.Image, image)
  2563  		})
  2564  	}
  2565  }
  2566  
  2567  func waitForExist(t *testing.T, client *docker.Client, containerID string) {
  2568  	tu.WaitForResult(func() (bool, error) {
  2569  		container, err := client.InspectContainer(containerID)
  2570  		if err != nil {
  2571  			if _, ok := err.(*docker.NoSuchContainer); !ok {
  2572  				return false, err
  2573  			}
  2574  		}
  2575  
  2576  		return container != nil, nil
  2577  	}, func(err error) {
  2578  		require.NoError(t, err)
  2579  	})
  2580  }
  2581  
// TestDockerDriver_CreationIdempotent asserts that the createContainer and
// startContainer functions are idempotent, as we have some retry
// logic there without ensuring we delete/destroy containers.
func TestDockerDriver_CreationIdempotent(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, nil)
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	copyImage(t, task.TaskDir(), "busybox.tar")

	d, ok := driver.Impl().(*Driver)
	require.True(t, ok)

	_, err := d.createImage(task, cfg, client)
	require.NoError(t, err)

	containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
	require.NoError(t, err)

	// First creation: container must be removed manually since we bypass
	// the normal task lifecycle here.
	c, err := d.createContainer(client, containerCfg, cfg.Image)
	require.NoError(t, err)
	defer client.RemoveContainer(docker.RemoveContainerOptions{
		ID:    c.ID,
		Force: true,
	})

	// calling createContainer again creates a new one and remove old one
	c2, err := d.createContainer(client, containerCfg, cfg.Image)
	require.NoError(t, err)
	defer client.RemoveContainer(docker.RemoveContainerOptions{
		ID:    c2.ID,
		Force: true,
	})

	require.NotEqual(t, c.ID, c2.ID)
	// old container was destroyed
	{
		_, err := client.InspectContainer(c.ID)
		require.Error(t, err)
		require.Contains(t, err.Error(), NoSuchContainerError)
	}

	// now start container twice
	require.NoError(t, d.startContainer(c2))
	require.NoError(t, d.startContainer(c2))

	// The doubly-started container must end up running.
	tu.WaitForResult(func() (bool, error) {
		c, err := client.InspectContainer(c2.ID)
		if err != nil {
			return false, fmt.Errorf("failed to get container status: %v", err)
		}

		if !c.State.Running {
			return false, fmt.Errorf("container is not running but %v", c.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})
}
  2653  
  2654  // TestDockerDriver_CreateContainerConfig_CPUHardLimit asserts that a default
  2655  // CPU quota and period are set when cpu_hard_limit = true.
  2656  func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
  2657  	t.Parallel()
  2658  
  2659  	task, _, ports := dockerTask(t)
  2660  	defer freeport.Return(ports)
  2661  
  2662  	dh := dockerDriverHarness(t, nil)
  2663  	driver := dh.Impl().(*Driver)
  2664  	schema, _ := driver.TaskConfigSchema()
  2665  	spec, _ := hclspecutils.Convert(schema)
  2666  
  2667  	val, _, _ := hclutils.ParseHclInterface(map[string]interface{}{
  2668  		"image":          "foo/bar",
  2669  		"cpu_hard_limit": true,
  2670  	}, spec, nil)
  2671  
  2672  	require.NoError(t, task.EncodeDriverConfig(val))
  2673  	cfg := &TaskConfig{}
  2674  	require.NoError(t, task.DecodeDriverConfig(cfg))
  2675  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  2676  	require.NoError(t, err)
  2677  
  2678  	require.NotZero(t, c.HostConfig.CPUQuota)
  2679  	require.NotZero(t, c.HostConfig.CPUPeriod)
  2680  }